-rw-r--r--Documentation/ABI/stable/sysfs-class-backlight36
-rw-r--r--Documentation/ABI/testing/sysfs-class-lcd23
-rw-r--r--Documentation/ABI/testing/sysfs-class-led28
-rw-r--r--Documentation/ABI/testing/sysfs-gpio1
-rw-r--r--Documentation/ABI/testing/sysfs-platform-asus-laptop52
-rw-r--r--Documentation/ABI/testing/sysfs-platform-eeepc-laptop50
-rw-r--r--Documentation/DocBook/mtdnand.tmpl2
-rw-r--r--Documentation/DocBook/scsi.tmpl2
-rw-r--r--Documentation/Intel-IOMMU.txt6
-rw-r--r--Documentation/SubmittingPatches2
-rw-r--r--Documentation/accounting/getdelays.c12
-rw-r--r--Documentation/arm/tcm.txt145
-rw-r--r--Documentation/auxdisplay/cfag12864b-example.c23
-rw-r--r--Documentation/cgroups/cgroups.txt32
-rw-r--r--Documentation/cgroups/memory.txt41
-rw-r--r--Documentation/crypto/async-tx-api.txt75
-rw-r--r--Documentation/fb/ep93xx-fb.txt135
-rw-r--r--Documentation/fb/matroxfb.txt4
-rw-r--r--Documentation/feature-removal-schedule.txt8
-rw-r--r--Documentation/filesystems/9p.txt40
-rw-r--r--Documentation/filesystems/ncpfs.txt6
-rw-r--r--Documentation/filesystems/nfs41-server.txt54
-rw-r--r--Documentation/filesystems/nfsroot.txt2
-rw-r--r--Documentation/filesystems/proc.txt27
-rw-r--r--Documentation/filesystems/sharedsubtree.txt220
-rw-r--r--Documentation/filesystems/vfs.txt7
-rw-r--r--Documentation/gcov.txt2
-rw-r--r--Documentation/gpio.txt17
-rw-r--r--Documentation/hwmon/acpi_power_meter51
-rw-r--r--Documentation/hwmon/coretemp4
-rw-r--r--Documentation/hwmon/fscher169
-rw-r--r--Documentation/hwmon/hpfall.c115
-rw-r--r--Documentation/hwmon/pc874272
-rw-r--r--Documentation/i2c/busses/i2c-piix42
-rw-r--r--Documentation/i2c/chips/pca953958
-rw-r--r--Documentation/i2c/chips/pcf857465
-rw-r--r--Documentation/i2c/chips/pcf857569
-rw-r--r--Documentation/ia64/aliasing-test.c8
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kbuild/kbuild.txt16
-rw-r--r--Documentation/kbuild/makefiles.txt20
-rw-r--r--Documentation/kernel-parameters.txt4
-rw-r--r--Documentation/kmemcheck.txt21
-rw-r--r--Documentation/laptops/asus-laptop.txt258
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt56
-rw-r--r--Documentation/leds-class.txt9
-rw-r--r--Documentation/lguest/lguest.c26
-rw-r--r--Documentation/memory.txt31
-rw-r--r--Documentation/networking/regulatory.txt2
-rw-r--r--Documentation/numastat.txt8
-rw-r--r--Documentation/pcmcia/crc32hash.c2
-rw-r--r--Documentation/power/power_supply_class.txt7
-rw-r--r--Documentation/power/regulator/design.txt33
-rw-r--r--Documentation/power/regulator/machine.txt4
-rw-r--r--Documentation/power/regulator/overview.txt4
-rw-r--r--Documentation/power/regulator/regulator.txt5
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/esdhc.txt2
-rw-r--r--Documentation/powerpc/dts-bindings/marvell.txt2
-rw-r--r--Documentation/powerpc/dts-bindings/mtd-physmap.txt42
-rw-r--r--Documentation/rtc.txt26
-rw-r--r--Documentation/scsi/ChangeLog.megaraid2
-rw-r--r--Documentation/scsi/scsi_fc_transport.txt2
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt2
-rw-r--r--Documentation/spi/spi-summary2
-rw-r--r--Documentation/spi/spidev_test.c4
-rw-r--r--Documentation/sysctl/fs.txt17
-rw-r--r--Documentation/sysctl/kernel.txt60
-rw-r--r--Documentation/sysctl/vm.txt45
-rw-r--r--Documentation/trace/events-kmem.txt107
-rw-r--r--Documentation/trace/events.txt2
-rw-r--r--Documentation/trace/ftrace.txt2
-rw-r--r--Documentation/trace/postprocess/trace-pagealloc-postprocess.pl418
-rw-r--r--Documentation/trace/tracepoint-analysis.txt327
-rw-r--r--Documentation/usb/authorization.txt10
-rw-r--r--Documentation/usb/usbmon.txt8
-rw-r--r--Documentation/video4linux/v4lgrab.c2
-rw-r--r--Documentation/vm/.gitignore1
-rw-r--r--Documentation/vm/00-INDEX4
-rw-r--r--Documentation/vm/hugetlbpage.txt147
-rw-r--r--Documentation/vm/ksm.txt89
-rw-r--r--Documentation/vm/locking2
-rw-r--r--Documentation/vm/map_hugetlb.c77
-rw-r--r--Documentation/vm/page-types.c248
-rw-r--r--Documentation/vm/slabinfo.c68
-rw-r--r--Documentation/watchdog/src/watchdog-test.c2
-rw-r--r--Documentation/x86/earlyprintk.txt39
-rw-r--r--MAINTAINERS219
-rw-r--r--Makefile62
-rw-r--r--arch/alpha/Kconfig8
-rw-r--r--arch/alpha/boot/tools/objstrip.c2
-rw-r--r--arch/alpha/include/asm/fcntl.h2
-rw-r--r--arch/alpha/include/asm/hardirq.h14
-rw-r--r--arch/alpha/include/asm/mman.h5
-rw-r--r--arch/alpha/include/asm/smp.h2
-rw-r--r--arch/alpha/include/asm/topology.h18
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/alpha/kernel/core_titan.c2
-rw-r--r--arch/alpha/kernel/init_task.c5
-rw-r--r--arch/alpha/kernel/pci_impl.h2
-rw-r--r--arch/alpha/kernel/pci_iommu.c10
-rw-r--r--arch/alpha/kernel/process.c1
-rw-r--r--arch/alpha/kernel/smp.c14
-rw-r--r--arch/alpha/kernel/time.c79
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S95
-rw-r--r--arch/alpha/mm/init.c2
-rw-r--r--arch/alpha/mm/numa.c2
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/boot/install.sh4
-rw-r--r--arch/arm/common/locomo.c17
-rw-r--r--arch/arm/common/vic.c1
-rw-r--r--arch/arm/configs/littleton_defconfig783
-rw-r--r--arch/arm/configs/n770_defconfig2
-rw-r--r--arch/arm/configs/nhk8815_defconfig2
-rw-r--r--arch/arm/configs/omap3_beagle_defconfig47
-rw-r--r--arch/arm/configs/omap_3430sdp_defconfig39
-rw-r--r--arch/arm/configs/omap_ldp_defconfig54
-rw-r--r--arch/arm/configs/pxa3xx_defconfig1332
-rw-r--r--arch/arm/configs/xcep_defconfig1129
-rw-r--r--arch/arm/configs/zylonite_defconfig736
-rw-r--r--arch/arm/include/asm/atomic.h26
-rw-r--r--arch/arm/include/asm/cache.h2
-rw-r--r--arch/arm/include/asm/cacheflush.h8
-rw-r--r--arch/arm/include/asm/cputype.h10
-rw-r--r--arch/arm/include/asm/hardware/iop3xx-adma.h81
-rw-r--r--arch/arm/include/asm/hardware/iop_adma.h3
-rw-r--r--arch/arm/include/asm/mman.h18
-rw-r--r--arch/arm/include/asm/mmu_context.h7
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/tcm.h31
-rw-r--r--arch/arm/include/asm/tlbflush.h4
-rw-r--r--arch/arm/include/asm/unified.h4
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/kernel/Makefile4
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/entry-armv.S19
-rw-r--r--arch/arm/kernel/entry-header.S16
-rw-r--r--arch/arm/kernel/init_task.c5
-rw-r--r--arch/arm/kernel/kprobes.c19
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/smp.c10
-rw-r--r--arch/arm/kernel/sys_arm.c1
-rw-r--r--arch/arm/kernel/tcm.c246
-rw-r--r--arch/arm/kernel/tcm.h17
-rw-r--r--arch/arm/kernel/vmlinux.lds.S57
-rw-r--r--arch/arm/lib/copy_page.S16
-rw-r--r--arch/arm/mach-at91/Kconfig7
-rw-r--r--arch/arm/mach-at91/Makefile1
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c10
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c96
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c36
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c104
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c102
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c2
-rw-r--r--arch/arm/mach-at91/board-cam60.c2
-rw-r--r--arch/arm/mach-at91/board-cap9adk.c2
-rw-r--r--arch/arm/mach-at91/board-neocore926.c4
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c2
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c21
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c277
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c10
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c12
-rw-r--r--arch/arm/mach-at91/board-usb-a9260.c2
-rw-r--r--arch/arm/mach-at91/board-usb-a9263.c2
-rw-r--r--arch/arm/mach-at91/include/mach/board.h11
-rw-r--r--arch/arm/mach-ep93xx/clock.c88
-rw-r--r--arch/arm/mach-ep93xx/core.c32
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h6
-rw-r--r--arch/arm/mach-ep93xx/include/mach/fb.h56
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h2
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c4
-rw-r--r--arch/arm/mach-iop13xx/include/mach/adma.h119
-rw-r--r--arch/arm/mach-iop13xx/setup.c17
-rw-r--r--arch/arm/mach-ixp4xx/common.c16
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/system.h6
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c155
-rw-r--r--arch/arm/mach-nomadik/include/mach/fsmc.h29
-rw-r--r--arch/arm/mach-nomadik/include/mach/nand.h16
-rw-r--r--arch/arm/mach-omap2/board-apollon.c4
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c4
-rw-r--r--arch/arm/mach-omap2/devices.c71
-rw-r--r--arch/arm/mach-omap2/gpmc.c63
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.c78
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.h2
-rw-r--r--arch/arm/mach-pxa/Kconfig36
-rw-r--r--arch/arm/mach-pxa/Makefile4
-rw-r--r--arch/arm/mach-pxa/balloon3.c361
-rw-r--r--arch/arm/mach-pxa/clock.h1
-rw-r--r--arch/arm/mach-pxa/cm-x270.c146
-rw-r--r--arch/arm/mach-pxa/cm-x300.c71
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c1
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c33
-rw-r--r--arch/arm/mach-pxa/colibri-pxa3xx.c52
-rw-r--r--arch/arm/mach-pxa/corgi.c233
-rw-r--r--arch/arm/mach-pxa/csb726.c54
-rw-r--r--arch/arm/mach-pxa/devices.c27
-rw-r--r--arch/arm/mach-pxa/devices.h2
-rw-r--r--arch/arm/mach-pxa/e740.c1
-rw-r--r--arch/arm/mach-pxa/e750.c1
-rw-r--r--arch/arm/mach-pxa/em-x270.c45
-rw-r--r--arch/arm/mach-pxa/eseries.c39
-rw-r--r--arch/arm/mach-pxa/gumstix.c5
-rw-r--r--arch/arm/mach-pxa/hx4700.c65
-rw-r--r--arch/arm/mach-pxa/idp.c5
-rw-r--r--arch/arm/mach-pxa/imote2.c3
-rw-r--r--arch/arm/mach-pxa/include/mach/balloon3.h134
-rw-r--r--arch/arm/mach-pxa/include/mach/colibri.h6
-rw-r--r--arch/arm/mach-pxa/include/mach/entry-macro.S25
-rw-r--r--arch/arm/mach-pxa/include/mach/hardware.h17
-rw-r--r--arch/arm/mach-pxa/include/mach/irda.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h42
-rw-r--r--arch/arm/mach-pxa/include/mach/mfp.h301
-rw-r--r--arch/arm/mach-pxa/include/mach/mmc.h5
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtc.h86
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtx.h5
-rw-r--r--arch/arm/mach-pxa/include/mach/pxa3xx-regs.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/pxafb.h3
-rw-r--r--arch/arm/mach-pxa/include/mach/regs-intc.h11
-rw-r--r--arch/arm/mach-pxa/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-pxa/irq.c8
-rw-r--r--arch/arm/mach-pxa/littleton.c43
-rw-r--r--arch/arm/mach-pxa/lubbock.c18
-rw-r--r--arch/arm/mach-pxa/magician.c59
-rw-r--r--arch/arm/mach-pxa/mainstone.c16
-rw-r--r--arch/arm/mach-pxa/mioa701.c84
-rw-r--r--arch/arm/mach-pxa/palmld.c143
-rw-r--r--arch/arm/mach-pxa/palmt5.c111
-rw-r--r--arch/arm/mach-pxa/palmtc.c436
-rw-r--r--arch/arm/mach-pxa/palmte2.c110
-rw-r--r--arch/arm/mach-pxa/palmtx.c225
-rw-r--r--arch/arm/mach-pxa/palmz72.c116
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c67
-rw-r--r--arch/arm/mach-pxa/poodle.c80
-rw-r--r--arch/arm/mach-pxa/pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/pxa300.c2
-rw-r--r--arch/arm/mach-pxa/pxa320.c2
-rw-r--r--arch/arm/mach-pxa/pxa930.c19
-rw-r--r--arch/arm/mach-pxa/spitz.c238
-rw-r--r--arch/arm/mach-pxa/tosa.c91
-rw-r--r--arch/arm/mach-pxa/treo680.c159
-rw-r--r--arch/arm/mach-pxa/trizeps4.c4
-rw-r--r--arch/arm/mach-pxa/xcep.c187
-rw-r--r--arch/arm/mach-pxa/zylonite.c3
-rw-r--r--arch/arm/mach-realview/core.c24
-rw-r--r--arch/arm/mach-realview/core.h4
-rw-r--r--arch/arm/mach-realview/realview_eb.c40
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c40
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c40
-rw-r--r--arch/arm/mach-realview/realview_pba8.c40
-rw-r--r--arch/arm/mach-realview/realview_pbx.c40
-rw-r--r--arch/arm/mach-s3c2410/Kconfig5
-rw-r--r--arch/arm/mach-s3c2412/Kconfig3
-rw-r--r--arch/arm/mach-s3c2440/Kconfig6
-rw-r--r--arch/arm/mach-s3c6400/Kconfig1
-rw-r--r--arch/arm/mach-s3c6410/Kconfig1
-rw-r--r--arch/arm/mach-sa1100/dma.c2
-rw-r--r--arch/arm/mach-u300/Kconfig12
-rw-r--r--arch/arm/mach-u300/Makefile3
-rw-r--r--arch/arm/mach-u300/core.c14
-rw-r--r--arch/arm/mach-u300/dummyspichip.c290
-rw-r--r--arch/arm/mach-u300/gpio.c13
-rw-r--r--arch/arm/mach-u300/i2c.c43
-rw-r--r--arch/arm/mach-u300/i2c.h23
-rw-r--r--arch/arm/mach-u300/include/mach/memory.h8
-rw-r--r--arch/arm/mach-u300/include/mach/syscon.h120
-rw-r--r--arch/arm/mach-u300/mmc.c22
-rw-r--r--arch/arm/mach-u300/padmux.c395
-rw-r--r--arch/arm/mach-u300/padmux.h28
-rw-r--r--arch/arm/mach-u300/spi.c124
-rw-r--r--arch/arm/mach-u300/spi.h26
-rw-r--r--arch/arm/mach-u300/timer.c15
-rw-r--r--arch/arm/mach-versatile/core.c4
-rw-r--r--arch/arm/mach-versatile/versatile_pb.c4
-rw-r--r--arch/arm/mm/Kconfig5
-rw-r--r--arch/arm/mm/context.c2
-rw-r--r--arch/arm/mm/fault.c110
-rw-r--r--arch/arm/mm/flush.c10
-rw-r--r--arch/arm/mm/init.c12
-rw-r--r--arch/arm/plat-iop/adma.c4
-rw-r--r--arch/arm/plat-mxc/include/mach/spi.h27
-rw-r--r--arch/arm/plat-omap/include/mach/gpmc.h4
-rw-r--r--arch/arm/plat-omap/include/mach/irqs.h2
-rw-r--r--arch/arm/plat-omap/include/mach/lcd_mipid.h5
-rw-r--r--arch/arm/plat-omap/include/mach/mmc.h20
-rw-r--r--arch/arm/plat-omap/include/mach/omapfb.h4
-rw-r--r--arch/arm/plat-pxa/dma.c281
-rw-r--r--arch/arm/plat-pxa/include/plat/mfp.h73
-rw-r--r--arch/arm/plat-pxa/mfp.c3
-rw-r--r--arch/arm/plat-s3c/gpio.c2
-rw-r--r--arch/arm/plat-s3c64xx/dma.c6
-rw-r--r--arch/arm/plat-s3c64xx/include/plat/dma-plat.h2
-rw-r--r--arch/arm/plat-s3c64xx/include/plat/irqs.h10
-rw-r--r--arch/arm/plat-s3c64xx/s3c6400-clock.c8
-rw-r--r--arch/arm/plat-stmp3xxx/dma.c2
-rw-r--r--arch/arm/tools/mach-types20
-rw-r--r--arch/avr32/include/asm/mman.h18
-rw-r--r--arch/avr32/kernel/init_task.c5
-rw-r--r--arch/avr32/mm/init.c6
-rw-r--r--arch/blackfin/Makefile4
-rw-r--r--arch/blackfin/boot/install.sh6
-rw-r--r--arch/blackfin/include/asm/sections.h38
-rw-r--r--arch/blackfin/include/asm/unistd.h2
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S56
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF539.h2
-rw-r--r--arch/blackfin/mach-common/entry.S2
-rw-r--r--arch/cris/Makefile2
-rw-r--r--arch/cris/arch-v10/kernel/time.c1
-rw-r--r--arch/cris/arch-v32/kernel/smp.c2
-rw-r--r--arch/cris/arch-v32/kernel/time.c1
-rw-r--r--arch/cris/arch-v32/mach-a3/io.c2
-rw-r--r--arch/cris/arch-v32/mach-fs/io.c2
-rw-r--r--arch/cris/include/arch-v10/arch/mmu.h9
-rw-r--r--arch/cris/include/arch-v32/arch/mmu.h10
-rw-r--r--arch/cris/include/asm/hardirq.h12
-rw-r--r--arch/cris/include/asm/mman.h20
-rw-r--r--arch/cris/include/asm/pgtable.h2
-rw-r--r--arch/cris/kernel/Makefile1
-rw-r--r--arch/cris/kernel/irq.c5
-rw-r--r--arch/cris/kernel/process.c5
-rw-r--r--arch/cris/kernel/vmlinux.lds.S37
-rw-r--r--arch/cris/mm/init.c2
-rw-r--r--arch/frv/Kconfig2
-rw-r--r--arch/frv/include/asm/gdb-stub.h1
-rw-r--r--arch/frv/include/asm/hardirq.h17
-rw-r--r--arch/frv/include/asm/mman.h19
-rw-r--r--arch/frv/include/asm/perf_event.h (renamed from arch/frv/include/asm/perf_counter.h)10
-rw-r--r--arch/frv/include/asm/unistd.h2
-rw-r--r--arch/frv/kernel/debug-stub.c1
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/init_task.c5
-rw-r--r--arch/frv/kernel/pm.c14
-rw-r--r--arch/frv/kernel/process.c4
-rw-r--r--arch/frv/kernel/sys_frv.c1
-rw-r--r--arch/frv/lib/Makefile2
-rw-r--r--arch/frv/lib/cache.S2
-rw-r--r--arch/frv/lib/perf_event.c (renamed from arch/frv/lib/perf_counter.c)8
-rw-r--r--arch/frv/mb93090-mb00/Makefile2
-rw-r--r--arch/frv/mb93090-mb00/flash.c90
-rw-r--r--arch/h8300/include/asm/hardirq.h15
-rw-r--r--arch/h8300/include/asm/mman.h18
-rw-r--r--arch/h8300/kernel/init_task.c5
-rw-r--r--arch/h8300/kernel/irq.c5
-rw-r--r--arch/h8300/kernel/sys_h8300.c1
-rw-r--r--arch/h8300/kernel/timer/tpu.c1
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S25
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/hp/common/sba_iommu.c7
-rw-r--r--arch/ia64/ia32/sys_ia32.c2
-rw-r--r--arch/ia64/include/asm/cputime.h1
-rw-r--r--arch/ia64/include/asm/mman.h14
-rw-r--r--arch/ia64/include/asm/smp.h1
-rw-r--r--arch/ia64/include/asm/topology.h3
-rw-r--r--arch/ia64/install.sh4
-rw-r--r--arch/ia64/kernel/Makefile.gate2
-rw-r--r--arch/ia64/kernel/init_task.c3
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c2
-rw-r--r--arch/ia64/kernel/smp.c2
-rw-r--r--arch/ia64/mm/init.c7
-rw-r--r--arch/m32r/Kconfig6
-rw-r--r--arch/m32r/boot/compressed/install.sh4
-rw-r--r--arch/m32r/include/asm/hardirq.h15
-rw-r--r--arch/m32r/include/asm/mman.h18
-rw-r--r--arch/m32r/include/asm/mmu_context.h4
-rw-r--r--arch/m32r/include/asm/page.h4
-rw-r--r--arch/m32r/include/asm/processor.h2
-rw-r--r--arch/m32r/include/asm/smp.h2
-rw-r--r--arch/m32r/include/asm/thread_info.h15
-rw-r--r--arch/m32r/kernel/entry.S7
-rw-r--r--arch/m32r/kernel/head.S4
-rw-r--r--arch/m32r/kernel/init_task.c5
-rw-r--r--arch/m32r/kernel/ptrace.c5
-rw-r--r--arch/m32r/kernel/smp.c30
-rw-r--r--arch/m32r/kernel/smpboot.c4
-rw-r--r--arch/m32r/kernel/time.c74
-rw-r--r--arch/m32r/kernel/vmlinux.lds.S78
-rw-r--r--arch/m32r/mm/init.c2
-rw-r--r--arch/m68k/Kconfig6
-rw-r--r--arch/m68k/include/asm/hardirq_mm.h12
-rw-r--r--arch/m68k/include/asm/mman.h18
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/install.sh4
-rw-r--r--arch/m68k/kernel/entry.S2
-rw-r--r--arch/m68k/kernel/process.c6
-rw-r--r--arch/m68k/kernel/sys_m68k.c1
-rw-r--r--arch/m68k/kernel/time.c70
-rw-r--r--arch/m68k/mm/init.c2
-rw-r--r--arch/m68knommu/kernel/init_task.c5
-rw-r--r--arch/m68knommu/kernel/sys_m68k.c1
-rw-r--r--arch/m68knommu/kernel/syscalltable.S2
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/Makefile31
-rw-r--r--arch/microblaze/boot/Makefile41
l---------arch/microblaze/boot/dts/system.dts1
-rw-r--r--arch/microblaze/boot/linked_dtb.S3
-rw-r--r--arch/microblaze/configs/mmu_defconfig30
-rw-r--r--arch/microblaze/configs/nommu_defconfig35
-rw-r--r--arch/microblaze/include/asm/asm-compat.h17
-rw-r--r--arch/microblaze/include/asm/io.h3
-rw-r--r--arch/microblaze/include/asm/ipc.h1
-rw-r--r--arch/microblaze/include/asm/page.h3
-rw-r--r--arch/microblaze/include/asm/setup.h2
-rw-r--r--arch/microblaze/include/asm/syscall.h99
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c3
-rw-r--r--arch/microblaze/kernel/entry.S72
-rw-r--r--arch/microblaze/kernel/exceptions.c33
-rw-r--r--arch/microblaze/kernel/head.S14
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S10
-rw-r--r--arch/microblaze/kernel/init_task.c5
-rw-r--r--arch/microblaze/kernel/process.c1
-rw-r--r--arch/microblaze/kernel/ptrace.c62
-rw-r--r--arch/microblaze/kernel/setup.c12
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c1
-rw-r--r--arch/microblaze/kernel/syscall_table.S2
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S72
-rw-r--r--arch/microblaze/mm/init.c5
-rw-r--r--arch/mips/Makefile27
-rw-r--r--arch/mips/alchemy/common/time.c2
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h2
-rw-r--r--arch/mips/include/asm/mman.h5
-rw-r--r--arch/mips/include/asm/mmu_context.h10
-rw-r--r--arch/mips/include/asm/pgtable.h10
-rw-r--r--arch/mips/include/asm/smp-ops.h2
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/unistd.h6
-rw-r--r--arch/mips/kernel/init_task.c5
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/smp-cmp.c6
-rw-r--r--arch/mips/kernel/smp-mt.c6
-rw-r--r--arch/mips/kernel/smp-up.c3
-rw-r--r--arch/mips/kernel/smp.c8
-rw-r--r--arch/mips/kernel/smtc.c6
-rw-r--r--arch/mips/kernel/vmlinux.lds.S13
-rw-r--r--arch/mips/lasat/sysctl.c18
-rw-r--r--arch/mips/mipssim/sim_smtc.c5
-rw-r--r--arch/mips/mm/c-octeon.c2
-rw-r--r--arch/mips/mm/init.c9
-rw-r--r--arch/mips/mti-malta/malta-smtc.c4
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c4
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c5
-rw-r--r--arch/mips/sibyte/sb1250/smp.c5
-rw-r--r--arch/mn10300/include/asm/cacheflush.h4
-rw-r--r--arch/mn10300/include/asm/gdb-stub.h1
-rw-r--r--arch/mn10300/include/asm/mman.h29
-rw-r--r--arch/mn10300/include/asm/mmu_context.h12
-rw-r--r--arch/mn10300/include/asm/unistd.h2
-rw-r--r--arch/mn10300/kernel/asm-offsets.c2
-rw-r--r--arch/mn10300/kernel/entry.S2
-rw-r--r--arch/mn10300/kernel/init_task.c5
-rw-r--r--arch/mn10300/kernel/mn10300-serial-low.S2
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c14
-rw-r--r--arch/mn10300/kernel/setup.c2
-rw-r--r--arch/mn10300/kernel/sys_mn10300.c1
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S40
-rw-r--r--arch/mn10300/mm/init.c2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/Makefile4
-rw-r--r--arch/parisc/include/asm/fcntl.h2
-rw-r--r--arch/parisc/include/asm/mman.h5
-rw-r--r--arch/parisc/include/asm/perf_counter.h7
-rw-r--r--arch/parisc/include/asm/perf_event.h7
-rw-r--r--arch/parisc/include/asm/smp.h1
-rw-r--r--arch/parisc/include/asm/unistd.h4
-rw-r--r--arch/parisc/install.sh4
-rw-r--r--arch/parisc/kernel/init_task.c4
-rw-r--r--arch/parisc/kernel/sys_parisc32.c1
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S104
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/Kconfig8
-rw-r--r--arch/powerpc/Makefile17
-rw-r--r--arch/powerpc/boot/dts/mpc8377_mds.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8377_wlan.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8378_mds.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8379_mds.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts1
-rw-r--r--arch/powerpc/boot/install.sh4
-rw-r--r--arch/powerpc/include/asm/cputime.h13
-rw-r--r--arch/powerpc/include/asm/device.h11
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h27
-rw-r--r--arch/powerpc/include/asm/fsldma.h136
-rw-r--r--arch/powerpc/include/asm/hw_irq.h22
-rw-r--r--arch/powerpc/include/asm/iommu.h10
-rw-r--r--arch/powerpc/include/asm/mman.h2
-rw-r--r--arch/powerpc/include/asm/paca.h2
-rw-r--r--arch/powerpc/include/asm/perf_event.h (renamed from arch/powerpc/include/asm/perf_counter.h)22
-rw-r--r--arch/powerpc/include/asm/pmc.h2
-rw-r--r--arch/powerpc/include/asm/pte-40x.h1
-rw-r--r--arch/powerpc/include/asm/pte-8xx.h1
-rw-r--r--arch/powerpc/include/asm/pte-common.h5
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/topology.h12
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/dma-iommu.c16
-rw-r--r--arch/powerpc/kernel/dma.c15
-rw-r--r--arch/powerpc/kernel/entry_64.S8
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S1
-rw-r--r--arch/powerpc/kernel/init_task.c5
-rw-r--r--arch/powerpc/kernel/irq.c8
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c5
-rw-r--r--arch/powerpc/kernel/mpc7450-pmu.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c2
-rw-r--r--arch/powerpc/kernel/perf_callchain.c2
-rw-r--r--arch/powerpc/kernel/perf_event.c (renamed from arch/powerpc/kernel/perf_counter.c)566
-rw-r--r--arch/powerpc/kernel/power4-pmu.c2
-rw-r--r--arch/powerpc/kernel/power5+-pmu.c2
-rw-r--r--arch/powerpc/kernel/power5-pmu.c2
-rw-r--r--arch/powerpc/kernel/power6-pmu.c2
-rw-r--r--arch/powerpc/kernel/power7-pmu.c2
-rw-r--r--arch/powerpc/kernel/ppc970-pmu.c2
-rw-r--r--arch/powerpc/kernel/process.c17
-rw-r--r--arch/powerpc/kernel/prom_init.c3
-rw-r--r--arch/powerpc/kernel/setup-common.c9
-rw-r--r--arch/powerpc/kernel/smp.c12
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c1
-rw-r--r--arch/powerpc/kernel/time.c34
-rw-r--r--arch/powerpc/kernel/udbg_16550.c2
-rw-r--r--arch/powerpc/kernel/vdso.c17
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32_wrapper.S3
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64_wrapper.S3
-rw-r--r--arch/powerpc/kernel/vio.c4
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S69
-rw-r--r--arch/powerpc/mm/fault.c8
-rw-r--r--arch/powerpc/mm/init_32.c36
-rw-r--r--arch/powerpc/mm/init_64.c29
-rw-r--r--arch/powerpc/mm/mem.c8
-rw-r--r--arch/powerpc/mm/pgtable.c19
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S1
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype4
-rw-r--r--arch/powerpc/platforms/cell/beat_iommu.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c9
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c4
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c6
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c6
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c2
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c8
-rwxr-xr-xarch/powerpc/relocs_check.pl56
-rw-r--r--arch/powerpc/sysdev/axonram.c2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c2
-rw-r--r--arch/powerpc/xmon/xmon.c16
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/appldata/appldata_base.c9
-rw-r--r--arch/s390/boot/install.sh4
-rw-r--r--arch/s390/defconfig37
-rw-r--r--arch/s390/hypfs/inode.c6
-rw-r--r--arch/s390/include/asm/cputime.h1
-rw-r--r--arch/s390/include/asm/lowcore.h9
-rw-r--r--arch/s390/include/asm/mman.h13
-rw-r--r--arch/s390/include/asm/perf_counter.h10
-rw-r--r--arch/s390/include/asm/perf_event.h10
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/asm/topology.h1
-rw-r--r--arch/s390/include/asm/unistd.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c7
-rw-r--r--arch/s390/kernel/compat_linux.c84
-rw-r--r--arch/s390/kernel/compat_linux.h4
-rw-r--r--arch/s390/kernel/compat_wrapper.S35
-rw-r--r--arch/s390/kernel/debug.c4
-rw-r--r--arch/s390/kernel/entry.h6
-rw-r--r--arch/s390/kernel/init_task.c5
-rw-r--r--arch/s390/kernel/process.c41
-rw-r--r--arch/s390/kernel/ptrace.c19
-rw-r--r--arch/s390/kernel/sclp.S5
-rw-r--r--arch/s390/kernel/smp.c54
-rw-r--r--arch/s390/kernel/suspend.c24
-rw-r--r--arch/s390/kernel/swsusp_asm64.S102
-rw-r--r--arch/s390/kernel/syscalls.S10
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso32/vdso32_wrapper.S3
-rw-r--r--arch/s390/kernel/vdso64/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/vdso64_wrapper.S3
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/mm/cmm.c4
-rw-r--r--arch/s390/mm/fault.c8
-rw-r--r--arch/s390/mm/init.c2
-rw-r--r--arch/s390/mm/page-states.c52
-rw-r--r--arch/s390/mm/pgtable.c17
-rw-r--r--arch/score/include/asm/page.h3
-rw-r--r--arch/score/include/asm/thread_info.h15
-rw-r--r--arch/score/kernel/init_task.c5
-rw-r--r--arch/score/kernel/vmlinux.lds.S77
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c1
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c89
-rw-r--r--arch/sh/boot/compressed/install.sh4
-rw-r--r--arch/sh/configs/ap325rxa_defconfig100
-rw-r--r--arch/sh/configs/dreamcast_defconfig82
-rw-r--r--arch/sh/configs/ecovec24-romimage_defconfig52
-rw-r--r--arch/sh/configs/ecovec24_defconfig70
-rw-r--r--arch/sh/configs/edosk7705_defconfig50
-rw-r--r--arch/sh/configs/edosk7760_defconfig82
-rw-r--r--arch/sh/configs/espt_defconfig75
-rw-r--r--arch/sh/configs/hp6xx_defconfig69
-rw-r--r--arch/sh/configs/kfr2r09-romimage_defconfig47
-rw-r--r--arch/sh/configs/kfr2r09_defconfig57
-rw-r--r--arch/sh/configs/landisk_defconfig86
-rw-r--r--arch/sh/configs/lboxre2_defconfig86
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig85
-rw-r--r--arch/sh/configs/microdev_defconfig74
-rw-r--r--arch/sh/configs/migor_defconfig111
-rw-r--r--arch/sh/configs/polaris_defconfig73
-rw-r--r--arch/sh/configs/r7780mp_defconfig116
-rw-r--r--arch/sh/configs/r7785rp_defconfig125
-rw-r--r--arch/sh/configs/rsk7201_defconfig60
-rw-r--r--arch/sh/configs/rsk7203_defconfig81
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig97
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig100
-rw-r--r--arch/sh/configs/sdk7780_defconfig93
-rw-r--r--arch/sh/configs/se7206_defconfig80
-rw-r--r--arch/sh/configs/se7343_defconfig91
-rw-r--r--arch/sh/configs/se7619_defconfig56
-rw-r--r--arch/sh/configs/se7705_defconfig75
-rw-r--r--arch/sh/configs/se7712_defconfig79
-rw-r--r--arch/sh/configs/se7721_defconfig86
-rw-r--r--arch/sh/configs/se7722_defconfig92
-rw-r--r--arch/sh/configs/se7724_defconfig264
-rw-r--r--arch/sh/configs/se7750_defconfig75
-rw-r--r--arch/sh/configs/se7751_defconfig74
-rw-r--r--arch/sh/configs/se7780_defconfig89
-rw-r--r--arch/sh/configs/sh03_defconfig87
-rw-r--r--arch/sh/configs/sh7710voipgw_defconfig71
-rw-r--r--arch/sh/configs/sh7724_generic_defconfig64
-rw-r--r--arch/sh/configs/sh7763rdp_defconfig79
-rw-r--r--arch/sh/configs/sh7770_generic_defconfig62
-rw-r--r--arch/sh/configs/sh7785lcr_32bit_defconfig559
-rw-r--r--arch/sh/configs/sh7785lcr_defconfig107
-rw-r--r--arch/sh/configs/shmin_defconfig68
-rw-r--r--arch/sh/configs/shx3_defconfig95
-rw-r--r--arch/sh/configs/snapgear_defconfig40
-rw-r--r--arch/sh/configs/systemh_defconfig61
-rw-r--r--arch/sh/configs/titan_defconfig93
-rw-r--r--arch/sh/configs/ul2_defconfig104
-rw-r--r--arch/sh/configs/urquell_defconfig125
-rw-r--r--arch/sh/drivers/dma/Kconfig12
-rw-r--r--arch/sh/drivers/dma/Makefile3
-rw-r--r--arch/sh/include/asm/dma-sh.h13
-rw-r--r--arch/sh/include/asm/perf_counter.h9
-rw-r--r--arch/sh/include/asm/perf_event.h9
-rw-r--r--arch/sh/include/asm/smp.h1
-rw-r--r--arch/sh/include/asm/topology.h1
-rw-r--r--arch/sh/include/asm/unistd_32.h2
-rw-r--r--arch/sh/include/asm/unistd_64.h2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c14
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c14
-rw-r--r--arch/sh/kernel/dwarf.c1
-rw-r--r--arch/sh/kernel/init_task.c5
-rw-r--r--arch/sh/kernel/irq.c6
-rw-r--r--arch/sh/kernel/sys_sh32.c1
-rw-r--r--arch/sh/kernel/sys_sh64.c1
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sh/kernel/syscalls_64.S2
-rw-r--r--arch/sh/kernel/traps_32.c23
-rw-r--r--arch/sh/kernel/vsyscall/Makefile2
-rw-r--r--arch/sh/mm/fault_32.c8
-rw-r--r--arch/sh/mm/init.c8
-rw-r--r--arch/sh/mm/tlbflush_64.c8
-rw-r--r--arch/sparc/Kconfig4
-rw-r--r--arch/sparc/Makefile4
-rw-r--r--arch/sparc/include/asm/mman.h2
-rw-r--r--arch/sparc/include/asm/perf_counter.h14
-rw-r--r--arch/sparc/include/asm/perf_event.h14
-rw-r--r--arch/sparc/include/asm/smp_64.h1
-rw-r--r--arch/sparc/include/asm/topology_64.h16
-rw-r--r--arch/sparc/include/asm/unistd.h2
-rw-r--r--arch/sparc/include/asm/vio.h2
-rw-r--r--arch/sparc/kernel/Makefile8
-rw-r--r--arch/sparc/kernel/init_task.c5
-rw-r--r--arch/sparc/kernel/irq_64.c2
-rw-r--r--arch/sparc/kernel/nmi.c4
-rw-r--r--arch/sparc/kernel/pcr.c10
-rw-r--r--arch/sparc/kernel/perf_event.c (renamed from arch/sparc/kernel/perf_counter.c)178
-rw-r--r--arch/sparc/kernel/sys_sparc32.c1
-rw-r--r--arch/sparc/kernel/systbls.h3
-rw-r--r--arch/sparc/kernel/systbls_32.S2
-rw-r--r--arch/sparc/kernel/systbls_64.S4
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/um/Makefile9
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/include/asm/common.lds.S29
-rw-r--r--arch/um/include/asm/hardirq.h26
-rw-r--r--arch/um/include/asm/mmu_context.h4
-rw-r--r--arch/um/include/shared/ptrace_user.h2
-rw-r--r--arch/um/kernel/Makefile3
-rw-r--r--arch/um/kernel/dyn.lds.S9
-rw-r--r--arch/um/kernel/init_task.c5
-rw-r--r--arch/um/kernel/mem.c2
-rw-r--r--arch/um/kernel/skas/mmu.c4
-rw-r--r--arch/um/kernel/smp.c2
-rw-r--r--arch/um/kernel/uml.lds.S26
-rw-r--r--arch/um/kernel/vmlinux.lds.S3
-rw-r--r--arch/x86/Kconfig12
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/compressed/head_32.S3
-rw-r--r--arch/x86/boot/compressed/head_64.S3
-rw-r--r--arch/x86/boot/compressed/vmlinux.lds.S6
-rw-r--r--arch/x86/boot/install.sh4
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/include/asm/acpi.h1
-rw-r--r--arch/x86/include/asm/cache.h4
-rw-r--r--arch/x86/include/asm/entry_arch.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h6
-rw-r--r--arch/x86/include/asm/nmi.h3
-rw-r--r--arch/x86/include/asm/pci.h6
-rw-r--r--arch/x86/include/asm/perf_event.h (renamed from arch/x86/include/asm/perf_counter.h)30
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/syscall.h14
-rw-r--r--arch/x86/include/asm/topology.h10
-rw-r--r--arch/x86/include/asm/unistd_32.h2
-rw-r--r--arch/x86/include/asm/unistd_64.h4
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/apic/apic.c6
-rw-r--r--arch/x86/kernel/apic/io_apic.c7
-rw-r--r--arch/x86/kernel/apic/nmi.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c23
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c67
-rw-r--r--arch/x86/kernel/cpu/perf_event.c (renamed from arch/x86/kernel/cpu/perf_counter.c)556
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c2
-rw-r--r--arch/x86/kernel/dumpstack_32.c1
-rw-r--r--arch/x86/kernel/dumpstack_64.c1
-rw-r--r--arch/x86/kernel/e820.c2
-rw-r--r--arch/x86/kernel/early_printk.c785
-rw-r--r--arch/x86/kernel/entry_64.S24
-rw-r--r--arch/x86/kernel/head_32.S6
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/init_task.c5
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/ldt.c4
-rw-r--r--arch/x86/kernel/microcode_core.c4
-rw-r--r--arch/x86/kernel/pci-swiotlb.c5
-rw-r--r--arch/x86/kernel/process.c6
-rw-r--r--arch/x86/kernel/ptrace.c21
-rw-r--r--arch/x86/kernel/setup.c3
-rw-r--r--arch/x86/kernel/sfi.c122
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/syscall_table_32.S2
-rw-r--r--arch/x86/kernel/time.c1
-rw-r--r--arch/x86/kernel/traps.c7
-rw-r--r--arch/x86/kernel/tsc_sync.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S79
-rw-r--r--arch/x86/kernel/vsyscall_64.c10
-rw-r--r--arch/x86/lguest/boot.c10
-rw-r--r--arch/x86/mm/Makefile3
-rw-r--r--arch/x86/mm/fault.c27
-rw-r--r--arch/x86/mm/init.c63
-rw-r--r--arch/x86/mm/init_32.c12
-rw-r--r--arch/x86/mm/init_64.c12
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c3
-rw-r--r--arch/x86/mm/pageattr.c1
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/mm/setup_nx.c69
-rw-r--r--arch/x86/mm/tlb.c15
-rw-r--r--arch/x86/oprofile/op_model_ppro.c4
-rw-r--r--arch/x86/oprofile/op_x86_model.h2
-rw-r--r--arch/x86/pci/common.c2
-rw-r--r--arch/x86/pci/mmconfig-shared.c8
-rw-r--r--arch/x86/pci/mmconfig_32.c2
-rw-r--r--arch/x86/vdso/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c10
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/xtensa/include/asm/mman.h5
-rw-r--r--arch/xtensa/kernel/Makefile3
-rw-r--r--arch/xtensa/kernel/head.S2
-rw-r--r--arch/xtensa/kernel/init_task.c5
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S75
-rw-r--r--arch/xtensa/mm/init.c2
-rw-r--r--crypto/async_tx/Kconfig9
-rw-r--r--crypto/async_tx/Makefile3
-rw-r--r--crypto/async_tx/async_memcpy.c44
-rw-r--r--crypto/async_tx/async_memset.c43
-rw-r--r--crypto/async_tx/async_pq.c395
-rw-r--r--crypto/async_tx/async_raid6_recov.c468
-rw-r--r--crypto/async_tx/async_tx.c87
-rw-r--r--crypto/async_tx/async_xor.c207
-rw-r--r--crypto/async_tx/raid6test.c240
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/acpi/Kconfig17
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/acpi_memhotplug.c51
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/acconfig.h10
-rw-r--r--drivers/acpi/acpica/acdebug.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h37
-rw-r--r--drivers/acpi/acpica/achware.h8
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h16
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h25
-rw-r--r--drivers/acpi/acpica/acobject.h1
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h584
-rw-r--r--drivers/acpi/acpica/acutils.h30
-rw-r--r--drivers/acpi/acpica/amlcode.h1
-rw-r--r--drivers/acpi/acpica/dsfield.c18
-rw-r--r--drivers/acpi/acpica/dsmethod.c15
-rw-r--r--drivers/acpi/acpica/dsmthdat.c8
-rw-r--r--drivers/acpi/acpica/dsobject.c23
-rw-r--r--drivers/acpi/acpica/dswload.c41
-rw-r--r--drivers/acpi/acpica/evgpe.c8
-rw-r--r--drivers/acpi/acpica/evgpeblk.c4
-rw-r--r--drivers/acpi/acpica/evrgnini.c45
-rw-r--r--drivers/acpi/acpica/exconfig.c7
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exfield.c82
-rw-r--r--drivers/acpi/acpica/exfldio.c7
-rw-r--r--drivers/acpi/acpica/exutils.c53
-rw-r--r--drivers/acpi/acpica/hwgpe.c34
-rw-r--r--drivers/acpi/acpica/hwregs.c206
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c181
-rw-r--r--drivers/acpi/acpica/nsalloc.c88
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c7
-rw-r--r--drivers/acpi/acpica/nseval.c137
-rw-r--r--drivers/acpi/acpica/nsinit.c15
-rw-r--r--drivers/acpi/acpica/nsload.c3
-rw-r--r--drivers/acpi/acpica/nspredef.c718
-rw-r--r--drivers/acpi/acpica/nsrepair.c203
-rw-r--r--drivers/acpi/acpica/nsutils.c5
-rw-r--r--drivers/acpi/acpica/nsxfeval.c23
-rw-r--r--drivers/acpi/acpica/nsxfname.c237
-rw-r--r--drivers/acpi/acpica/psloop.c119
-rw-r--r--drivers/acpi/acpica/psxface.c4
-rw-r--r--drivers/acpi/acpica/tbutils.c82
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/uteval.c378
-rw-r--r--drivers/acpi/acpica/utglobal.c12
-rw-r--r--drivers/acpi/acpica/utids.c382
-rw-r--r--drivers/acpi/acpica/utinit.c22
-rw-r--r--drivers/acpi/acpica/utmisc.c85
-rw-r--r--drivers/acpi/acpica/utxface.c26
-rw-r--r--drivers/acpi/battery.c22
-rw-r--r--drivers/acpi/blacklist.c2
-rw-r--r--drivers/acpi/bus.c52
-rw-r--r--drivers/acpi/button.c47
-rw-r--r--drivers/acpi/cm_sbs.c2
-rw-r--r--drivers/acpi/container.c13
-rw-r--r--drivers/acpi/debug.c82
-rw-r--r--drivers/acpi/dock.c10
-rw-r--r--drivers/acpi/ec.c270
-rw-r--r--drivers/acpi/event.c2
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/glue.c10
-rw-r--r--drivers/acpi/internal.h22
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/osl.c128
-rw-r--r--drivers/acpi/pci_irq.c2
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/pci_slot.c5
-rw-r--r--drivers/acpi/power.c3
-rw-r--r--drivers/acpi/power_meter.c1018
-rw-r--r--drivers/acpi/processor_core.c246
-rw-r--r--drivers/acpi/processor_idle.c12
-rw-r--r--drivers/acpi/processor_perflib.c5
-rw-r--r--drivers/acpi/processor_thermal.c5
-rw-r--r--drivers/acpi/processor_throttling.c8
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/scan.c698
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/acpi/system.c2
-rw-r--r--drivers/acpi/tables.c6
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/acpi/video.c80
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/ata/pata_hpt37x.c2
-rw-r--r--drivers/atm/he.c59
-rw-r--r--drivers/atm/solos-attrlist.c11
-rw-r--r--drivers/atm/solos-pci.c75
-rw-r--r--drivers/base/node.c5
-rw-r--r--drivers/block/DAC960.c12
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/cciss.c4
-rw-r--r--drivers/block/cpqarray.c2
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/hd.c2
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/osdblk.c2
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/swim.c2
-rw-r--r--drivers/block/swim3.c4
-rw-r--r--drivers/block/sx8.c2
-rw-r--r--drivers/block/ub.c2
-rw-r--r--drivers/block/umem.c3
-rw-r--r--drivers/block/viodasd.c2
-rw-r--r--drivers/block/virtio_blk.c35
-rw-r--r--drivers/block/xd.c2
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/block/xsysace.c2
-rw-r--r--drivers/block/z2ram.c3
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/cdrom/gdrom.c2
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/Kconfig8
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/backend.c4
-rw-r--r--drivers/char/agp/hp-agp.c9
-rw-r--r--drivers/char/agp/intel-agp.c37
-rw-r--r--drivers/char/agp/uninorth-agp.c2
-rw-r--r--drivers/char/bfin-otp.c173
-rw-r--r--drivers/char/epca.c2
-rw-r--r--drivers/char/hpet.c21
-rw-r--r--drivers/char/hvc_console.c6
-rw-r--r--drivers/char/hvc_console.h12
-rw-r--r--drivers/char/hvc_iucv.c4
-rw-r--r--drivers/char/hw_random/virtio-rng.c3
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c4
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/mwave/mwavedd.c22
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/random.c4
-rw-r--r--drivers/char/rio/rioctrl.c2
-rw-r--r--drivers/char/sysrq.c4
-rw-r--r--drivers/char/tpm/tpm.c7
-rw-r--r--drivers/char/tpm/tpm_bios.c4
-rw-r--r--drivers/char/uv_mmtimer.c216
-rw-r--r--drivers/char/virtio_console.c5
-rw-r--r--drivers/connector/cn_proc.c25
-rw-r--r--drivers/cpuidle/governors/menu.c271
-rw-r--r--drivers/dca/dca-core.c124
-rw-r--r--drivers/dma/Kconfig14
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/at_hdmac.c60
-rw-r--r--drivers/dma/at_hdmac_regs.h1
-rw-r--r--drivers/dma/dmaengine.c94
-rw-r--r--drivers/dma/dmatest.c40
-rw-r--r--drivers/dma/dw_dmac.c50
-rw-r--r--drivers/dma/dw_dmac_regs.h1
-rw-r--r--drivers/dma/fsldma.c288
-rw-r--r--drivers/dma/fsldma.h4
-rw-r--r--drivers/dma/ioat.c202
-rw-r--r--drivers/dma/ioat/Makefile2
-rw-r--r--drivers/dma/ioat/dca.c (renamed from drivers/dma/ioat_dca.c)13
-rw-r--r--drivers/dma/ioat/dma.c1238
-rw-r--r--drivers/dma/ioat/dma.h337
-rw-r--r--drivers/dma/ioat/dma_v2.c871
-rw-r--r--drivers/dma/ioat/dma_v2.h190
-rw-r--r--drivers/dma/ioat/dma_v3.c1223
-rw-r--r--drivers/dma/ioat/hw.h215
-rw-r--r--drivers/dma/ioat/pci.c210
-rw-r--r--drivers/dma/ioat/registers.h (renamed from drivers/dma/ioatdma_registers.h)54
-rw-r--r--drivers/dma/ioat_dma.c1741
-rw-r--r--drivers/dma/ioatdma.h165
-rw-r--r--drivers/dma/ioatdma_hw.h70
-rw-r--r--drivers/dma/iop-adma.c491
-rw-r--r--drivers/dma/iovlock.c10
-rw-r--r--drivers/dma/mv_xor.c7
-rw-r--r--drivers/dma/mv_xor.h4
-rw-r--r--drivers/dma/shdma.c786
-rw-r--r--drivers/dma/shdma.h64
-rw-r--r--drivers/dma/txx9dmac.c24
-rw-r--r--drivers/dma/txx9dmac.h1
-rw-r--r--drivers/edac/Kconfig13
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/cpc925_edac.c6
-rw-r--r--drivers/edac/edac_core.h2
-rw-r--r--drivers/edac/edac_device.c5
-rw-r--r--drivers/edac/edac_mc.c4
-rw-r--r--drivers/edac/edac_pci.c4
-rw-r--r--drivers/edac/i3200_edac.c527
-rw-r--r--drivers/edac/mpc85xx_edac.c30
-rw-r--r--drivers/edac/mv64x60_edac.c22
-rw-r--r--drivers/firewire/core-card.c13
-rw-r--r--drivers/firewire/core-transaction.c2
-rw-r--r--drivers/firewire/core.h14
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/firewire/sbp2.c16
-rw-r--r--drivers/firmware/memmap.c2
-rw-r--r--drivers/gpio/Kconfig35
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/adp5520-gpio.c206
-rw-r--r--drivers/gpio/bt8xxgpio.c7
-rw-r--r--drivers/gpio/gpiolib.c253
-rw-r--r--drivers/gpio/langwell_gpio.c297
-rw-r--r--drivers/gpio/max7301.c1
-rw-r--r--drivers/gpio/mc33880.c196
-rw-r--r--drivers/gpio/mcp23s08.c5
-rw-r--r--drivers/gpio/pca953x.c4
-rw-r--r--drivers/gpio/pcf857x.c4
-rw-r--r--drivers/gpio/ucb1400_gpio.c125
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/drm_gem.c13
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c194
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c133
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h77
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c876
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c92
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h34
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c170
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h315
-rw-r--r--drivers/gpu/drm/i915/i915_trace_points.c11
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c9
-rw-r--r--drivers/gpu/drm/i915/intel_display.c616
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h5
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c63
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c502
-rw-r--r--drivers/gpu/drm/mga/mga_state.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h59
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h65
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h97
-rw-r--r--drivers/hid/Kconfig7
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/usbhid/hid-core.c16
-rw-r--r--drivers/hwmon/Kconfig34
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adcxx.c101
-rw-r--r--drivers/hwmon/adm1021.c79
-rw-r--r--drivers/hwmon/adm1031.c40
-rw-r--r--drivers/hwmon/applesmc.c38
-rw-r--r--drivers/hwmon/coretemp.c57
-rw-r--r--drivers/hwmon/dme1737.c2
-rw-r--r--drivers/hwmon/fscher.c680
-rw-r--r--drivers/hwmon/fscpos.c654
-rw-r--r--drivers/hwmon/lis3lv02d.c9
-rw-r--r--drivers/hwmon/lis3lv02d.h24
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c47
-rw-r--r--drivers/hwmon/lm70.c55
-rw-r--r--drivers/hwmon/ltc4215.c2
-rw-r--r--drivers/hwmon/ltc4245.c3
-rw-r--r--drivers/hwmon/max1111.c1
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/i2c/Kconfig8
-rw-r--r--drivers/i2c/busses/Kconfig23
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-piix4.c9
-rw-r--r--drivers/i2c/busses/i2c-pnx.c7
-rw-r--r--drivers/i2c/busses/i2c-scmi.c429
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c45
-rw-r--r--drivers/i2c/busses/scx200_acb.c6
-rw-r--r--drivers/i2c/chips/Kconfig48
-rw-r--r--drivers/i2c/chips/Makefile3
-rw-r--r--drivers/i2c/chips/pca9539.c152
-rw-r--r--drivers/i2c/chips/pcf8574.c215
-rw-r--r--drivers/i2c/chips/pcf8575.c198
-rw-r--r--drivers/i2c/chips/tsl2550.c42
-rw-r--r--drivers/i2c/i2c-core.c165
-rw-r--r--drivers/ide/ide-acpi.c5
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-gd.c2
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/ide-tape.c2
-rw-r--r--drivers/ide/umc8672.c4
-rw-r--r--drivers/idle/i7300_idle.c20
-rw-r--r--drivers/ieee1394/raw1394.c4
-rw-r--r--drivers/ieee1394/sbp2.c3
-rw-r--r--drivers/infiniband/core/mad_rmpp.c17
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c11
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/input/input.c64
-rw-r--r--drivers/input/keyboard/Kconfig40
-rw-r--r--drivers/input/keyboard/Makefile4
-rw-r--r--drivers/input/keyboard/adp5588-keys.c361
-rw-r--r--drivers/input/keyboard/atkbd.c27
-rw-r--r--drivers/input/keyboard/max7359_keypad.c330
-rw-r--r--drivers/input/keyboard/opencores-kbd.c180
-rw-r--r--drivers/input/keyboard/qt2160.c397
-rw-r--r--drivers/input/misc/Kconfig17
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/dm355evm_keys.c26
-rw-r--r--drivers/input/misc/winbond-cir.c1614
-rw-r--r--drivers/input/mouse/sentelic.c18
-rw-r--r--drivers/input/mouse/synaptics_i2c.c51
-rw-r--r--drivers/input/serio/i8042.c41
-rw-r--r--drivers/input/serio/libps2.c28
-rw-r--r--drivers/input/touchscreen/Kconfig17
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ad7877.c1
-rw-r--r--drivers/input/touchscreen/ad7879.c7
-rw-r--r--drivers/input/touchscreen/ads7846.c1
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c318
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c3
-rw-r--r--drivers/isdn/capi/capifs.c2
-rw-r--r--drivers/isdn/capi/capiutil.c2
-rw-r--r--drivers/isdn/capi/kcapi_proc.c10
-rw-r--r--drivers/isdn/i4l/isdn_common.c4
-rw-r--r--drivers/leds/leds-clevo-mail.c8
-rw-r--r--drivers/leds/leds-dac124s085.c1
-rw-r--r--drivers/lguest/core.c5
-rw-r--r--drivers/lguest/page_tables.c47
-rw-r--r--drivers/macintosh/rack-meter.c2
-rw-r--r--drivers/md/Kconfig26
-rw-r--r--drivers/md/bitmap.c5
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/linear.c3
-rw-r--r--drivers/md/md.c29
-rw-r--r--drivers/md/md.h3
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid0.c8
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid10.c12
-rw-r--r--drivers/md/raid5.c1493
-rw-r--r--drivers/md/raid5.h28
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h5
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig2
-rw-r--r--drivers/media/dvb/pt1/pt1.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h4
-rw-r--r--drivers/media/radio/radio-mr800.c2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c4
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c4
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c4
-rw-r--r--drivers/media/video/cx88/cx88-video.c4
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c2
-rw-r--r--drivers/media/video/saa7164/saa7164-api.c8
-rw-r--r--drivers/media/video/saa7164/saa7164-cmd.c2
-rw-r--r--drivers/media/video/saa7164/saa7164-core.c6
-rw-r--r--drivers/media/video/saa7164/saa7164.h4
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c4
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c1
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c1
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c1
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/memstick/core/mspro_block.c2
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/mfd/ab3100-core.c2
-rw-r--r--drivers/mfd/ezx-pcap.c1
-rw-r--r--drivers/mfd/ucb1400_core.c31
-rw-r--r--drivers/misc/eeprom/at25.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/lkdtm.c2
-rw-r--r--drivers/misc/sgi-gru/grukservices.c2
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c3
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/core/core.c318
-rw-r--r--drivers/mmc/core/core.h8
-rw-r--r--drivers/mmc/core/host.c1
-rw-r--r--drivers/mmc/core/host.h2
-rw-r--r--drivers/mmc/core/mmc.c149
-rw-r--r--drivers/mmc/core/mmc_ops.c59
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c75
-rw-r--r--drivers/mmc/core/sdio.c316
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/core/sdio_cis.c2
-rw-r--r--drivers/mmc/core/sdio_io.c2
-rw-r--r--drivers/mmc/host/Kconfig29
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c42
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/mmci.c109
-rw-r--r--drivers/mmc/host/mmci.h3
-rw-r--r--drivers/mmc/host/msm_sdcc.c1287
-rw-r--r--drivers/mmc/host/msm_sdcc.h238
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c1083
-rw-r--r--drivers/mmc/host/pxamci.c104
-rw-r--r--drivers/mmc/host/sdhci-of.c49
-rw-r--r--drivers/mmc/host/sdhci-pci.c5
-rw-r--r--drivers/mmc/host/sdhci.c54
-rw-r--r--drivers/mmc/host/sdhci.h6
-rw-r--r--drivers/mtd/Kconfig18
-rw-r--r--drivers/mtd/afs.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c11
-rwxr-xr-x[-rw-r--r--]drivers/mtd/chips/cfi_util.c4
-rw-r--r--drivers/mtd/chips/jedec_probe.c41
-rw-r--r--drivers/mtd/devices/Kconfig10
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/lart.c6
-rw-r--r--drivers/mtd/devices/m25p80.c141
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c5
-rw-r--r--drivers/mtd/devices/phram.c25
-rw-r--r--drivers/mtd/devices/slram.c4
-rw-r--r--drivers/mtd/devices/sst25l.c512
-rw-r--r--drivers/mtd/ftl.c2
-rwxr-xr-x[-rw-r--r--]drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/maps/Kconfig14
-rw-r--r--drivers/mtd/maps/Makefile3
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c311
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/physmap_of.c24
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c76
-rw-r--r--drivers/mtd/maps/uclinux.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdblock.c2
-rw-r--r--drivers/mtd/mtdconcat.c6
-rw-r--r--drivers/mtd/mtdcore.c4
-rw-r--r--drivers/mtd/mtdpart.c3
-rw-r--r--drivers/mtd/nand/Kconfig30
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c6
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c45
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/mxc_nand.c16
-rw-r--r--drivers/mtd/nand/nand_base.c167
-rw-r--r--drivers/mtd/nand/nand_ecc.c31
-rw-r--r--drivers/mtd/nand/ndfc.c4
-rw-r--r--drivers/mtd/nand/nomadik_nand.c250
-rw-r--r--drivers/mtd/nand/omap2.c347
-rw-r--r--drivers/mtd/nand/orion_nand.c3
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c17
-rw-r--r--drivers/mtd/nand/sh_flctl.c5
-rw-r--r--drivers/mtd/nand/tmio_nand.c17
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c52
-rw-r--r--drivers/mtd/nand/w90p910_nand.c382
-rw-r--r--drivers/mtd/ofpart.c21
-rw-r--r--drivers/mtd/onenand/Kconfig3
-rw-r--r--drivers/mtd/onenand/generic.c24
-rw-r--r--drivers/mtd/onenand/onenand_base.c20
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c2
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c12
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/net/3c59x.c12
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arcnet/arc-rawmode.c1
-rw-r--r--drivers/net/arcnet/capmode.c1
-rw-r--r--drivers/net/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/bnx2x_reg.h2
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/can/Kconfig13
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c1186
-rw-r--r--drivers/net/can/sja1000/ems_pci.c16
-rw-r--r--drivers/net/can/usb/Makefile5
-rw-r--r--drivers/net/can/usb/ems_usb.c1155
-rw-r--r--drivers/net/cnic.c4
-rw-r--r--drivers/net/cpmac.c8
-rw-r--r--drivers/net/cris/eth_v10.c20
-rw-r--r--drivers/net/davinci_emac.c9
-rw-r--r--drivers/net/e1000/e1000_hw.c2
-rw-r--r--drivers/net/ehea/ehea_main.c1
-rw-r--r--drivers/net/ehea/ehea_qmr.c2
-rw-r--r--drivers/net/enc28j60.c1
-rw-r--r--drivers/net/gianfar_ethtool.c2
-rw-r--r--drivers/net/ibm_newemac/core.c8
-rw-r--r--drivers/net/igb/e1000_mac.c72
-rw-r--r--drivers/net/igb/e1000_mac.h1
-rw-r--r--drivers/net/igb/igb_main.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c47
-rw-r--r--drivers/net/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c75
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c111
-rw-r--r--drivers/net/ks8851.c1
-rw-r--r--drivers/net/ll_temac_main.c2
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/mlx4/fw.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ni52.c4
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c11
-rw-r--r--drivers/net/qlge/qlge_main.c4
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sfc/efx.c3
-rw-r--r--drivers/net/skfp/pcmplc.c2
-rw-r--r--drivers/net/skfp/pmf.c8
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c6
-rw-r--r--drivers/net/sunvnet.c1
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_eem.c17
-rw-r--r--drivers/net/usb/kaweth.c18
-rw-r--r--drivers/net/usb/smsc95xx.c67
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/virtio_net.c236
-rw-r--r--drivers/net/vxge/vxge-config.h2
-rw-r--r--drivers/net/vxge/vxge-main.c2
-rw-r--r--drivers/net/wireless/arlan-proc.c28
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c202
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h3
-rw-r--r--drivers/net/wireless/atmel.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig21
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h23
-rw-r--r--drivers/net/wireless/b43/debugfs.c1
-rw-r--r--drivers/net/wireless/b43/debugfs.h1
-rw-r--r--drivers/net/wireless/b43/dma.c4
-rw-r--r--drivers/net/wireless/b43/leds.c266
-rw-r--r--drivers/net/wireless/b43/leds.h33
-rw-r--r--drivers/net/wireless/b43/main.c224
-rw-r--r--drivers/net/wireless/b43/phy_lp.c12
-rw-r--r--drivers/net/wireless/b43/pio.c2
-rw-r--r--drivers/net/wireless/b43/rfkill.c2
-rw-r--r--drivers/net/wireless/b43/sdio.c202
-rw-r--r--drivers/net/wireless/b43/sdio.h45
-rw-r--r--drivers/net/wireless/b43/xmit.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c9
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xilinx_emaclite.c7
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/oprofile/buffer_sync.c3
-rw-r--r--drivers/oprofile/oprofilefs.c2
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/sba_iommu.c4
-rw-r--r--drivers/parport/procfs.c12
-rw-r--r--drivers/pci/dmar.c41
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c11
-rw-r--r--drivers/pci/hotplug/pciehp.h107
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c17
-rw-r--r--drivers/pci/hotplug/pciehp_core.c136
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c109
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c109
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c23
-rw-r--r--drivers/pci/intel-iommu.c323
-rw-r--r--drivers/pci/intr_remapping.c8
-rw-r--r--drivers/pci/iova.c16
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aspm.c3
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c36
-rw-r--r--drivers/pcmcia/pxa2xx_base.c18
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c230
-rw-r--r--drivers/pcmcia/sa1100_jornada720.c156
-rw-r--r--drivers/pcmcia/yenta_socket.c2
-rw-r--r--drivers/platform/x86/Kconfig10
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/acerhdf.c121
-rw-r--r--drivers/platform/x86/asus-laptop.c227
-rw-r--r--drivers/platform/x86/eeepc-laptop.c340
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c109
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c970
-rw-r--r--drivers/platform/x86/topstar-laptop.c265
-rw-r--r--drivers/platform/x86/wmi.c1
-rw-r--r--drivers/pnp/driver.c10
-rw-r--r--drivers/pnp/pnpacpi/core.c21
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ds2760_battery.c147
-rw-r--r--drivers/power/olpc_battery.c50
-rw-r--r--drivers/power/power_supply_core.c44
-rw-r--r--drivers/power/power_supply_sysfs.c12
-rw-r--r--drivers/power/wm831x_power.c779
-rw-r--r--drivers/power/wm8350_power.c22
-rw-r--r--drivers/power/wm97xx_battery.c79
-rw-r--r--drivers/regulator/Kconfig24
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/core.c285
-rw-r--r--drivers/regulator/da903x.c77
-rw-r--r--drivers/regulator/fixed.c91
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/pcf50633-regulator.c98
-rw-r--r--drivers/regulator/tps65023-regulator.c632
-rw-r--r--drivers/regulator/tps6507x-regulator.c714
-rw-r--r--drivers/regulator/userspace-consumer.c45
-rw-r--r--drivers/regulator/virtual.c56
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/rtc/Kconfig49
-rw-r--r--drivers/rtc/Makefile17
-rw-r--r--drivers/rtc/rtc-at91rm9200.c24
-rw-r--r--drivers/rtc/rtc-bfin.c2
-rw-r--r--drivers/rtc/rtc-coh901331.c311
-rw-r--r--drivers/rtc/rtc-ds1305.c1
-rw-r--r--drivers/rtc/rtc-ds1307.c3
-rw-r--r--drivers/rtc/rtc-ds1390.c1
-rw-r--r--drivers/rtc/rtc-ds3234.c1
-rw-r--r--drivers/rtc/rtc-ep93xx.c14
-rw-r--r--drivers/rtc/rtc-m41t94.c1
-rw-r--r--drivers/rtc/rtc-max6902.c1
-rw-r--r--drivers/rtc/rtc-mxc.c507
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-pcap.c224
-rw-r--r--drivers/rtc/rtc-pcf2123.c364
-rw-r--r--drivers/rtc/rtc-pxa.c27
-rw-r--r--drivers/rtc/rtc-r9701.c1
-rw-r--r--drivers/rtc/rtc-rs5c348.c1
-rw-r--r--drivers/rtc/rtc-sa1100.c23
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c304
-rw-r--r--drivers/rtc/rtc-sysfs.c14
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c15
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/tape_block.c2
-rw-r--r--drivers/s390/char/zcore.c1
-rw-r--r--drivers/s390/cio/css.c252
-rw-r--r--drivers/s390/cio/css.h3
-rw-r--r--drivers/s390/cio/device.c38
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/idset.c22
-rw-r--r--drivers/s390/cio/idset.h2
-rw-r--r--drivers/s390/cio/qdio_main.c32
-rw-r--r--drivers/s390/crypto/ap_bus.c40
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/sbus/char/jsflash.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c6
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/crisv10.c1
-rw-r--r--drivers/serial/max3100.c1
-rw-r--r--drivers/serial/pxa.c20
-rw-r--r--drivers/serial/serial_core.c4
-rw-r--r--drivers/sfi/Kconfig17
-rw-r--r--drivers/sfi/Makefile3
-rw-r--r--drivers/sfi/sfi_acpi.c175
-rw-r--r--drivers/sfi/sfi_core.c407
-rw-r--r--drivers/sfi/sfi_core.h70
-rw-r--r--drivers/spi/Kconfig23
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/amba-pl022.c8
-rw-r--r--drivers/spi/mxc_spi.c685
-rw-r--r--drivers/spi/omap2_mcspi.c210
-rw-r--r--drivers/spi/omap_uwire.c2
-rw-r--r--drivers/spi/pxa2xx_spi.c32
-rw-r--r--drivers/spi/spi.c85
-rw-r--r--drivers/spi/spi_imx.c1770
-rw-r--r--drivers/spi/spi_ppc4xx.c612
-rw-r--r--drivers/spi/spi_s3c24xx.c159
-rw-r--r--drivers/spi/spi_stmp.c679
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/spi/tle62x0.c1
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/cpc-usb/Kconfig4
-rw-r--r--drivers/staging/cpc-usb/Makefile3
-rw-r--r--drivers/staging/cpc-usb/TODO10
-rw-r--r--drivers/staging/cpc-usb/cpc-usb_drv.c1184
-rw-r--r--drivers/staging/cpc-usb/cpc.h417
-rw-r--r--drivers/staging/cpc-usb/cpc_int.h83
-rw-r--r--drivers/staging/cpc-usb/cpcusb.h86
-rw-r--r--drivers/staging/cpc-usb/sja2m16c.h41
-rw-r--r--drivers/staging/cpc-usb/sja2m16c_2.c452
-rw-r--r--drivers/staging/go7007/Makefile5
-rw-r--r--drivers/staging/rt2860/rtmp.h2
-rw-r--r--drivers/staging/stlc45xx/stlc45xx.c1
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/usb/Kconfig4
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c32
-rw-r--r--drivers/usb/class/usbtmc.c84
-rw-r--r--drivers/usb/core/config.c2
-rw-r--r--drivers/usb/core/devio.c247
-rw-r--r--drivers/usb/core/driver.c75
-rw-r--r--drivers/usb/core/generic.c4
-rw-r--r--drivers/usb/core/hcd.c111
-rw-r--r--drivers/usb/core/hcd.h5
-rw-r--r--drivers/usb/core/hub.c126
-rw-r--r--drivers/usb/core/inode.c3
-rw-r--r--drivers/usb/core/message.c32
-rw-r--r--drivers/usb/core/usb.c13
-rw-r--r--drivers/usb/core/usb.h7
-rw-r--r--drivers/usb/early/Makefile5
-rw-r--r--drivers/usb/early/ehci-dbgp.c996
-rw-r--r--drivers/usb/gadget/Kconfig31
-rw-r--r--drivers/usb/gadget/amd5536udc.c56
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/audio.c24
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/ether.c31
-rw-r--r--drivers/usb/gadget/f_audio.c97
-rw-r--r--drivers/usb/gadget/f_eem.c562
-rw-r--r--drivers/usb/gadget/f_loopback.c1
-rw-r--r--drivers/usb/gadget/f_obex.c1
-rw-r--r--drivers/usb/gadget/f_rndis.c15
-rw-r--r--drivers/usb/gadget/f_sourcesink.c1
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c4
-rw-r--r--drivers/usb/gadget/gmidi.c8
-rw-r--r--drivers/usb/gadget/inode.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c49
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h1
-rw-r--r--drivers/usb/gadget/rndis.c13
-rw-r--r--drivers/usb/gadget/rndis.h3
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c6
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c3
-rw-r--r--drivers/usb/gadget/u_audio.c11
-rw-r--r--drivers/usb/gadget/u_ether.c86
-rw-r--r--drivers/usb/gadget/u_ether.h12
-rw-r--r--drivers/usb/gadget/u_serial.c1
-rw-r--r--drivers/usb/host/Kconfig18
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-atmel.c230
-rw-r--r--drivers/usb/host/ehci-au1xxx.c29
-rw-r--r--drivers/usb/host/ehci-dbg.c46
-rw-r--r--drivers/usb/host/ehci-hcd.c89
-rw-r--r--drivers/usb/host/ehci-hub.c84
-rw-r--r--drivers/usb/host/ehci-mem.c26
-rw-r--r--drivers/usb/host/ehci-pci.c44
-rw-r--r--drivers/usb/host/ehci-q.c95
-rw-r--r--drivers/usb/host/ehci-sched.c100
-rw-r--r--drivers/usb/host/ehci-w90x900.c181
-rw-r--r--drivers/usb/host/ehci.h16
-rw-r--r--drivers/usb/host/isp1362-hcd.c2909
-rw-r--r--drivers/usb/host/isp1362.h1079
-rw-r--r--drivers/usb/host/isp1760-hcd.c4
-rw-r--r--drivers/usb/host/isp1760-hcd.h2
-rw-r--r--drivers/usb/host/isp1760-if.c21
-rw-r--r--drivers/usb/host/ohci-at91.c2
-rw-r--r--drivers/usb/host/ohci-au1xxx.c27
-rw-r--r--drivers/usb/host/ohci-ep93xx.c1
-rw-r--r--drivers/usb/host/ohci-hcd.c1
-rw-r--r--drivers/usb/host/ohci-pxa27x.c34
-rw-r--r--drivers/usb/host/ohci-q.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c1
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c8
-rw-r--r--drivers/usb/host/uhci-q.c1
-rw-r--r--drivers/usb/host/whci/asl.c12
-rw-r--r--drivers/usb/host/whci/hcd.c8
-rw-r--r--drivers/usb/host/whci/pzl.c12
-rw-r--r--drivers/usb/host/whci/qset.c4
-rw-r--r--drivers/usb/host/whci/whci-hc.h1
-rw-r--r--drivers/usb/host/xhci-dbg.c5
-rw-r--r--drivers/usb/host/xhci-hcd.c530
-rw-r--r--drivers/usb/host/xhci-mem.c140
-rw-r--r--drivers/usb/host/xhci-pci.c16
-rw-r--r--drivers/usb/host/xhci-ring.c377
-rw-r--r--drivers/usb/host/xhci.h113
-rw-r--r--drivers/usb/image/microtek.c37
-rw-r--r--drivers/usb/misc/idmouse.c21
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c53
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h2
-rw-r--r--drivers/usb/misc/usbsevseg.c69
-rw-r--r--drivers/usb/mon/Kconfig4
-rw-r--r--drivers/usb/mon/Makefile2
-rw-r--r--drivers/usb/mon/mon_bin.c12
-rw-r--r--drivers/usb/mon/mon_dma.c95
-rw-r--r--drivers/usb/mon/mon_main.c1
-rw-r--r--drivers/usb/mon/mon_text.c14
-rw-r--r--drivers/usb/mon/usb_mon.h14
-rw-r--r--drivers/usb/musb/musb_core.c8
-rw-r--r--drivers/usb/otg/isp1301_omap.c23
-rw-r--r--drivers/usb/serial/ark3116.c24
-rw-r--r--drivers/usb/serial/ch341.c52
-rw-r--r--drivers/usb/serial/cypress_m8.h2
-rw-r--r--drivers/usb/serial/ftdi_sio.c7
-rw-r--r--drivers/usb/serial/ftdi_sio.h10
-rw-r--r--drivers/usb/serial/generic.c206
-rw-r--r--drivers/usb/serial/io_edgeport.c2
-rw-r--r--drivers/usb/serial/iuu_phoenix.c115
-rw-r--r--drivers/usb/serial/kl5kusb105.c2
-rw-r--r--drivers/usb/serial/moto_modem.c2
-rw-r--r--drivers/usb/serial/option.c138
-rw-r--r--drivers/usb/serial/pl2303.c71
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/sierra.c162
-rw-r--r--drivers/usb/serial/spcp8x5.c2
-rw-r--r--drivers/usb/serial/usb-serial.c23
-rw-r--r--drivers/usb/storage/datafab.c4
-rw-r--r--drivers/usb/storage/initializers.c2
-rw-r--r--drivers/usb/storage/jumpshot.c2
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h22
-rw-r--r--drivers/usb/usb-skeleton.c252
-rw-r--r--drivers/usb/wusbcore/wa-hc.h2
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/netdev.c2
-rw-r--r--drivers/video/Kconfig50
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/aty/atyfb_base.c829
-rw-r--r--drivers/video/au1100fb.c7
-rw-r--r--drivers/video/backlight/corgi_lcd.c1
-rw-r--r--drivers/video/backlight/da903x_bl.c20
-rw-r--r--drivers/video/backlight/ltv350qv.c1
-rw-r--r--drivers/video/backlight/tdo24m.c1
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/backlight/vgg2432a4.c3
-rw-r--r--drivers/video/cfbcopyarea.c2
-rw-r--r--drivers/video/console/bitblit.c8
-rw-r--r--drivers/video/console/fbcon.c58
-rw-r--r--drivers/video/console/newport_con.c2
-rw-r--r--drivers/video/console/vgacon.c8
-rw-r--r--drivers/video/da8xx-fb.c890
-rw-r--r--drivers/video/ep93xx-fb.c646
-rw-r--r--drivers/video/fbmem.c19
-rw-r--r--drivers/video/imxfb.c2
-rw-r--r--drivers/video/matrox/g450_pll.c209
-rw-r--r--drivers/video/matrox/g450_pll.h8
-rw-r--r--drivers/video/matrox/i2c-matroxfb.c18
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.c614
-rw-r--r--drivers/video/matrox/matroxfb_DAC1064.h4
-rw-r--r--drivers/video/matrox/matroxfb_Ti3026.c258
-rw-r--r--drivers/video/matrox/matroxfb_accel.c129
-rw-r--r--drivers/video/matrox/matroxfb_accel.h2
-rw-r--r--drivers/video/matrox/matroxfb_base.c772
-rw-r--r--drivers/video/matrox/matroxfb_base.h80
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c160
-rw-r--r--drivers/video/matrox/matroxfb_g450.c185
-rw-r--r--drivers/video/matrox/matroxfb_g450.h8
-rw-r--r--drivers/video/matrox/matroxfb_maven.c50
-rw-r--r--drivers/video/matrox/matroxfb_misc.c288
-rw-r--r--drivers/video/matrox/matroxfb_misc.h15
-rw-r--r--drivers/video/msm/Makefile19
-rw-r--r--drivers/video/msm/mddi.c828
-rw-r--r--drivers/video/msm/mddi_client_dummy.c97
-rw-r--r--drivers/video/msm/mddi_client_nt35399.c255
-rw-r--r--drivers/video/msm/mddi_client_toshiba.c283
-rw-r--r--drivers/video/msm/mddi_hw.h305
-rw-r--r--drivers/video/msm/mdp.c538
-rw-r--r--drivers/video/msm/mdp_csc_table.h582
-rw-r--r--drivers/video/msm/mdp_hw.h621
-rw-r--r--drivers/video/msm/mdp_ppp.c750
-rw-r--r--drivers/video/msm/mdp_scale_tables.c766
-rw-r--r--drivers/video/msm/mdp_scale_tables.h38
-rw-r--r--drivers/video/msm/msm_fb.c636
-rw-r--r--drivers/video/omap/Kconfig82
-rw-r--r--drivers/video/omap/Makefile12
-rw-r--r--drivers/video/omap/blizzard.c91
-rw-r--r--drivers/video/omap/dispc.c130
-rw-r--r--drivers/video/omap/dispc.h7
-rw-r--r--drivers/video/omap/hwa742.c2
-rw-r--r--drivers/video/omap/lcd_2430sdp.c202
-rw-r--r--drivers/video/omap/lcd_ams_delta.c137
-rw-r--r--drivers/video/omap/lcd_apollon.c138
-rw-r--r--drivers/video/omap/lcd_h3.c4
-rw-r--r--drivers/video/omap/lcd_h4.c4
-rw-r--r--drivers/video/omap/lcd_inn1510.c4
-rw-r--r--drivers/video/omap/lcd_inn1610.c4
-rw-r--r--drivers/video/omap/lcd_ldp.c200
-rw-r--r--drivers/video/omap/lcd_mipid.c625
-rw-r--r--drivers/video/omap/lcd_omap2evm.c191
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c130
-rw-r--r--drivers/video/omap/lcd_omap3evm.c192
-rw-r--r--drivers/video/omap/lcd_osk.c4
-rw-r--r--drivers/video/omap/lcd_overo.c179
-rw-r--r--drivers/video/omap/lcd_palmte.c4
-rw-r--r--drivers/video/omap/lcd_palmtt.c4
-rw-r--r--drivers/video/omap/lcd_palmz71.c4
-rw-r--r--drivers/video/omap/omapfb_main.c64
-rw-r--r--drivers/video/omap/rfbi.c7
-rw-r--r--drivers/video/platinumfb.c12
-rw-r--r--drivers/video/pxafb.c32
-rw-r--r--drivers/video/s3c-fb.c2
-rw-r--r--drivers/video/s3c2410fb.c6
-rw-r--r--drivers/video/sis/sis_main.c4
-rw-r--r--drivers/video/sis/vstruct.h2
-rw-r--r--drivers/video/tmiofb.c2
-rw-r--r--drivers/video/via/accel.c589
-rw-r--r--drivers/video/via/accel.h13
-rw-r--r--drivers/video/via/chip.h4
-rw-r--r--drivers/video/via/dvi.c6
-rw-r--r--drivers/video/via/global.c3
-rw-r--r--drivers/video/via/global.h2
-rw-r--r--drivers/video/via/hw.c474
-rw-r--r--drivers/video/via/hw.h57
-rw-r--r--drivers/video/via/ioctl.h6
-rw-r--r--drivers/video/via/lcd.c16
-rw-r--r--drivers/video/via/share.h98
-rw-r--r--drivers/video/via/via_i2c.c64
-rw-r--r--drivers/video/via/viafbdev.c1049
-rw-r--r--drivers/video/via/viafbdev.h61
-rw-r--r--drivers/video/via/viamode.c100
-rw-r--r--drivers/video/via/viamode.h141
-rw-r--r--drivers/video/via/vt1636.c4
-rw-r--r--drivers/virtio/virtio_balloon.c3
-rw-r--r--drivers/virtio/virtio_pci.c125
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/vlynq/vlynq.c3
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c354
-rw-r--r--drivers/xen/balloon.c6
-rw-r--r--firmware/ihex2fw.c2
-rw-r--r--fs/9p/Kconfig9
-rw-r--r--fs/9p/Makefile3
-rw-r--r--fs/9p/cache.c474
-rw-r--r--fs/9p/cache.h176
-rw-r--r--fs/9p/v9fs.c196
-rw-r--r--fs/9p/v9fs.h13
-rw-r--r--fs/9p/v9fs_vfs.h6
-rw-r--r--fs/9p/vfs_addr.c88
-rw-r--r--fs/9p/vfs_file.c25
-rw-r--r--fs/9p/vfs_inode.c61
-rw-r--r--fs/9p/vfs_super.c16
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/adfs/inode.c7
-rw-r--r--fs/afs/flock.c2
-rw-r--r--fs/afs/proc.c8
-rw-r--r--fs/aio.c57
-rw-r--r--fs/anon_inodes.c68
-rw-r--r--fs/attr.c46
-rw-r--r--fs/autofs/dirhash.c2
-rw-r--r--fs/befs/linuxvfs.c9
-rw-r--r--fs/binfmt_elf.c96
-rw-r--r--fs/binfmt_elf_fdpic.c73
-rw-r--r--fs/binfmt_flat.c22
-rw-r--r--fs/block_dev.c142
-rw-r--r--fs/btrfs/async-thread.c254
-rw-r--r--fs/btrfs/async-thread.h12
-rw-r--r--fs/btrfs/btrfs_inode.h1
-rw-r--r--fs/btrfs/compression.c8
-rw-r--r--fs/btrfs/ctree.c6
-rw-r--r--fs/btrfs/ctree.h78
-rw-r--r--fs/btrfs/dir-item.c47
-rw-r--r--fs/btrfs/disk-io.c237
-rw-r--r--fs/btrfs/export.c133
-rw-r--r--fs/btrfs/extent-tree.c1662
-rw-r--r--fs/btrfs/extent_io.c330
-rw-r--r--fs/btrfs/extent_io.h16
-rw-r--r--fs/btrfs/extent_map.c103
-rw-r--r--fs/btrfs/extent_map.h5
-rw-r--r--fs/btrfs/file.c35
-rw-r--r--fs/btrfs/free-space-cache.c36
-rw-r--r--fs/btrfs/inode-item.c4
-rw-r--r--fs/btrfs/inode-map.c93
-rw-r--r--fs/btrfs/inode.c692
-rw-r--r--fs/btrfs/ioctl.c339
-rw-r--r--fs/btrfs/ioctl.h3
-rw-r--r--fs/btrfs/ordered-data.c33
-rw-r--r--fs/btrfs/ordered-data.h3
-rw-r--r--fs/btrfs/orphan.c20
-rw-r--r--fs/btrfs/relocation.c280
-rw-r--r--fs/btrfs/root-tree.c138
-rw-r--r--fs/btrfs/super.c5
-rw-r--r--fs/btrfs/transaction.c38
-rw-r--r--fs/btrfs/tree-log.c27
-rw-r--r--fs/btrfs/volumes.c117
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/buffer.c77
-rw-r--r--fs/char_dev.c3
-rw-r--r--fs/cifs/Kconfig1
-rw-r--r--fs/cifs/cifs_dfs_ref.c4
-rw-r--r--fs/cifs/cifsfs.c100
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h21
-rw-r--r--fs/cifs/cifsproto.h11
-rw-r--r--fs/cifs/cifssmb.c1
-rw-r--r--fs/cifs/connect.c1
-rw-r--r--fs/cifs/dir.c64
-rw-r--r--fs/cifs/file.c137
-rw-r--r--fs/cifs/inode.c53
-rw-r--r--fs/cifs/misc.c34
-rw-r--r--fs/cifs/readdir.c4
-rw-r--r--fs/cifs/transport.c50
-rw-r--r--fs/coda/coda_int.h1
-rw-r--r--fs/compat.c31
-rw-r--r--fs/devpts/inode.c3
-rw-r--r--fs/dlm/debug_fs.c12
-rw-r--r--fs/drop_caches.c4
-rw-r--r--fs/ecryptfs/Kconfig4
-rw-r--r--fs/ecryptfs/crypto.c39
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h2
-rw-r--r--fs/ecryptfs/inode.c2
-rw-r--r--fs/ecryptfs/keystore.c39
-rw-r--r--fs/ecryptfs/kthread.c24
-rw-r--r--fs/ecryptfs/main.c3
-rw-r--r--fs/ecryptfs/mmap.c6
-rw-r--r--fs/ecryptfs/read_write.c32
-rw-r--r--fs/ecryptfs/super.c2
-rw-r--r--fs/eventfd.c67
-rw-r--r--fs/exec.c125
-rw-r--r--fs/exofs/super.c6
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/namei.c2
-rw-r--r--fs/ext2/xip.c2
-rw-r--r--fs/ext3/inode.c3
-rw-r--r--fs/ext3/super.c4
-rw-r--r--fs/ext4/inode.c6
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/fat/inode.c16
-rw-r--r--fs/fcntl.c108
-rw-r--r--fs/file_table.c6
-rw-r--r--fs/fs-writeback.c165
-rw-r--r--fs/fuse/dir.c14
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c11
-rw-r--r--fs/gfs2/aops.c3
-rw-r--r--fs/gfs2/ops_inode.c1
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/hfs/mdb.c6
-rw-r--r--fs/hfsplus/super.c6
-rw-r--r--fs/hugetlbfs/inode.c48
-rw-r--r--fs/inode.c124
-rw-r--r--fs/internal.h1
-rw-r--r--fs/ioctl.c9
-rw-r--r--fs/isofs/inode.c8
-rw-r--r--fs/jbd2/journal.c4
-rw-r--r--fs/jffs2/background.c20
-rw-r--r--fs/jffs2/malloc.c4
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/jfs/super.c9
-rw-r--r--fs/libfs.c13
-rw-r--r--fs/lockd/clntlock.c2
-rw-r--r--fs/lockd/clntproc.c2
-rw-r--r--fs/lockd/host.c4
-rw-r--r--fs/lockd/mon.c2
-rw-r--r--fs/lockd/svclock.c2
-rw-r--r--fs/lockd/svcsubs.c2
-rw-r--r--fs/lockd/xdr.c1
-rw-r--r--fs/lockd/xdr4.c1
-rw-r--r--fs/locks.c2
-rw-r--r--fs/minix/dir.c22
-rw-r--r--fs/namespace.c77
-rw-r--r--fs/ncpfs/dir.c2
-rw-r--r--fs/ncpfs/inode.c12
-rw-r--r--fs/ncpfs/ioctl.c8
-rw-r--r--fs/nfs/callback_xdr.c2
-rw-r--r--fs/nfs/client.c17
-rw-r--r--fs/nfs/file.c1
-rw-r--r--fs/nfs/fscache.c25
-rw-r--r--fs/nfs/fscache.h6
-rw-r--r--fs/nfs/inode.c54
-rw-r--r--fs/nfs/nfs2xdr.c1
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs3xdr.c1
-rw-r--r--fs/nfs/nfs4proc.c1
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/nfs4xdr.c1
-rw-r--r--fs/nfs/proc.c1
-rw-r--r--fs/nfs/super.c78
-rw-r--r--fs/nfsd/export.c4
-rw-r--r--fs/nfsd/nfs3xdr.c75
-rw-r--r--fs/nfsd/nfs4acl.c4
-rw-r--r--fs/nfsd/nfs4callback.c263
-rw-r--r--fs/nfsd/nfs4idmap.c1
-rw-r--r--fs/nfsd/nfs4proc.c89
-rw-r--r--fs/nfsd/nfs4state.c685
-rw-r--r--fs/nfsd/nfs4xdr.c42
-rw-r--r--fs/nfsd/nfsctl.c8
-rw-r--r--fs/nfsd/nfsfh.c158
-rw-r--r--fs/nfsd/nfssvc.c54
-rw-r--r--fs/nfsd/vfs.c9
-rw-r--r--fs/nilfs2/btnode.c2
-rw-r--r--fs/nilfs2/file.c2
-rw-r--r--fs/nilfs2/gcinode.c2
-rw-r--r--fs/nilfs2/inode.c2
-rw-r--r--fs/nilfs2/mdt.c4
-rw-r--r--fs/nilfs2/namei.c6
-rw-r--r--fs/nilfs2/nilfs.h10
-rw-r--r--fs/nilfs2/super.c4
-rw-r--r--fs/nls/nls_base.c3
-rw-r--r--fs/ntfs/aops.c2
-rw-r--r--fs/ntfs/file.c42
-rw-r--r--fs/ntfs/layout.h2
-rw-r--r--fs/ntfs/malloc.h2
-rw-r--r--fs/ntfs/super.c10
-rw-r--r--fs/ocfs2/Makefile1
-rw-r--r--fs/ocfs2/alloc.c1342
-rw-r--r--fs/ocfs2/alloc.h101
-rw-r--r--fs/ocfs2/aops.c38
-rw-r--r--fs/ocfs2/aops.h2
-rw-r--r--fs/ocfs2/buffer_head_io.c47
-rw-r--r--fs/ocfs2/buffer_head_io.h8
-rw-r--r--fs/ocfs2/cluster/masklog.c1
-rw-r--r--fs/ocfs2/cluster/masklog.h1
-rw-r--r--fs/ocfs2/cluster/netdebug.c4
-rw-r--r--fs/ocfs2/dir.c107
-rw-r--r--fs/ocfs2/dlm/dlmast.c1
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c1
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c3
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c1
-rw-r--r--fs/ocfs2/dlm/dlmlock.c1
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c1
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c1
-rw-r--r--fs/ocfs2/dlm/dlmthread.c7
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c1
-rw-r--r--fs/ocfs2/dlmglue.c105
-rw-r--r--fs/ocfs2/dlmglue.h6
-rw-r--r--fs/ocfs2/extent_map.c33
-rw-r--r--fs/ocfs2/extent_map.h8
-rw-r--r--fs/ocfs2/file.c151
-rw-r--r--fs/ocfs2/file.h2
-rw-r--r--fs/ocfs2/inode.c86
-rw-r--r--fs/ocfs2/inode.h20
-rw-r--r--fs/ocfs2/ioctl.c14
-rw-r--r--fs/ocfs2/journal.c82
-rw-r--r--fs/ocfs2/journal.h94
-rw-r--r--fs/ocfs2/localalloc.c12
-rw-r--r--fs/ocfs2/namei.c341
-rw-r--r--fs/ocfs2/namei.h6
-rw-r--r--fs/ocfs2/ocfs2.h52
-rw-r--r--fs/ocfs2/ocfs2_fs.h107
-rw-r--r--fs/ocfs2/ocfs2_lockid.h5
-rw-r--r--fs/ocfs2/quota.h2
-rw-r--r--fs/ocfs2/quota_global.c9
-rw-r--r--fs/ocfs2/quota_local.c26
-rw-r--r--fs/ocfs2/refcounttree.c4313
-rw-r--r--fs/ocfs2/refcounttree.h106
-rw-r--r--fs/ocfs2/resize.c16
-rw-r--r--fs/ocfs2/slot_map.c10
-rw-r--r--fs/ocfs2/suballoc.c35
-rw-r--r--fs/ocfs2/super.c16
-rw-r--r--fs/ocfs2/symlink.c1
-rw-r--r--fs/ocfs2/uptodate.c265
-rw-r--r--fs/ocfs2/uptodate.h51
-rw-r--r--fs/ocfs2/xattr.c2056
-rw-r--r--fs/ocfs2/xattr.h15
-rw-r--r--fs/omfs/dir.c2
-rw-r--r--fs/omfs/file.c4
-rw-r--r--fs/omfs/inode.c2
-rw-r--r--fs/omfs/omfs.h6
-rw-r--r--fs/open.c5
-rw-r--r--fs/partitions/check.c2
-rw-r--r--fs/proc/array.c92
-rw-r--r--fs/proc/base.c67
-rw-r--r--fs/proc/kcore.c335
-rw-r--r--fs/proc/meminfo.c13
-rw-r--r--fs/proc/nommu.c2
-rw-r--r--fs/proc/page.c5
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/proc/task_mmu.c57
-rw-r--r--fs/proc/uptime.c7
-rw-r--r--fs/qnx4/Kconfig11
-rw-r--r--fs/qnx4/Makefile2
-rw-r--r--fs/qnx4/bitmap.c81
-rw-r--r--fs/qnx4/dir.c5
-rw-r--r--fs/qnx4/file.c40
-rw-r--r--fs/qnx4/inode.c84
-rw-r--r--fs/qnx4/namei.c105
-rw-r--r--fs/qnx4/qnx4.h8
-rw-r--r--fs/qnx4/truncate.c34
-rw-r--r--fs/quota/dquot.c4
-rw-r--r--fs/ramfs/file-nommu.c18
-rw-r--r--fs/ramfs/inode.c4
-rw-r--r--fs/read_write.c3
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/romfs/super.c4
-rw-r--r--fs/select.c14
-rw-r--r--fs/seq_file.c74
-rw-r--r--fs/smbfs/inode.c10
-rw-r--r--fs/smbfs/proc.c2
-rw-r--r--fs/squashfs/super.c4
-rw-r--r--fs/super.c69
-rw-r--r--fs/sync.c1
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_quotaops.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_sysctl.c3
-rw-r--r--fs/xfs/xfs_fs.h2
-rw-r--r--include/acpi/acpi_bus.h34
-rw-r--r--include/acpi/acpiosxf.h3
-rw-r--r--include/acpi/acpixf.h10
-rw-r--r--include/acpi/actbl.h78
-rw-r--r--include/acpi/actbl1.h872
-rw-r--r--include/acpi/actbl2.h868
-rw-r--r--include/acpi/actypes.h94
-rw-r--r--include/acpi/button.h25
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h4
-rw-r--r--include/asm-generic/cputime.h1
-rw-r--r--include/asm-generic/fcntl.h13
-rw-r--r--include/asm-generic/gpio.h8
-rw-r--r--include/asm-generic/kmap_types.h47
-rw-r--r--include/asm-generic/mman-common.h4
-rw-r--r--include/asm-generic/mman.h1
-rw-r--r--include/asm-generic/sections.h16
-rw-r--r--include/asm-generic/siginfo.h8
-rw-r--r--include/asm-generic/syscall.h8
-rw-r--r--include/asm-generic/topology.h17
-rw-r--r--include/asm-generic/unistd.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/i915_drm.h19
-rw-r--r--include/linux/acpi.h15
-rw-r--r--include/linux/amba/mmci.h (renamed from arch/arm/include/asm/mach/mmc.h)9
-rw-r--r--include/linux/amba/pl022.h8
-rw-r--r--include/linux/anon_inodes.h3
-rw-r--r--include/linux/async_tx.h129
-rw-r--r--include/linux/backing-dev.h3
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bootmem.h5
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/cgroup.h53
-rw-r--r--include/linux/cn_proc.h10
-rw-r--r--include/linux/configfs.h4
-rw-r--r--include/linux/cpumask.h721
-rw-r--r--include/linux/cred.h18
-rw-r--r--include/linux/dca.h11
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/dmaengine.h179
-rw-r--r--include/linux/eventfd.h6
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/firewire.h14
-rw-r--r--include/linux/flex_array.h32
-rw-r--r--include/linux/fs.h19
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/futex.h10
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h15
-rw-r--r--include/linux/gpio.h11
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/hugetlb.h42
-rw-r--r--include/linux/i2c-id.h11
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/i2c/adp5588.h92
-rw-r--r--include/linux/i2c/mcs5000_ts.h24
-rw-r--r--include/linux/i8042.h30
-rw-r--r--include/linux/init_task.h14
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/iova.h1
-rw-r--r--include/linux/jbd.h2
-rw-r--r--include/linux/kernel.h16
-rw-r--r--include/linux/kmemcheck.h11
-rw-r--r--include/linux/ksm.h79
-rw-r--r--include/linux/libps2.h2
-rw-r--r--include/linux/linkage.h2
-rw-r--r--include/linux/lis3lv02d.h11
-rw-r--r--include/linux/lockd/lockd.h45
-rw-r--r--include/linux/mISDNif.h2
-rw-r--r--include/linux/magic.h6
-rw-r--r--include/linux/memcontrol.h10
-rw-r--r--include/linux/memory_hotplug.h8
-rw-r--r--include/linux/mempool.h10
-rw-r--r--include/linux/mfd/da903x.h4
-rw-r--r--include/linux/mfd/wm831x/pmu.h189
-rw-r--r--include/linux/mm.h49
-rw-r--r--include/linux/mm_inline.h31
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/mmc/card.h12
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/host.h58
-rw-r--r--include/linux/mmc/mmc.h3
-rw-r--r--include/linux/mmc/sdio_func.h3
-rw-r--r--include/linux/mmu_context.h9
-rw-r--r--include/linux/mmu_notifier.h34
-rw-r--r--include/linux/mmzone.h30
-rw-r--r--include/linux/mod_devicetable.h11
-rw-r--r--include/linux/module.h17
-rw-r--r--include/linux/mtd/nand.h5
-rw-r--r--include/linux/mtd/nand_ecc.h6
-rw-r--r--include/linux/mtd/onenand.h8
-rw-r--r--include/linux/mtd/onenand_regs.h3
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nfs4.h2
-rw-r--r--include/linux/nfsd/nfsd.h9
-rw-r--r--include/linux/nfsd/state.h77
-rw-r--r--include/linux/nfsd/xdr4.h19
-rw-r--r--include/linux/oom.h11
-rw-r--r--include/linux/page-flags.h42
-rw-r--r--include/linux/page_cgroup.h13
-rw-r--r--include/linux/pci_ids.h11
-rw-r--r--include/linux/perf_counter.h497
-rw-r--r--include/linux/perf_event.h858
-rw-r--r--include/linux/phonet.h1
-rw-r--r--include/linux/pnp.h1
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/power_supply.h21
-rw-r--r--include/linux/prctl.h6
-rw-r--r--include/linux/proc_fs.h16
-rw-r--r--include/linux/quotaops.h4
-rw-r--r--include/linux/regulator/consumer.h4
-rw-r--r--include/linux/regulator/driver.h5
-rw-r--r--include/linux/regulator/fixed.h24
-rw-r--r--include/linux/regulator/machine.h26
-rw-r--r--include/linux/regulator/max1586.h4
-rw-r--r--include/linux/relay.h2
-rw-r--r--include/linux/res_counter.h64
-rw-r--r--include/linux/rmap.h27
-rw-r--r--include/linux/sched.h75
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/seq_file.h38
-rw-r--r--include/linux/serial_core.h4
-rw-r--r--include/linux/sfi.h206
-rw-r--r--include/linux/sfi_acpi.h93
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/smp.h11
-rw-r--r--include/linux/spi/mc33880.h10
-rw-r--r--include/linux/spi/spi.h51
-rw-r--r--include/linux/sunrpc/auth.h4
-rw-r--r--include/linux/sunrpc/clnt.h114
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/svcsock.h1
-rw-r--r--include/linux/sunrpc/xdr.h5
-rw-r--r--include/linux/sunrpc/xprt.h19
-rw-r--r--include/linux/sunrpc/xprtrdma.h5
-rw-r--r--include/linux/sunrpc/xprtsock.h11
-rw-r--r--include/linux/swap.h61
-rw-r--r--include/linux/swapops.h38
-rw-r--r--include/linux/syscalls.h9
-rw-r--r--include/linux/sysctl.h19
-rw-r--r--include/linux/time.h28
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/tracehook.h34
-rw-r--r--include/linux/ucb1400.h19
-rw-r--r--include/linux/unaligned/be_byteshift.h2
-rw-r--r--include/linux/unaligned/le_byteshift.h2
-rw-r--r--include/linux/usb.h27
-rw-r--r--include/linux/usb/audio.h287
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/ehci_def.h35
-rw-r--r--include/linux/usb/isp1362.h46
-rw-r--r--include/linux/usb/isp1760.h18
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/usbdevice_fs.h3
-rw-r--r--include/linux/utsname.h1
-rw-r--r--include/linux/vgaarb.h14
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_9p.h2
-rw-r--r--include/linux/virtio_balloon.h3
-rw-r--r--include/linux/virtio_blk.h18
-rw-r--r--include/linux/virtio_config.h3
-rw-r--r--include/linux/virtio_console.h3
-rw-r--r--include/linux/virtio_ids.h17
-rw-r--r--include/linux/virtio_net.h3
-rw-r--r--include/linux/virtio_rng.h3
-rw-r--r--include/linux/vmstat.h16
-rw-r--r--include/linux/wm97xx.h18
-rw-r--r--include/linux/wm97xx_batt.h18
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/linux/writeback.h11
-rw-r--r--include/net/9p/9p.h3
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ipip.h1
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/rdma/ib_cm.h2
-rw-r--r--include/scsi/fc/fc_fc2.h3
-rw-r--r--include/trace/events/kmem.h163
-rw-r--r--include/trace/events/timer.h342
-rw-r--r--include/trace/ftrace.h10
-rw-r--r--include/video/da8xx-fb.h103
-rw-r--r--init/Kconfig53
-rw-r--r--init/main.c12
-rw-r--r--ipc/ipc_sysctl.c16
-rw-r--r--ipc/mq_sysctl.c8
-rw-r--r--ipc/mqueue.c4
-rw-r--r--ipc/shm.c2
-rw-r--r--ipc/util.c2
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/audit.c18
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/auditsc.c6
-rw-r--r--kernel/cgroup.c1113
-rw-r--r--kernel/cgroup_debug.c105
-rw-r--r--kernel/cgroup_freezer.c15
-rw-r--r--kernel/cpuset.c66
-rw-r--r--kernel/cred.c19
-rw-r--r--kernel/exit.c164
-rw-r--r--kernel/fork.c75
-rw-r--r--kernel/gcov/Kconfig2
-rw-r--r--kernel/hrtimer.c40
-rw-r--r--kernel/hung_task.c4
-rw-r--r--kernel/itimer.c169
-rw-r--r--kernel/kallsyms.c3
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/lockdep.c3
-rw-r--r--kernel/lockdep_proc.c2
-rw-r--r--kernel/module.c164
-rw-r--r--kernel/ns_cgroup.c16
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/params.c7
-rw-r--r--kernel/perf_event.c (renamed from kernel/perf_counter.c)2449
-rw-r--r--kernel/pid.c15
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/posix-cpu-timers.c155
-rw-r--r--kernel/power/process.c1
-rw-r--r--kernel/power/snapshot.c2
-rw-r--r--kernel/power/swap.c1
-rw-r--r--kernel/printk.c27
-rw-r--r--kernel/ptrace.c11
-rw-r--r--kernel/res_counter.c21
-rw-r--r--kernel/resource.c23
-rw-r--r--kernel/sched.c75
-rw-r--r--kernel/sched_fair.c4
-rw-r--r--kernel/signal.c168
-rw-r--r--kernel/slow-work.c12
-rw-r--r--kernel/smp.c36
-rw-r--r--kernel/softlockup.c4
-rw-r--r--kernel/sys.c46
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/sysctl.c149
-rw-r--r--kernel/time/Makefile2
-rw-r--r--kernel/time/timeconv.c127
-rw-r--r--kernel/timer.c36
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/trace.c11
-rw-r--r--kernel/trace/trace_hw_branches.c2
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/trace/trace_syscalls.c6
-rw-r--r--kernel/tracepoint.c2
-rw-r--r--kernel/uid16.c1
-rw-r--r--kernel/utsname_sysctl.c4
-rw-r--r--lib/Kconfig.debug8
-rw-r--r--lib/Kconfig.kmemcheck3
-rw-r--r--lib/decompress_inflate.c8
-rw-r--r--lib/decompress_unlzma.c10
-rw-r--r--lib/flex_array.c121
-rw-r--r--lib/vsprintf.c39
-rw-r--r--lib/zlib_deflate/deflate.c4
-rw-r--r--mm/Kconfig26
-rw-r--r--mm/Kconfig.debug12
-rw-r--r--mm/Makefile9
-rw-r--r--mm/filemap.c10
-rw-r--r--mm/hugetlb.c263
-rw-r--r--mm/hwpoison-inject.c41
-rw-r--r--mm/internal.h10
-rw-r--r--mm/ksm.c1711
-rw-r--r--mm/madvise.c83
-rw-r--r--mm/memcontrol.c739
-rw-r--r--mm/memory-failure.c832
-rw-r--r--mm/memory.c298
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/mempool.c7
-rw-r--r--mm/migrate.c26
-rw-r--r--mm/mlock.c128
-rw-r--r--mm/mmap.c57
-rw-r--r--mm/mmu_context.c58
-rw-r--r--mm/mmu_notifier.c20
-rw-r--r--mm/mprotect.c4
-rw-r--r--mm/mremap.c18
-rw-r--r--mm/nommu.c128
-rw-r--r--mm/oom_kill.c86
-rw-r--r--mm/page-writeback.c62
-rw-r--r--mm/page_alloc.c328
-rw-r--r--mm/page_cgroup.c12
-rw-r--r--mm/quicklist.c3
-rw-r--r--mm/rmap.c138
-rw-r--r--mm/shmem.c25
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slub.c3
-rw-r--r--mm/sparse-vmemmap.c8
-rw-r--r--mm/sparse.c9
-rw-r--r--mm/swap.c8
-rw-r--r--mm/swap_state.c143
-rw-r--r--mm/swapfile.c8
-rw-r--r--mm/truncate.c136
-rw-r--r--mm/vmalloc.c223
-rw-r--r--mm/vmscan.c272
-rw-r--r--mm/vmstat.c5
-rw-r--r--net/9p/trans_virtio.c5
-rw-r--r--net/ax25/af_ax25.c31
-rw-r--r--net/bluetooth/hidp/core.c7
-rw-r--r--net/bridge/br_netfilter.c4
-rw-r--r--net/core/pktgen.c160
-rw-r--r--net/core/sock.c4
-rw-r--r--net/dccp/proto.c6
-rw-r--r--net/decnet/dn_dev.c5
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/decnet/sysctl_net_decnet.c2
-rw-r--r--net/ipv4/devinet.c12
-rw-r--r--net/ipv4/ip_gre.c13
-rw-r--r--net/ipv4/ip_sockglue.c3
-rw-r--r--net/ipv4/ipip.c8
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/sysctl_net_ipv4.c16
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/ip6_tunnel.c7
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ndisc.c8
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/irda/irsysctl.c8
-rw-r--r--net/mac80211/scan.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c4
-rw-r--r--net/netfilter/nf_log.c4
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netfilter/xt_hashlimit.c8
-rw-r--r--net/netlink/af_netlink.c25
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/phonet/af_phonet.c6
-rw-r--r--net/phonet/socket.c16
-rw-r--r--net/phonet/sysctl.c4
-rw-r--r--net/rxrpc/ar-call.c2
-rw-r--r--net/sched/sch_hfsc.c2
-rw-r--r--net/sctp/protocol.c6
-rw-r--r--net/socket.c5
-rw-r--r--net/sunrpc/auth.c20
-rw-r--r--net/sunrpc/auth_generic.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c6
-rw-r--r--net/sunrpc/auth_null.c1
-rw-r--r--net/sunrpc/cache.c109
-rw-r--r--net/sunrpc/clnt.c6
-rw-r--r--net/sunrpc/rpc_pipe.c5
-rw-r--r--net/sunrpc/sched.c7
-rw-r--r--net/sunrpc/sunrpc.h14
-rw-r--r--net/sunrpc/svc_xprt.c25
-rw-r--r--net/sunrpc/svcauth_unix.c1
-rw-r--r--net/sunrpc/svcsock.c335
-rw-r--r--net/sunrpc/sysctl.c4
-rw-r--r--net/sunrpc/xprt.c15
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c4
-rw-r--r--net/sunrpc/xprtsock.c251
-rw-r--r--net/wireless/wext-compat.c2
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--scripts/Kbuild.include16
-rw-r--r--scripts/Makefile.build6
-rw-r--r--scripts/basic/docproc.c34
-rw-r--r--scripts/basic/fixdep.c30
-rw-r--r--scripts/basic/hash.c4
-rwxr-xr-xscripts/checkincludes.pl71
-rwxr-xr-xscripts/checkpatch.pl106
-rw-r--r--scripts/conmakehash.c6
-rw-r--r--scripts/genksyms/genksyms.c6
-rwxr-xr-xscripts/get_maintainer.pl421
-rw-r--r--scripts/kallsyms.c2
-rw-r--r--scripts/kconfig/conf.c24
-rw-r--r--scripts/kconfig/confdata.c2
-rw-r--r--scripts/kconfig/expr.c6
-rw-r--r--scripts/kconfig/gconf.c21
-rw-r--r--scripts/kconfig/gconf.glade4
-rw-r--r--scripts/kconfig/kxgettext.c4
-rw-r--r--scripts/kconfig/lkc_proto.h2
-rw-r--r--scripts/kconfig/mconf.c78
-rw-r--r--scripts/kconfig/menu.c84
-rw-r--r--scripts/kconfig/qconf.cc10
-rw-r--r--scripts/kconfig/symbol.c6
-rw-r--r--scripts/markup_oops.pl5
-rw-r--r--scripts/mod/file2alias.c13
-rw-r--r--scripts/mod/modpost.c4
-rw-r--r--scripts/mod/sumversion.c2
-rw-r--r--scripts/selinux/mdp/mdp.c4
-rwxr-xr-xscripts/tags.sh3
-rw-r--r--security/device_cgroup.c3
-rw-r--r--security/integrity/ima/ima_fs.c4
-rw-r--r--security/keys/gc.c4
-rw-r--r--security/lsm_audit.c2
-rw-r--r--security/min_addr.c4
-rw-r--r--security/selinux/avc.c19
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--security/smack/smackfs.c6
-rw-r--r--sound/arm/pxa2xx-ac97.c20
-rw-r--r--sound/core/pcm_native.c73
-rw-r--r--sound/oss/swarm_cs4297a.c3
-rw-r--r--sound/oss/sys_timer.c3
-rw-r--r--sound/pci/lx6464es/lx6464es.h2
-rw-r--r--sound/pci/lx6464es/lx_core.c98
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c8
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.h2
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c22
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.h2
-rw-r--r--sound/soc/blackfin/bf5xx-sport.c2
-rw-r--r--sound/soc/codecs/ad1836.c3
-rw-r--r--sound/soc/codecs/ad1938.c2
-rw-r--r--sound/soc/codecs/wm8753.c1
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/davinci/davinci-mcasp.c24
-rw-r--r--sound/soc/pxa/pxa-ssp.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_uda134x.c2
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-annotate.c28
-rw-r--r--tools/perf/builtin-record.c22
-rw-r--r--tools/perf/builtin-report.c48
-rw-r--r--tools/perf/builtin-sched.c20
-rw-r--r--tools/perf/builtin-stat.c10
-rw-r--r--tools/perf/builtin-timechart.c14
-rw-r--r--tools/perf/builtin-top.c12
-rw-r--r--tools/perf/builtin-trace.c22
-rw-r--r--tools/perf/design.txt58
-rw-r--r--tools/perf/perf.h12
-rw-r--r--tools/perf/util/event.h4
-rw-r--r--tools/perf/util/header.c6
-rw-r--r--tools/perf/util/header.h8
-rw-r--r--tools/perf/util/parse-events.c32
-rw-r--r--tools/perf/util/parse-events.h2
-rw-r--r--tools/perf/util/trace-event-info.c8
-rw-r--r--tools/perf/util/trace-event.h2
-rw-r--r--usr/.gitignore2
-rw-r--r--usr/Makefile2
-rw-r--r--usr/gen_init_cpio.c2
-rw-r--r--virt/kvm/kvm_main.c3
2424 files changed, 114724 insertions, 43285 deletions
diff --git a/Documentation/ABI/stable/sysfs-class-backlight b/Documentation/ABI/stable/sysfs-class-backlight
new file mode 100644
index 00000000000..4d637e1c4ff
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-class-backlight
@@ -0,0 +1,36 @@
+What: /sys/class/backlight/<backlight>/bl_power
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Control BACKLIGHT power, values are FB_BLANK_* from fb.h
+ - FB_BLANK_UNBLANK (0) : power on.
+ - FB_BLANK_POWERDOWN (4) : power off
+Users: HAL
+
+What: /sys/class/backlight/<backlight>/brightness
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Control the brightness for this <backlight>. Values
+ are between 0 and max_brightness. This file will also
+ show the brightness level stored in the driver, which
+ may not be the actual brightness (see actual_brightness).
+Users: HAL
+
+What: /sys/class/backlight/<backlight>/actual_brightness
+Date: March 2006
+KernelVersion: 2.6.17
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Show the actual brightness by querying the hardware.
+Users: HAL
+
+What: /sys/class/backlight/<backlight>/max_brightness
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Maximum brightness for <backlight>.
+Users: HAL
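For a rough illustration of the brightness interface above, a userspace program might set a level like this (a minimal sketch; the device name "acpi_video0" and the level 5 are assumptions, and a real program should read max_brightness first):

#include <stdio.h>

int main(void)
{
	/* Device name is an assumption; list /sys/class/backlight/ to find it. */
	FILE *f = fopen("/sys/class/backlight/acpi_video0/brightness", "w");

	if (!f)
		return 1;
	/* Write a level between 0 and max_brightness. */
	fprintf(f, "%d\n", 5);
	fclose(f);
	return 0;
}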
diff --git a/Documentation/ABI/testing/sysfs-class-lcd b/Documentation/ABI/testing/sysfs-class-lcd
new file mode 100644
index 00000000000..35906bf7aa7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-lcd
@@ -0,0 +1,23 @@
+What: /sys/class/lcd/<lcd>/lcd_power
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Control LCD power, values are FB_BLANK_* from fb.h
+ - FB_BLANK_UNBLANK (0) : power on.
+ - FB_BLANK_POWERDOWN (4) : power off
+
+What: /sys/class/lcd/<lcd>/contrast
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Current contrast of this LCD device. Value is between 0 and
+ /sys/class/lcd/<lcd>/max_contrast.
+
+What: /sys/class/lcd/<lcd>/max_contrast
+Date: April 2005
+KernelVersion: 2.6.12
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Maximum contrast for this LCD device.
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
new file mode 100644
index 00000000000..9e4541d71cb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -0,0 +1,28 @@
+What: /sys/class/leds/<led>/brightness
+Date: March 2006
+KernelVersion: 2.6.17
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Set the brightness of the LED. Most LEDs don't
+ have hardware brightness support so will just be turned on for
+ non-zero brightness settings. The value is between 0 and
+ /sys/class/leds/<led>/max_brightness.
+
+What: /sys/class/leds/<led>/max_brightness
+Date: March 2006
+KernelVersion: 2.6.17
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Maximum brightness level for this led, default is 255 (LED_FULL).
+
+What: /sys/class/leds/<led>/trigger
+Date: March 2006
+KernelVersion: 2.6.17
+Contact: Richard Purdie <rpurdie@rpsys.net>
+Description:
+ Set the trigger for this LED. A trigger is a kernel-based source
+ of LED events.
+ You can change triggers in a similar manner to the way an IO
+ scheduler is chosen. Trigger specific parameters can appear in
+ /sys/class/leds/<led> once a given trigger is selected.
+
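For a rough illustration of trigger selection as described above, a userspace program might select the "timer" trigger like this (a minimal sketch; the LED name "myled" is an assumption, and the write only succeeds if the corresponding trigger support is present in the kernel):

#include <stdio.h>

int main(void)
{
	/* LED name is an assumption; list /sys/class/leds/ to find it. */
	FILE *f = fopen("/sys/class/leds/myled/trigger", "w");

	if (!f)
		return 1;
	/* Writing "none" instead would disable the trigger again. */
	fprintf(f, "timer\n");
	fclose(f);
	return 0;
}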
diff --git a/Documentation/ABI/testing/sysfs-gpio b/Documentation/ABI/testing/sysfs-gpio
index 8aab8092ad3..80f4c94c7be 100644
--- a/Documentation/ABI/testing/sysfs-gpio
+++ b/Documentation/ABI/testing/sysfs-gpio
@@ -19,6 +19,7 @@ Description:
/gpioN ... for each exported GPIO #N
/value ... always readable, writes fail for input GPIOs
/direction ... r/w as: in, out (default low); write: high, low
+ /edge ... r/w as: none, falling, rising, both
/gpiochipN ... for each gpiochip; #N is its first GPIO
/base ... (r/o) same as N
/label ... (r/o) descriptive, not necessarily unique
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-laptop b/Documentation/ABI/testing/sysfs-platform-asus-laptop
new file mode 100644
index 00000000000..a1cb660c50c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-asus-laptop
@@ -0,0 +1,52 @@
+What: /sys/devices/platform/asus-laptop/display
+Date: January 2007
+KernelVersion: 2.6.20
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ This file allows display switching. The value
+ is composed of 4 bits and defined as follows:
+ 4321
+ |||`- LCD
+ ||`-- CRT
+ |`--- TV
+ `---- DVI
+ Ex: - 0 (0000b) means no display
+ - 3 (0011b) CRT+LCD.
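As a sketch of how the 4-bit value above can be composed (illustration only; the chosen bits are just an example):

#include <stdio.h>

int main(void)
{
	/* Bit 1 = LCD, bit 2 = CRT, bit 3 = TV, bit 4 = DVI, as described above. */
	int lcd = 1, crt = 1, tv = 0, dvi = 0;
	int value = (dvi << 3) | (tv << 2) | (crt << 1) | lcd;

	/* Prints 3 (0011b), i.e. CRT+LCD; the result is what would be
	 * written to the display file. */
	printf("%d\n", value);
	return 0;
}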
+
+What: /sys/devices/platform/asus-laptop/gps
+Date: January 2007
+KernelVersion: 2.6.20
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the gps device. 1 means on, 0 means off.
+Users: Lapsus
+
+What: /sys/devices/platform/asus-laptop/ledd
+Date: January 2007
+KernelVersion: 2.6.20
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Some models like the W1N have an LED display that can be
+ used to show several pieces of information.
+ To control the LED display, use the following:
+ echo 0x0T000DDD > /sys/devices/platform/asus-laptop/
+ where T controls the 3-letter display and DDD the 3-digit display.
+ The DDD table can be found in Documentation/laptops/asus-laptop.txt
+
+What: /sys/devices/platform/asus-laptop/bluetooth
+Date: January 2007
+KernelVersion: 2.6.20
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the bluetooth device. 1 means on, 0 means off.
+ This may control the led, the device or both.
+Users: Lapsus
+
+What: /sys/devices/platform/asus-laptop/wlan
+Date: January 2007
+KernelVersion: 2.6.20
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the wlan device. 1 means on, 0 means off.
+ This may control the led, the device or both.
+Users: Lapsus
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-laptop b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
new file mode 100644
index 00000000000..7445dfb321b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-eeepc-laptop
@@ -0,0 +1,50 @@
+What: /sys/devices/platform/eeepc-laptop/disp
+Date: May 2008
+KernelVersion: 2.6.26
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ This file allows display switching.
+ - 1 = LCD
+ - 2 = CRT
+ - 3 = LCD+CRT
+ If you run X11, you should use xrandr instead.
+
+What: /sys/devices/platform/eeepc-laptop/camera
+Date: May 2008
+KernelVersion: 2.6.26
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the camera. 1 means on, 0 means off.
+
+What: /sys/devices/platform/eeepc-laptop/cardr
+Date: May 2008
+KernelVersion: 2.6.26
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Control the card reader. 1 means on, 0 means off.
+
+What: /sys/devices/platform/eeepc-laptop/cpufv
+Date: Jun 2009
+KernelVersion: 2.6.31
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ Change CPU clock configuration.
+ On the Eee PC 1000H there are three available clock configurations:
+ * 0 -> Super Performance Mode
+ * 1 -> High Performance Mode
+ * 2 -> Power Saving Mode
+ On the Eee PC 701 there are only two available clock configurations.
+ Available configurations are listed in the available_cpufv file.
+ Reading this file will show the raw hexadecimal value, which
+ is defined as follows:
+ | 8 bit | 8 bit |
+ | `---- Current mode
+ `------------ Available modes
+ For example, 0x301 means: mode 1 selected, 3 available modes.
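As a sketch of how the raw value splits into its two fields (illustration only, using the example value from the text):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x301;			/* example value */
	unsigned int mode = raw & 0xff;			/* low byte: current mode */
	unsigned int available = (raw >> 8) & 0xff;	/* high byte: available modes */

	/* Prints "mode 1 of 3 available". */
	printf("mode %u of %u available\n", mode, available);
	return 0;
}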
+
+What: /sys/devices/platform/eeepc-laptop/available_cpufv
+Date: Jun 2009
+KernelVersion: 2.6.31
+Contact: "Corentin Chary" <corentincj@iksaif.net>
+Description:
+ List available cpufv modes.
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 8e145857fc9..df0d089d0fb 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -568,7 +568,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
<para>
The blocks in which the tables are stored are procteted against
accidental access by marking them bad in the memory bad block
- table. The bad block table managment functions are allowed
+ table. The bad block table management functions are allowed
to circumvent this protection.
</para>
<para>
diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl
index 10a150ae2a7..d87f4569e76 100644
--- a/Documentation/DocBook/scsi.tmpl
+++ b/Documentation/DocBook/scsi.tmpl
@@ -317,7 +317,7 @@
<para>
The SAS transport class contains common code to deal with SAS HBAs,
an approximated representation of SAS topologies in the driver model,
- and various sysfs attributes to expose these topologies and managment
+ and various sysfs attributes to expose these topologies and management
interfaces to userspace.
</para>
<para>
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
index 21bc416d887..cf9431db873 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -56,11 +56,7 @@ Graphics Problems?
------------------
If you encounter issues with graphics devices, you can try adding
option intel_iommu=igfx_off to turn off the integrated graphics engine.
-
-If it happens to be a PCI device included in the INCLUDE_ALL Engine,
-then try enabling CONFIG_DMAR_GFX_WA to setup a 1-1 map. We hear
-graphics drivers may be in process of using DMA api's in the near
-future and at that time this option can be yanked out.
+If this fixes anything, please ensure you file a bug reporting the problem.
Some exceptions to IOVA
-----------------------
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 5c555a8b39e..b7f9d3b4bbf 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -183,7 +183,7 @@ the MAN-PAGES maintainer (as listed in the MAINTAINERS file)
a man-pages patch, or at least a notification of the change,
so that some information makes its way into the manual pages.
-Even if the maintainer did not respond in step #4, make sure to ALWAYS
+Even if the maintainer did not respond in step #5, make sure to ALWAYS
copy the maintainer when you change their code.
For small patches you may want to CC the Trivial Patch Monkey
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index aa73e72fd79..6e25c2659e0 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -116,7 +116,7 @@ error:
}
-int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, __u16 nla_type,
void *nla_data, int nla_len)
{
@@ -160,7 +160,7 @@ int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
* Probe the controller in genetlink to find the family id
* for the TASKSTATS family
*/
-int get_family_id(int sd)
+static int get_family_id(int sd)
{
struct {
struct nlmsghdr n;
@@ -190,7 +190,7 @@ int get_family_id(int sd)
return id;
}
-void print_delayacct(struct taskstats *t)
+static void print_delayacct(struct taskstats *t)
{
printf("\n\nCPU %15s%15s%15s%15s\n"
" %15llu%15llu%15llu%15llu\n"
@@ -216,7 +216,7 @@ void print_delayacct(struct taskstats *t)
(unsigned long long)t->freepages_delay_total);
}
-void task_context_switch_counts(struct taskstats *t)
+static void task_context_switch_counts(struct taskstats *t)
{
printf("\n\nTask %15s%15s\n"
" %15llu%15llu\n",
@@ -224,7 +224,7 @@ void task_context_switch_counts(struct taskstats *t)
(unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw);
}
-void print_cgroupstats(struct cgroupstats *c)
+static void print_cgroupstats(struct cgroupstats *c)
{
printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, "
"uninterruptible %llu\n", (unsigned long long)c->nr_sleeping,
@@ -235,7 +235,7 @@ void print_cgroupstats(struct cgroupstats *c)
}
-void print_ioacct(struct taskstats *t)
+static void print_ioacct(struct taskstats *t)
{
printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n",
t->ac_comm,
diff --git a/Documentation/arm/tcm.txt b/Documentation/arm/tcm.txt
new file mode 100644
index 00000000000..074f4be6667
--- /dev/null
+++ b/Documentation/arm/tcm.txt
@@ -0,0 +1,145 @@
+ARM TCM (Tightly-Coupled Memory) handling in Linux
+----
+Written by Linus Walleij <linus.walleij@stericsson.com>
+
+Some ARM SoCs have a so-called TCM (Tightly-Coupled Memory).
+This is usually just a few (4-64) KiB of RAM inside the ARM
+processor.
+
+Because it is embedded inside the CPU, the TCM has a
+Harvard architecture, so there is an ITCM (instruction TCM)
+and a DTCM (data TCM). The DTCM cannot contain any
+instructions, but the ITCM can actually contain data.
+The minimum size of DTCM or ITCM is 4KiB, so the typical
+minimum configuration is 4KiB ITCM and 4KiB DTCM.
+
+ARM CPUs have special registers to read out status, physical
+location and size of TCM memories. arch/arm/include/asm/cputype.h
+defines a CPUID_TCM register that you can read out from the
+system control coprocessor. Documentation from ARM can be found
+at http://infocenter.arm.com, search for "TCM Status Register"
+to see documents for all CPUs. Reading this register you can
+determine if ITCM (bit 0) and/or DTCM (bit 16) is present in the
+machine.
+
+There is also a TCM region register (search for "TCM Region
+Registers" at the ARM site) that can report and modify the location
+and size of TCM memories at runtime. This is used to read out and modify
+TCM location and size. Notice that this is not an MMU table: you
+actually move the physical location of the TCM around. At the
+place you put it, it will mask any underlying RAM from the
+CPU so it is usually wise not to overlap any physical RAM with
+the TCM. The TCM memory exists totally outside the MMU and will
+override any MMU mappings.
+
+Code executing inside the ITCM does not "see" any MMU mappings
+and e.g. register accesses must be made to physical addresses.
+
+TCM is used for a few things:
+
+- FIQ and other interrupt handlers that need deterministic
+ timing and cannot wait for cache misses.
+
+- Idle loops where all external RAM is set to self-refresh
+ retention mode, so only on-chip RAM is accessible by
+ the CPU and then we hang inside ITCM waiting for an
+ interrupt.
+
+- Other operations which imply shutting off or reconfiguring
+ the external RAM controller.
+
+There is an interface for using TCM on the ARM architecture
+in <asm/tcm.h>. Using this interface it is possible to:
+
+- Define the physical address and size of ITCM and DTCM.
+
+- Tag functions to be compiled into ITCM.
+
+- Tag data and constants to be allocated to DTCM and ITCM.
+
+- Have the remaining TCM RAM added to a special
+ allocation pool with gen_pool_create() and gen_pool_add()
+ and provide tcm_alloc() and tcm_free() for this
+ memory. Such a heap is great for things like saving
+ device state when shutting off device power domains.
+
+A machine that has TCM memory shall select HAVE_TCM in
+arch/arm/Kconfig for itself, and then the
+rest of the functionality will depend on the physical
+location and size of ITCM and DTCM to be defined in
+mach/memory.h for the machine. Code that needs to use
+TCM shall #include <asm/tcm.h>. If the TCM is not located
+at the place given in memory.h, it will be moved using
+the TCM Region registers.
+
+Functions to go into ITCM can be tagged like this:
+int __tcmfunc foo(int bar);
+
+Variables to go into DTCM can be tagged like this:
+int __tcmdata foo;
+
+Constants can be tagged like this:
+int __tcmconst foo;
+
+To put assembler into TCM just use
+.section ".tcm.text" or .section ".tcm.data"
+respectively.
+
+Example code:
+
+#include <asm/tcm.h>
+
+/* Uninitialized data */
+static u32 __tcmdata tcmvar;
+/* Initialized data */
+static u32 __tcmdata tcmassigned = 0x2BADBABEU;
+/* Constant */
+static const u32 __tcmconst tcmconst = 0xCAFEBABEU;
+
+static void __tcmlocalfunc tcm_to_tcm(void)
+{
+ int i;
+ for (i = 0; i < 100; i++)
+ tcmvar++;
+}
+
+static void __tcmfunc hello_tcm(void)
+{
+ /* Some abstract code that runs in ITCM */
+ int i;
+ for (i = 0; i < 100; i++) {
+ tcmvar++;
+ }
+ tcm_to_tcm();
+}
+
+static void __init test_tcm(void)
+{
+ u32 *tcmem;
+ int i;
+
+ hello_tcm();
+ printk("Hello TCM executed from ITCM RAM\n");
+
+ printk("TCM variable from testrun: %u @ %p\n", tcmvar, &tcmvar);
+ tcmvar = 0xDEADBEEFU;
+ printk("TCM variable: 0x%x @ %p\n", tcmvar, &tcmvar);
+
+ printk("TCM assigned variable: 0x%x @ %p\n", tcmassigned, &tcmassigned);
+
+ printk("TCM constant: 0x%x @ %p\n", tcmconst, &tcmconst);
+
+ /* Allocate some TCM memory from the pool */
+ tcmem = tcm_alloc(20);
+ if (tcmem) {
+ printk("TCM Allocated 20 bytes of TCM @ %p\n", tcmem);
+ tcmem[0] = 0xDEADBEEFU;
+ tcmem[1] = 0x2BADBABEU;
+ tcmem[2] = 0xCAFEBABEU;
+ tcmem[3] = 0xDEADBEEFU;
+ tcmem[4] = 0x2BADBABEU;
+ for (i = 0; i < 5; i++)
+ printk("TCM tcmem[%d] = %08x\n", i, tcmem[i]);
+ tcm_free(tcmem, 20);
+ }
+}
diff --git a/Documentation/auxdisplay/cfag12864b-example.c b/Documentation/auxdisplay/cfag12864b-example.c
index 2caeea5e499..e7823ffb1ca 100644
--- a/Documentation/auxdisplay/cfag12864b-example.c
+++ b/Documentation/auxdisplay/cfag12864b-example.c
@@ -62,7 +62,7 @@ unsigned char cfag12864b_buffer[CFAG12864B_SIZE];
* Unable to open: return = -1
* Unable to mmap: return = -2
*/
-int cfag12864b_init(char *path)
+static int cfag12864b_init(char *path)
{
cfag12864b_fd = open(path, O_RDWR);
if (cfag12864b_fd == -1)
@@ -81,7 +81,7 @@ int cfag12864b_init(char *path)
/*
* exit a cfag12864b framebuffer device
*/
-void cfag12864b_exit(void)
+static void cfag12864b_exit(void)
{
munmap(cfag12864b_mem, CFAG12864B_SIZE);
close(cfag12864b_fd);
@@ -90,7 +90,7 @@ void cfag12864b_exit(void)
/*
* set (x, y) pixel
*/
-void cfag12864b_set(unsigned char x, unsigned char y)
+static void cfag12864b_set(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] |=
@@ -100,7 +100,7 @@ void cfag12864b_set(unsigned char x, unsigned char y)
/*
* unset (x, y) pixel
*/
-void cfag12864b_unset(unsigned char x, unsigned char y)
+static void cfag12864b_unset(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &=
@@ -113,7 +113,7 @@ void cfag12864b_unset(unsigned char x, unsigned char y)
* Pixel off: return = 0
* Pixel on: return = 1
*/
-unsigned char cfag12864b_isset(unsigned char x, unsigned char y)
+static unsigned char cfag12864b_isset(unsigned char x, unsigned char y)
{
if (CFAG12864B_CHECK(x, y))
if (cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &
@@ -126,7 +126,7 @@ unsigned char cfag12864b_isset(unsigned char x, unsigned char y)
/*
* not (x, y) pixel
*/
-void cfag12864b_not(unsigned char x, unsigned char y)
+static void cfag12864b_not(unsigned char x, unsigned char y)
{
if (cfag12864b_isset(x, y))
cfag12864b_unset(x, y);
@@ -137,7 +137,7 @@ void cfag12864b_not(unsigned char x, unsigned char y)
/*
* fill (set all pixels)
*/
-void cfag12864b_fill(void)
+static void cfag12864b_fill(void)
{
unsigned short i;
@@ -148,7 +148,7 @@ void cfag12864b_fill(void)
/*
* clear (unset all pixels)
*/
-void cfag12864b_clear(void)
+static void cfag12864b_clear(void)
{
unsigned short i;
@@ -162,7 +162,7 @@ void cfag12864b_clear(void)
* Pixel off: src[i] = 0
* Pixel on: src[i] > 0
*/
-void cfag12864b_format(unsigned char * matrix)
+static void cfag12864b_format(unsigned char * matrix)
{
unsigned char i, j, n;
@@ -182,7 +182,7 @@ void cfag12864b_format(unsigned char * matrix)
/*
* blit buffer to lcd
*/
-void cfag12864b_blit(void)
+static void cfag12864b_blit(void)
{
memcpy(cfag12864b_mem, cfag12864b_buffer, CFAG12864B_SIZE);
}
@@ -194,11 +194,10 @@ void cfag12864b_blit(void)
*/
#include <stdio.h>
-#include <string.h>
#define EXAMPLES 6
-void example(unsigned char n)
+static void example(unsigned char n)
{
unsigned short i, j;
unsigned char matrix[CFAG12864B_WIDTH * CFAG12864B_HEIGHT];
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 6eb1a97e88c..455d4e6d346 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -408,6 +408,26 @@ You can attach the current shell task by echoing 0:
# echo 0 > tasks
+2.3 Mounting hierarchies by name
+--------------------------------
+
+Passing the name=<x> option when mounting a cgroups hierarchy
+associates the given name with the hierarchy. This can be used when
+mounting a pre-existing hierarchy, in order to refer to it by name
+rather than by its set of active subsystems. Each hierarchy is either
+nameless, or has a unique name.
+
+The name should match [\w.-]+
+
+When passing a name=<x> option for a new hierarchy, you need to
+specify subsystems manually; the legacy behaviour of mounting all
+subsystems when none are explicitly specified is not supported when
+you give the hierarchy a name.
+
+The hierarchy's name appears as part of the hierarchy description
+in /proc/mounts and /proc/<pid>/cgroups.
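+
+For example, to mount the cpuset subsystem in a hierarchy named
+"myhier" (the name and mount point below are arbitrary):
+
+# mkdir /dev/cgroup/myhier
+# mount -t cgroup -o name=myhier,cpuset cgroup /dev/cgroup/myhier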
+
+
3. Kernel API
=============
@@ -501,7 +521,7 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
called multiple times against a cgroup.
int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *task)
+ struct task_struct *task, bool threadgroup)
(cgroup_mutex held by caller)
Called prior to moving a task into a cgroup; if the subsystem
@@ -509,14 +529,20 @@ returns an error, this will abort the attach operation. If a NULL
task is passed, then a successful result indicates that *any*
unspecified task can be moved into the cgroup. Note that this isn't
called on a fork. If this method returns 0 (success) then this should
-remain valid while the caller holds cgroup_mutex.
+remain valid while the caller holds cgroup_mutex. If threadgroup is
+true, then a successful result indicates that all threads in the given
+thread's threadgroup can be moved together.
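+
+As an illustration, a subsystem could honour the threadgroup flag along
+these lines (foo_task_allowed() stands in for whatever subsystem-specific
+check applies):
+
+int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		   struct task_struct *task, bool threadgroup)
+{
+	if (!foo_task_allowed(cgrp, task))
+		return -EPERM;
+	if (threadgroup) {
+		struct task_struct *t;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(t, &task->thread_group, thread_group) {
+			if (!foo_task_allowed(cgrp, t)) {
+				rcu_read_unlock();
+				return -EPERM;
+			}
+		}
+		rcu_read_unlock();
+	}
+	return 0;
+}
+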
void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup *old_cgrp, struct task_struct *task)
+ struct cgroup *old_cgrp, struct task_struct *task,
+ bool threadgroup)
(cgroup_mutex held by caller)
Called after the task has been attached to the cgroup, to allow any
post-attachment activity that requires memory allocations or blocking.
+If threadgroup is true, the subsystem should take care of all threads
+in the specified thread's threadgroup. Currently does not support any
+subsystem that might need the old_cgrp for every thread in the group.
void fork(struct cgroup_subsys *ss, struct task_struct *task)
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 23d1262c077..b871f2552b4 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -179,6 +179,9 @@ The reclaim algorithm has not been modified for cgroups, except that
pages that are selected for reclaiming come from the per cgroup LRU
list.
+NOTE: Reclaim does not work for the root cgroup, since we cannot set any
+limits on the root cgroup.
+
2. Locking
The memory controller uses the following hierarchy
@@ -210,6 +213,7 @@ We can alter the memory limit:
NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
mega or gigabytes.
NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
+NOTE: We cannot set limits on the root cgroup any more.
# cat /cgroups/0/memory.limit_in_bytes
4194304
@@ -375,7 +379,42 @@ cgroups created below it.
NOTE2: This feature can be enabled/disabled per subtree.
-7. TODO
+7. Soft limits
+
+Soft limits allow for greater sharing of memory. The idea behind soft limits
+is to allow control groups to use as much of the memory as needed, provided
+
+a. There is no memory contention
+b. They do not exceed their hard limit
+
+When the system detects memory contention or low memory, control groups
+are pushed back to their soft limits. If the soft limit of each control
+group is very high, they are pushed back as much as possible to make
+sure that one control group does not starve the others of memory.
+
+Please note that soft limits are a best-effort feature; they come with
+no guarantees, but the kernel does its best to make sure that when
+memory is heavily contended for, memory is allocated based on the soft
+limit hints/setup. Currently soft-limit-based reclaim is set up so that
+it gets invoked from balance_pgdat (kswapd).
+
+7.1 Interface
+
+Soft limits can be set up using the following commands (in this example we
+assume a soft limit of 256 megabytes)
+
+# echo 256M > memory.soft_limit_in_bytes
+
+If we want to change this to 1G, we can at any time use
+
+# echo 1G > memory.soft_limit_in_bytes
+
+NOTE1: Soft limits take effect over a long period of time, since they involve
+ reclaiming memory for balancing between memory cgroups
+NOTE2: It is recommended that the soft limit always be set below the hard limit,
+ otherwise the hard limit will take precedence.
+
+8. TODO
1. Add support for accounting huge pages (as a separate controller)
2. Make per-cgroup scanner reclaim not-shared pages first
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 9f59fcbf5d8..ba046b8fa92 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -54,20 +54,23 @@ features surfaced as a result:
3.1 General format of the API:
struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
- enum async_tx_flags flags,
- struct dma_async_tx_descriptor *dependency,
- dma_async_tx_callback callback_routine,
- void *callback_parameter);
+async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
3.2 Supported operations:
-memcpy - memory copy between a source and a destination buffer
-memset - fill a destination buffer with a byte value
-xor - xor a series of source buffers and write the result to a
- destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
- result is zero. The implementation attempts to prevent
- writes to memory
+memcpy - memory copy between a source and a destination buffer
+memset - fill a destination buffer with a byte value
+xor - xor a series of source buffers and write the result to a
+ destination buffer
+xor_val - xor a series of source buffers and set a flag if the
+ result is zero. The implementation attempts to prevent
+ writes to memory
+pq - generate the p+q (raid6 syndrome) from a series of source buffers
+pq_val - validate that a p and or q buffer are in sync with a given series of
+ sources
+datap - (raid6_datap_recov) recover a raid6 data block and the p block
+ from the given sources
+2data - (raid6_2data_recov) recover 2 raid6 data blocks from the given
+ sources
3.3 Descriptor management:
The return value is non-NULL and points to a 'descriptor' when the operation
@@ -80,8 +83,8 @@ acknowledged by the application before the offload engine driver is allowed to
recycle (or free) the descriptor. A descriptor can be acked by one of the
following methods:
1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
-2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
- descriptor of a new operation.
+2/ submitting an unacknowledged descriptor as a dependency to another
+ async_tx call will implicitly set the acknowledged state.
3/ calling async_tx_ack() on the descriptor.
3.4 When does the operation execute?
@@ -119,30 +122,42 @@ of an operation.
Perform a xor->copy->xor operation where each operation depends on the
result from the previous operation:
-void complete_xor_copy_xor(void *param)
+void callback(void *param)
{
- printk("complete\n");
+ struct completion *cmp = param;
+
+ complete(cmp);
}
-int run_xor_copy_xor(struct page **xor_srcs,
- int xor_src_cnt,
- struct page *xor_dest,
- size_t xor_len,
- struct page *copy_src,
- struct page *copy_dest,
- size_t copy_len)
+void run_xor_copy_xor(struct page **xor_srcs,
+ int xor_src_cnt,
+ struct page *xor_dest,
+ size_t xor_len,
+ struct page *copy_src,
+ struct page *copy_dest,
+ size_t copy_len)
{
struct dma_async_tx_descriptor *tx;
+ addr_conv_t addr_conv[xor_src_cnt];
+ struct async_submit_ctl submit;
+ struct completion cmp;
+
+ init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
+ addr_conv);
+ tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
- tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
- ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
- tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
- ASYNC_TX_DEP_ACK, tx, NULL, NULL);
- tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
- ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
- tx, complete_xor_copy_xor, NULL);
+ submit.depend_tx = tx;
+ tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
+
+ init_completion(&cmp);
+ init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
+ callback, &cmp, addr_conv);
+ tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
async_tx_issue_pending_all();
+
+ wait_for_completion(&cmp);
}
See include/linux/async_tx.h for more information on the flags. See the
diff --git a/Documentation/fb/ep93xx-fb.txt b/Documentation/fb/ep93xx-fb.txt
new file mode 100644
index 00000000000..5af1bd9effa
--- /dev/null
+++ b/Documentation/fb/ep93xx-fb.txt
@@ -0,0 +1,135 @@
+================================
+Driver for EP93xx LCD controller
+================================
+
+The EP93xx LCD controller can drive both standard desktop monitors and
+embedded LCD displays. If you have a standard desktop monitor then you
+can use the standard Linux video mode database. In your board file:
+
+ static struct ep93xxfb_mach_info some_board_fb_info = {
+ .num_modes = EP93XXFB_USE_MODEDB,
+ .bpp = 16,
+ };
+
+If you have an embedded LCD display then you need to define a video
+mode for it as follows:
+
+ static struct fb_videomode some_board_video_modes[] = {
+ {
+ .name = "some_lcd_name",
+ /* Pixel clock, porches, etc */
+ },
+ };
+
+Note that the pixel clock value is in pico-seconds. You can use the
+KHZ2PICOS macro to convert the pixel clock value. Most other values
+are in pixel clocks. See Documentation/fb/framebuffer.txt for further
+details.
+
+The ep93xxfb_mach_info structure for your board should look like the
+following:
+
+ static struct ep93xxfb_mach_info some_board_fb_info = {
+ .num_modes = ARRAY_SIZE(some_board_video_modes),
+ .modes = some_board_video_modes,
+ .default_mode = &some_board_video_modes[0],
+ .bpp = 16,
+ };
+
+The framebuffer device can be registered by adding the following to
+your board initialisation function:
+
+ ep93xx_register_fb(&some_board_fb_info);
+
+=====================
+Video Attribute Flags
+=====================
+
+The ep93xxfb_mach_info structure has a flags field which can be used
+to configure the controller. The video attributes flags are fully
+documented in section 7 of the EP93xx users' guide. The following
+flags are available:
+
+EP93XXFB_PCLK_FALLING Clock data on the falling edge of the
+ pixel clock. The default is to clock
+ data on the rising edge.
+
+EP93XXFB_SYNC_BLANK_HIGH Blank signal is active high. By
+ default the blank signal is active low.
+
+EP93XXFB_SYNC_HORIZ_HIGH Horizontal sync is active high. By
+ default the horizontal sync is active low.
+
+EP93XXFB_SYNC_VERT_HIGH Vertical sync is active high. By
+ default the vertical sync is active low.
+
+The physical address of the framebuffer can be controlled using the
+following flags:
+
+EP93XXFB_USE_SDCSN0 Use SDCSn[0] for the framebuffer. This
+ is the default setting.
+
+EP93XXFB_USE_SDCSN1 Use SDCSn[1] for the framebuffer.
+
+EP93XXFB_USE_SDCSN2 Use SDCSn[2] for the framebuffer.
+
+EP93XXFB_USE_SDCSN3 Use SDCSn[3] for the framebuffer.
+
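+For example, a board that clocks data on the falling edge of the pixel
+clock and places the framebuffer in SDCSn[2] might use (values shown
+for illustration only):
+
+	static struct ep93xxfb_mach_info some_board_fb_info = {
+		.num_modes	= EP93XXFB_USE_MODEDB,
+		.bpp		= 16,
+		.flags		= EP93XXFB_PCLK_FALLING | EP93XXFB_USE_SDCSN2,
+	};
+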
+==================
+Platform callbacks
+==================
+
+The EP93xx framebuffer driver supports three optional platform
+callbacks: setup, teardown and blank. The setup and teardown functions
+are called when the framebuffer driver is installed and removed
+respectively. The blank function is called whenever the display is
+blanked or unblanked.
+
+The setup and teardown callbacks are passed the platform_device structure as
+an argument. The fb_info and ep93xxfb_mach_info structures can be
+obtained as follows:
+
+ static int some_board_fb_setup(struct platform_device *pdev)
+ {
+ struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
+ struct fb_info *fb_info = platform_get_drvdata(pdev);
+
+ /* Board specific framebuffer setup */
+ }
+
+======================
+Setting the video mode
+======================
+
+The video mode is set using the following syntax:
+
+ video=XRESxYRES[-BPP][@REFRESH]
+
+If the EP93xx video driver is built-in then the video mode is set on
+the Linux kernel command line, for example:
+
+ video=ep93xx-fb:800x600-16@60
+
+If the EP93xx video driver is built as a module then the video mode is
+set when the module is installed:
+
+ modprobe ep93xx-fb video=320x240
+
+==============
+Screenpage bug
+==============
+
+At least on the EP9315 there is a silicon bug which causes bit 27 of
+the VIDSCRNPAGE (framebuffer physical offset) to be tied low. There is
+an unofficial errata for this bug at:
+ http://marc.info/?l=linux-arm-kernel&m=110061245502000&w=2
+
+By default the EP93xx framebuffer driver checks if the allocated physical
+address has bit 27 set. If it does, then the memory is freed and an
+error is returned. The check can be disabled by adding the following
+option when loading the driver:
+
+ ep93xx-fb.check_screenpage_bug=0
+
+In some cases it may be possible to reconfigure your SDRAM layout to
+avoid this bug. See section 13 of the EP93xx users' guide for details.
diff --git a/Documentation/fb/matroxfb.txt b/Documentation/fb/matroxfb.txt
index ad7a67707d6..e5ce8a1a978 100644
--- a/Documentation/fb/matroxfb.txt
+++ b/Documentation/fb/matroxfb.txt
@@ -186,9 +186,7 @@ noinverse - show true colors on screen. It is default.
dev:X - bind driver to device X. Driver numbers device from 0 up to N,
where device 0 is first `known' device found, 1 second and so on.
lspci lists devices in this order.
- Default is `every' known device for driver with multihead support
- and first working device (usually dev:0) for driver without
- multihead support.
+ Default is `every' known device.
nohwcursor - disables hardware cursor (use software cursor instead).
hwcursor - enables hardware cursor. It is default. If you are using
non-accelerated mode (`noaccel' or `fbset -accel false'), software
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index fa75220f8d3..89a47b5aff0 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -354,14 +354,6 @@ Who: Krzysztof Piotr Oledzki <ole@ans.pl>
---------------------------
-What: fscher and fscpos drivers
-When: June 2009
-Why: Deprecated by the new fschmd driver.
-Who: Hans de Goede <hdegoede@redhat.com>
- Jean Delvare <khali@linux-fr.org>
-
----------------------------
-
What: sysfs ui for changing p4-clockmod parameters
When: September 2009
Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index 6208f55c44c..57e0b80a527 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -18,11 +18,11 @@ the 9p client is available in the form of a USENIX paper:
Other applications are described in the following papers:
* XCPU & Clustering
- http://www.xcpu.org/xcpu-talk.pdf
+ http://xcpu.org/papers/xcpu-talk.pdf
* KVMFS: control file system for KVM
- http://www.xcpu.org/kvmfs.pdf
- * CellFS: A New ProgrammingModel for the Cell BE
- http://www.xcpu.org/cellfs-talk.pdf
+ http://xcpu.org/papers/kvmfs.pdf
+ * CellFS: A New Programming Model for the Cell BE
+ http://xcpu.org/papers/cellfs-talk.pdf
* PROSE I/O: Using 9p to enable Application Partitions
http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf
@@ -48,6 +48,7 @@ OPTIONS
(see rfdno and wfdno)
virtio - connect to the next virtio channel available
(from lguest or KVM with trans_virtio module)
+ rdma - connect to a specified RDMA channel
uname=name user name to attempt mount as on the remote server. The
server may override or ignore this value. Certain user
@@ -59,16 +60,22 @@ OPTIONS
cache=mode specifies a caching policy. By default, no caches are used.
loose = no attempts are made at consistency,
intended for exclusive, read-only mounts
+ fscache = use FS-Cache for a persistent, read-only
+ cache backend.
debug=n specifies debug level. The debug level is a bitmask.
- 0x01 = display verbose error messages
- 0x02 = developer debug (DEBUG_CURRENT)
- 0x04 = display 9p trace
- 0x08 = display VFS trace
- 0x10 = display Marshalling debug
- 0x20 = display RPC debug
- 0x40 = display transport debug
- 0x80 = display allocation debug
+ 0x01 = display verbose error messages
+ 0x02 = developer debug (DEBUG_CURRENT)
+ 0x04 = display 9p trace
+ 0x08 = display VFS trace
+ 0x10 = display Marshalling debug
+ 0x20 = display RPC debug
+ 0x40 = display transport debug
+ 0x80 = display allocation debug
+ 0x100 = display protocol message debug
+ 0x200 = display Fid debug
+ 0x400 = display packet debug
+ 0x800 = display fscache tracing debug
rfdno=n the file descriptor for reading with trans=fd
@@ -100,6 +107,10 @@ OPTIONS
any = v9fs does single attach and performs all
operations as one user
+  cachetag	cache tag to use for the specified persistent cache.
+ cache tags for existing cache sessions can be listed at
+ /sys/fs/9p/caches. (applies only to cache=fscache)
+
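+  For example, a mount using the fscache backend with an explicit cache
+  tag might look like this (server address, mount point and tag are
+  examples only):
+
+	mount -t 9p -o trans=tcp,cache=fscache,cachetag=tag0 192.168.1.2 /mnt/9p
+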
RESOURCES
=========
@@ -118,7 +129,7 @@ and export.
A Linux version of the 9p server is now maintained under the npfs project
on sourceforge (http://sourceforge.net/projects/npfs). The currently
maintained version is the single-threaded version of the server (named spfs)
-available from the same CVS repository.
+available from the same SVN repository.
There are user and developer mailing lists available through the v9fs project
on sourceforge (http://sourceforge.net/projects/v9fs).
@@ -126,7 +137,8 @@ on sourceforge (http://sourceforge.net/projects/v9fs).
A stand-alone version of the module (which should build for any 2.6 kernel)
is available via (http://github.com/ericvh/9p-sac/tree/master)
-News and other information is maintained on SWiK (http://swik.net/v9fs).
+News and other information is maintained on SWiK (http://swik.net/v9fs)
+and the Wiki (http://sf.net/apps/mediawiki/v9fs/index.php).
Bug reports may be issued through the kernel.org bugzilla
(http://bugzilla.kernel.org)
diff --git a/Documentation/filesystems/ncpfs.txt b/Documentation/filesystems/ncpfs.txt
index f12c30c93f2..5af164f4b37 100644
--- a/Documentation/filesystems/ncpfs.txt
+++ b/Documentation/filesystems/ncpfs.txt
@@ -7,6 +7,6 @@ ftp.gwdg.de/pub/linux/misc/ncpfs, but sunsite and its many mirrors
will have it as well.
Related products are linware and mars_nwe, which will give Linux partial
-NetWare server functionality. Linware's home site is
-klokan.sh.cvut.cz/pub/linux/linware; mars_nwe can be found on
-ftp.gwdg.de/pub/linux/misc/ncpfs.
+NetWare server functionality.
+
+mars_nwe can be found on ftp.gwdg.de/pub/linux/misc/ncpfs.
diff --git a/Documentation/filesystems/nfs41-server.txt b/Documentation/filesystems/nfs41-server.txt
index 05d81cbcb2e..5920fe26e6f 100644
--- a/Documentation/filesystems/nfs41-server.txt
+++ b/Documentation/filesystems/nfs41-server.txt
@@ -11,6 +11,11 @@ the /proc/fs/nfsd/versions control file. Note that to write this
control file, the nfsd service must be taken down. Use your user-mode
nfs-utils to set this up; see rpc.nfsd(8)
+(Warning: older servers will interpret "+4.1" and "-4.1" as "+4" and
+"-4", respectively. Therefore, code meant to work on both new and old
+kernels must turn 4.1 on or off *before* turning support for version 4
+on or off; rpc.nfsd does this correctly.)
+
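+For example, to enable NFSv4.1 by writing the control file directly
+(the nfsd service must be stopped first, and rpc.nfsd remains the
+preferred interface), the minor version is turned on before version 4
+itself:
+
+  # echo "+4.1" > /proc/fs/nfsd/versions
+  # echo "+4" > /proc/fs/nfsd/versions
+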
The NFSv4 minorversion 1 (NFSv4.1) implementation in nfsd is based
on the latest NFSv4.1 Internet Draft:
http://tools.ietf.org/html/draft-ietf-nfsv4-minorversion1-29
@@ -25,6 +30,49 @@ are still under development out of tree.
See http://wiki.linux-nfs.org/wiki/index.php/PNFS_prototype_design
for more information.
+The current implementation is intended for developers only: while it
+does support ordinary file operations on clients we have tested against
+(including the linux client), it is incomplete in ways which may limit
+features unexpectedly, cause known bugs in rare cases, or cause
+interoperability problems with future clients. Known issues:
+
+ - gss support is questionable: currently mounts with kerberos
+ from a linux client are possible, but we aren't really
+ conformant with the spec (for example, we don't use kerberos
+ on the backchannel correctly).
+ - no trunking support: no clients currently take advantage of
+ trunking, but this is a mandatory feature, and its use is
+ recommended to clients in a number of places. (E.g. to ensure
+ timely renewal in case an existing connection's retry timeouts
+ have gotten too long; see section 8.3 of the draft.)
+ Therefore, lack of this feature may cause future clients to
+ fail.
+ - Incomplete backchannel support: incomplete backchannel gss
+ support and no support for BACKCHANNEL_CTL mean that
+ callbacks (hence delegations and layouts) may not be
+ available and clients confused by the incomplete
+ implementation may fail.
+ - Server reboot recovery is unsupported; if the server reboots,
+ clients may fail.
+ - We do not support SSV, which provides security for shared
+ client-server state (thus preventing unauthorized tampering
+ with locks and opens, for example). It is mandatory for
+ servers to support this, though no clients use it yet.
+ - Mandatory operations which we do not support, such as
+ DESTROY_CLIENTID, FREE_STATEID, SECINFO_NO_NAME, and
+ TEST_STATEID, are not currently used by clients, but will be
+ (and the spec recommends their use in common cases), and
+ clients should not be expected to know how to recover from the
+ case where they are not supported. This will eventually cause
+ interoperability failures.
+
+In addition, some limitations are inherited from the current NFSv4
+implementation:
+
+ - Incomplete delegation enforcement: if a file is renamed or
+ unlinked, a client holding a delegation may continue to
+ indefinitely allow opens of the file under the old name.
+
The table below, taken from the NFSv4.1 document, lists
the operations that are mandatory to implement (REQ), optional
(OPT), and NFSv4.0 operations that are required not to implement (MNI)
@@ -142,6 +190,12 @@ NS*| CB_WANTS_CANCELLED | OPT | FDELG, | Section 20.10 |
Implementation notes:
+DELEGPURGE:
+* mandatory only for servers that support CLAIM_DELEGATE_PREV and/or
+ CLAIM_DELEG_PREV_FH (which allows clients to keep delegations that
+ persist across client reboots). Thus we need not implement this for
+ now.
+
EXCHANGE_ID:
* only SP4_NONE state protection supported
* implementation ids are ignored
diff --git a/Documentation/filesystems/nfsroot.txt b/Documentation/filesystems/nfsroot.txt
index 68baddf3c3e..3ba0b945aaf 100644
--- a/Documentation/filesystems/nfsroot.txt
+++ b/Documentation/filesystems/nfsroot.txt
@@ -105,7 +105,7 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
the client address and this parameter is NOT empty only
replies from the specified server are accepted.
- Only required for for NFS root. That is autoconfiguration
+ Only required for NFS root. That is, autoconfiguration
will not be triggered if it is missing and NFS root is not
in operation.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ffead13f944..b5aee7838a0 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -176,6 +176,7 @@ read the file /proc/PID/status:
CapBnd: ffffffffffffffff
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1
+ Stack usage: 12 kB
This shows you nearly the same information you would get if you viewed it with
the ps command. In fact, ps uses the proc file system to obtain its
@@ -229,6 +230,7 @@ Table 1-2: Contents of the statm files (as of 2.6.30-rc7)
Mems_allowed_list Same as previous, but in "list format"
voluntary_ctxt_switches number of voluntary context switches
nonvoluntary_ctxt_switches number of non voluntary context switches
+ Stack usage: stack usage high water mark (rounded up to page size)
..............................................................................
Table 1-3: Contents of the statm files (as of 2.6.8-rc3)
@@ -307,7 +309,7 @@ address perms offset dev inode pathname
08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0
+a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4]
a7eb2000-a7eb3000 ---p 00000000 00:00 0
a7eb3000-a7ed5000 rw-p 00000000 00:00 0
a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
@@ -343,6 +345,7 @@ is not associated with a file:
[stack] = the stack of the main process
[vdso] = the "virtual dynamic shared object",
the kernel system call handler
+ [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size
or if empty, the mapping is anonymous.
@@ -375,6 +378,19 @@ of memory currently marked as referenced or accessed.
This file is only present if the CONFIG_MMU kernel configuration option is
enabled.
+The /proc/PID/clear_refs file is used to reset the PG_Referenced and ACCESSED/YOUNG
+bits on both physical and virtual pages associated with a process.
+To clear the bits for all the pages associated with the process
+ > echo 1 > /proc/PID/clear_refs
+
+To clear the bits for the anonymous pages associated with the process
+ > echo 2 > /proc/PID/clear_refs
+
+To clear the bits for the file mapped pages associated with the process
+ > echo 3 > /proc/PID/clear_refs
+Any other value written to /proc/PID/clear_refs will have no effect.
+
+
1.2 Kernel data
---------------
@@ -1032,9 +1048,9 @@ Various pieces of information about kernel activity are available in the
since the system first booted. For a quick look, simply cat the file:
> cat /proc/stat
- cpu 2255 34 2290 22625563 6290 127 456 0
- cpu0 1132 34 1441 11311718 3675 127 438 0
- cpu1 1123 0 849 11313845 2614 0 18 0
+ cpu 2255 34 2290 22625563 6290 127 456 0 0
+ cpu0 1132 34 1441 11311718 3675 127 438 0 0
+ cpu1 1123 0 849 11313845 2614 0 18 0 0
intr 114930548 113199788 3 0 5 263 0 4 [... lots more numbers ...]
ctxt 1990473
btime 1062191376
@@ -1056,6 +1072,7 @@ second). The meanings of the columns are as follows, from left to right:
- irq: servicing interrupts
- softirq: servicing softirqs
- steal: involuntary wait
+- guest: running a guest
The "intr" line gives counts of interrupts serviced since boot time, for each
of the possible system interrupts. The first column is the total of all
@@ -1191,7 +1208,7 @@ The following heuristics are then applied:
* if the task was reniced, its score doubles
* superuser or direct hardware access tasks (CAP_SYS_ADMIN, CAP_SYS_RESOURCE
or CAP_SYS_RAWIO) have their score divided by 4
- * if oom condition happened in one cpuset and checked task does not belong
+ * if the oom condition happened in one cpuset and the checked process does not belong
to it, its score is divided by 8
* the resulting score is multiplied by two to the power of oom_adj, i.e.
points <<= oom_adj when it is positive and
diff --git a/Documentation/filesystems/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index 736540045dc..23a181074f9 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -4,7 +4,7 @@ Shared Subtrees
Contents:
1) Overview
2) Features
- 3) smount command
+ 3) Setting mount states
4) Use-case
5) Detailed semantics
6) Quiz
@@ -41,14 +41,14 @@ replicas continue to be exactly same.
Here is an example:
- Lets say /mnt has a mount that is shared.
+ Let's say /mnt has a mount that is shared.
mount --make-shared /mnt
- note: mount command does not yet support the --make-shared flag.
- I have included a small C program which does the same by executing
- 'smount /mnt shared'
+ Note: mount(8) command now supports the --make-shared flag,
+ so the sample 'smount' program is no longer needed and has been
+ removed.
- #mount --bind /mnt /tmp
+ # mount --bind /mnt /tmp
The above command replicates the mount at /mnt to the mountpoint /tmp
and the contents of both the mounts remain identical.
@@ -58,8 +58,8 @@ replicas continue to be exactly same.
#ls /tmp
a b c
- Now lets say we mount a device at /tmp/a
- #mount /dev/sd0 /tmp/a
+ Now let's say we mount a device at /tmp/a
+ # mount /dev/sd0 /tmp/a
#ls /tmp/a
t1 t2 t2
@@ -80,21 +80,20 @@ replicas continue to be exactly same.
Here is an example:
- Lets say /mnt has a mount which is shared.
- #mount --make-shared /mnt
+ Let's say /mnt has a mount which is shared.
+ # mount --make-shared /mnt
- Lets bind mount /mnt to /tmp
- #mount --bind /mnt /tmp
+ Let's bind mount /mnt to /tmp
+ # mount --bind /mnt /tmp
the new mount at /tmp becomes a shared mount and it is a replica of
the mount at /mnt.
- Now lets make the mount at /tmp; a slave of /mnt
- #mount --make-slave /tmp
- [or smount /tmp slave]
+ Now let's make the mount at /tmp a slave of /mnt
+ # mount --make-slave /tmp
- lets mount /dev/sd0 on /mnt/a
- #mount /dev/sd0 /mnt/a
+ let's mount /dev/sd0 on /mnt/a
+ # mount /dev/sd0 /mnt/a
#ls /mnt/a
t1 t2 t3
@@ -104,9 +103,9 @@ replicas continue to be exactly same.
Note the mount event has propagated to the mount at /tmp
- However lets see what happens if we mount something on the mount at /tmp
+ However let's see what happens if we mount something on the mount at /tmp
- #mount /dev/sd1 /tmp/b
+ # mount /dev/sd1 /tmp/b
#ls /tmp/b
s1 s2 s3
@@ -124,12 +123,11 @@ replicas continue to be exactly same.
2d) A unbindable mount is a unbindable private mount
- lets say we have a mount at /mnt and we make is unbindable
+ let's say we have a mount at /mnt and we make it unbindable
- #mount --make-unbindable /mnt
- [ smount /mnt unbindable ]
+ # mount --make-unbindable /mnt
- Lets try to bind mount this mount somewhere else.
+ Let's try to bind mount this mount somewhere else.
# mount --bind /mnt /tmp
mount: wrong fs type, bad option, bad superblock on /mnt,
or too many mounted file systems
@@ -137,149 +135,15 @@ replicas continue to be exactly same.
Binding an unbindable mount is an invalid operation.
-3) smount command
+3) Setting mount states
- Currently the mount command is not aware of shared subtree features.
- Work is in progress to add the support in mount ( util-linux package ).
- Till then use the following program.
+ The mount command (util-linux package) can be used to set mount
+ states:
- ------------------------------------------------------------------------
- //
- //this code was developed my Miklos Szeredi <miklos@szeredi.hu>
- //and modified by Ram Pai <linuxram@us.ibm.com>
- // sample usage:
- // smount /tmp shared
- //
- #include <stdio.h>
- #include <stdlib.h>
- #include <unistd.h>
- #include <string.h>
- #include <sys/mount.h>
- #include <sys/fsuid.h>
-
- #ifndef MS_REC
- #define MS_REC 0x4000 /* 16384: Recursive loopback */
- #endif
-
- #ifndef MS_SHARED
- #define MS_SHARED 1<<20 /* Shared */
- #endif
-
- #ifndef MS_PRIVATE
- #define MS_PRIVATE 1<<18 /* Private */
- #endif
-
- #ifndef MS_SLAVE
- #define MS_SLAVE 1<<19 /* Slave */
- #endif
-
- #ifndef MS_UNBINDABLE
- #define MS_UNBINDABLE 1<<17 /* Unbindable */
- #endif
-
- int main(int argc, char *argv[])
- {
- int type;
- if(argc != 3) {
- fprintf(stderr, "usage: %s dir "
- "<rshared|rslave|rprivate|runbindable|shared|slave"
- "|private|unbindable>\n" , argv[0]);
- return 1;
- }
-
- fprintf(stdout, "%s %s %s\n", argv[0], argv[1], argv[2]);
-
- if (strcmp(argv[2],"rshared")==0)
- type=(MS_SHARED|MS_REC);
- else if (strcmp(argv[2],"rslave")==0)
- type=(MS_SLAVE|MS_REC);
- else if (strcmp(argv[2],"rprivate")==0)
- type=(MS_PRIVATE|MS_REC);
- else if (strcmp(argv[2],"runbindable")==0)
- type=(MS_UNBINDABLE|MS_REC);
- else if (strcmp(argv[2],"shared")==0)
- type=MS_SHARED;
- else if (strcmp(argv[2],"slave")==0)
- type=MS_SLAVE;
- else if (strcmp(argv[2],"private")==0)
- type=MS_PRIVATE;
- else if (strcmp(argv[2],"unbindable")==0)
- type=MS_UNBINDABLE;
- else {
- fprintf(stderr, "invalid operation: %s\n", argv[2]);
- return 1;
- }
- setfsuid(getuid());
-
- if(mount("", argv[1], "dontcare", type, "") == -1) {
- perror("mount");
- return 1;
- }
- return 0;
- }
- -----------------------------------------------------------------------
-
- Copy the above code snippet into smount.c
- gcc -o smount smount.c
-
-
- (i) To mark all the mounts under /mnt as shared execute the following
- command:
-
- smount /mnt rshared
- the corresponding syntax planned for mount command is
- mount --make-rshared /mnt
-
- just to mark a mount /mnt as shared, execute the following
- command:
- smount /mnt shared
- the corresponding syntax planned for mount command is
- mount --make-shared /mnt
-
- (ii) To mark all the shared mounts under /mnt as slave execute the
- following
-
- command:
- smount /mnt rslave
- the corresponding syntax planned for mount command is
- mount --make-rslave /mnt
-
- just to mark a mount /mnt as slave, execute the following
- command:
- smount /mnt slave
- the corresponding syntax planned for mount command is
- mount --make-slave /mnt
-
- (iii) To mark all the mounts under /mnt as private execute the
- following command:
-
- smount /mnt rprivate
- the corresponding syntax planned for mount command is
- mount --make-rprivate /mnt
-
- just to mark a mount /mnt as private, execute the following
- command:
- smount /mnt private
- the corresponding syntax planned for mount command is
- mount --make-private /mnt
-
- NOTE: by default all the mounts are created as private. But if
- you want to change some shared/slave/unbindable mount as
- private at a later point in time, this command can help.
-
- (iv) To mark all the mounts under /mnt as unbindable execute the
- following
-
- command:
- smount /mnt runbindable
- the corresponding syntax planned for mount command is
- mount --make-runbindable /mnt
-
- just to mark a mount /mnt as unbindable, execute the following
- command:
- smount /mnt unbindable
- the corresponding syntax planned for mount command is
- mount --make-unbindable /mnt
+ mount --make-shared mountpoint
+ mount --make-slave mountpoint
+ mount --make-private mountpoint
+ mount --make-unbindable mountpoint
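+
+	The corresponding recursive operations, which apply the state to
+	every mount under mountpoint as well, are:
+
+	mount --make-rshared mountpoint
+	mount --make-rslave mountpoint
+	mount --make-rprivate mountpoint
+	mount --make-runbindable mountpoint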
4) Use cases
@@ -350,7 +214,7 @@ replicas continue to be exactly same.
mount --rbind / /view/v3
mount --rbind / /view/v4
- and if /usr has a versioning filesystem mounted, than that
+ and if /usr has a versioning filesystem mounted, then that
mount appears at /view/v1/usr, /view/v2/usr, /view/v3/usr and
/view/v4/usr too
@@ -390,7 +254,7 @@ replicas continue to be exactly same.
For example:
mount --make-shared /mnt
- mount --bin /mnt /tmp
+ mount --bind /mnt /tmp
The mount at /mnt and that at /tmp are both shared and belong
to the same peer group. Anything mounted or unmounted under
@@ -558,7 +422,7 @@ replicas continue to be exactly same.
then the subtree under the unbindable mount is pruned in the new
location.
- eg: lets say we have the following mount tree.
+ eg: let's say we have the following mount tree.
A
/ \
@@ -566,7 +430,7 @@ replicas continue to be exactly same.
/ \ / \
D E F G
- Lets say all the mount except the mount C in the tree are
+ Let's say all the mount except the mount C in the tree are
of a type other than unbindable.
If this tree is rbound to say Z
@@ -683,13 +547,13 @@ replicas continue to be exactly same.
'b' on mounts that receive propagation from mount 'B' and does not have
sub-mounts within them are unmounted.
- Example: Lets say 'B1', 'B2', 'B3' are shared mounts that propagate to
+ Example: Let's say 'B1', 'B2', 'B3' are shared mounts that propagate to
each other.
- lets say 'A1', 'A2', 'A3' are first mounted at dentry 'b' on mount
+ let's say 'A1', 'A2', 'A3' are first mounted at dentry 'b' on mount
'B1', 'B2' and 'B3' respectively.
- lets say 'C1', 'C2', 'C3' are next mounted at the same dentry 'b' on
+ let's say 'C1', 'C2', 'C3' are next mounted at the same dentry 'b' on
mount 'B1', 'B2' and 'B3' respectively.
if 'C1' is unmounted, all the mounts that are most-recently-mounted on
@@ -710,7 +574,7 @@ replicas continue to be exactly same.
A cloned namespace contains all the mounts as that of the parent
namespace.
- Lets say 'A' and 'B' are the corresponding mounts in the parent and the
+ Let's say 'A' and 'B' are the corresponding mounts in the parent and the
child namespace.
If 'A' is shared, then 'B' is also shared and 'A' and 'B' propagate to
@@ -759,11 +623,11 @@ replicas continue to be exactly same.
mount --make-slave /mnt
At this point we have the first mount at /tmp and
- its root dentry is 1. Lets call this mount 'A'
+ its root dentry is 1. Let's call this mount 'A'
And then we have a second mount at /tmp1 with root
- dentry 2. Lets call this mount 'B'
+ dentry 2. Let's call this mount 'B'
Next we have a third mount at /mnt with root dentry
- mnt. Lets call this mount 'C'
+ mnt. Let's call this mount 'C'
'B' is the slave of 'A' and 'C' is a slave of 'B'
A -> B -> C
@@ -794,7 +658,7 @@ replicas continue to be exactly same.
Q3 Why is unbindable mount needed?
- Lets say we want to replicate the mount tree at multiple
+ Let's say we want to replicate the mount tree at multiple
locations within the same subtree.
if one rbind mounts a tree within the same subtree 'n' times
@@ -803,7 +667,7 @@ replicas continue to be exactly same.
mounts. Here is a example.
step 1:
- lets say the root tree has just two directories with
+ let's say the root tree has just two directories with
one vfsmount.
root
/ \
@@ -875,7 +739,7 @@ replicas continue to be exactly same.
Unclonable mounts come in handy here.
step 1:
- lets say the root tree has just two directories with
+ let's say the root tree has just two directories with
one vfsmount.
root
/ \
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f49eecf2e57..623f094c9d8 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -536,6 +536,7 @@ struct address_space_operations {
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct page *, struct page *);
int (*launder_page) (struct page *);
+ int (*error_remove_page) (struct address_space *mapping, struct page *page);
};
writepage: called by the VM to write a dirty page to backing store.
@@ -694,6 +695,12 @@ struct address_space_operations {
prevent redirtying the page, it is kept locked during the whole
operation.
+ error_remove_page: normally set to generic_error_remove_page if truncation
+ is ok for this address space. Used for memory failure handling.
+ Setting this implies you can deal with pages going away from under you,
+ unless you have them locked or reference counts increased.
+
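+  As an illustration, a filesystem for which truncation is acceptable
+  would typically just point this at the generic helper (the foo_*
+  names below are placeholders):
+
+	static const struct address_space_operations foo_aops = {
+		.readpage		= foo_readpage,
+		.writepage		= foo_writepage,
+		.error_remove_page	= generic_error_remove_page,
+	};
+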
+
The File Object
===============
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
index 40ec6335276..e7ca6478cd9 100644
--- a/Documentation/gcov.txt
+++ b/Documentation/gcov.txt
@@ -47,7 +47,7 @@ Possible uses:
Configure the kernel with:
- CONFIG_DEBUGFS=y
+ CONFIG_DEBUG_FS=y
CONFIG_GCOV_KERNEL=y
and to get coverage data for the entire kernel:
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index e4b6985044a..fa4dc077ae0 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -524,6 +524,13 @@ and have the following read/write attributes:
is configured as an output, this value may be written;
any nonzero value is treated as high.
+ "edge" ... reads as either "none", "rising", "falling", or
+ "both". Write these strings to select the signal edge(s)
+ that will make poll(2) on the "value" file return.
+
+ This file exists only if the pin can be configured as an
+ interrupt generating input pin.
+
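+	A minimal user-space sketch of waiting for an edge (the GPIO
+	number and buffer size are just examples); poll(2) returns with
+	POLLPRI|POLLERR set and the value must then be re-read from the
+	start of the file:
+
+	int fd = open("/sys/class/gpio/gpio42/value", O_RDONLY);
+	char buf[3];
+	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+
+	read(fd, buf, sizeof(buf));	/* consume the current value */
+	poll(&pfd, 1, -1);		/* blocks until an edge occurs */
+	lseek(fd, 0, SEEK_SET);
+	read(fd, buf, sizeof(buf));	/* new value, "0\n" or "1\n" */
+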
GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
@@ -555,6 +562,11 @@ requested using gpio_request():
/* reverse gpio_export() */
void gpio_unexport();
+ /* create a sysfs link to an exported GPIO node */
+ int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+
+
After a kernel driver requests a GPIO, it may only be made available in
the sysfs interface by gpio_export(). The driver can control whether the
signal direction may change. This helps drivers prevent userspace code
@@ -563,3 +575,8 @@ from accidentally clobbering important system state.
This explicit exporting can help with debugging (by making some kinds
of experiments easier), or can provide an always-there interface that's
suitable for documenting as part of a board support package.
+
+After the GPIO has been exported, gpio_export_link() allows creating
+symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
+use this to provide the interface under their own device in sysfs with
+a descriptive name.
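+
+For example, a driver that has exported a GPIO used as a reset line
+could link it under its own device (the "reset" name is illustrative):
+
+	gpio_export(gpio, false);
+	gpio_export_link(&pdev->dev, "reset", gpio);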
diff --git a/Documentation/hwmon/acpi_power_meter b/Documentation/hwmon/acpi_power_meter
new file mode 100644
index 00000000000..c80399a00c5
--- /dev/null
+++ b/Documentation/hwmon/acpi_power_meter
@@ -0,0 +1,51 @@
+Kernel driver power_meter
+=========================
+
+This driver talks to ACPI 4.0 power meters.
+
+Supported systems:
+ * Any recent system with ACPI 4.0.
+ Prefix: 'power_meter'
+ Datasheet: http://acpi.info/, section 10.4.
+
+Author: Darrick J. Wong
+
+Description
+-----------
+
+This driver implements sensor reading support for the power meters exposed in
+the ACPI 4.0 spec (Chapter 10.4). These devices have a simple set of
+features--a power meter that returns average power use over a configurable
+interval, an optional capping mechanism, and a couple of trip points. The
+sysfs interface conforms with the specification outlined in the "Power" section
+of Documentation/hwmon/sysfs-interface.
+
+Special Features
+----------------
+
+The power[1-*]_is_battery knob indicates if the power supply is a battery.
+Both power[1-*]_average_{min,max} must be set before the trip points will work.
+When both of them are set, an ACPI event will be broadcast on the ACPI netlink
+socket and a poll notification will be sent to the appropriate
+power[1-*]_average sysfs file.
+
+The power[1-*]_{model_number, serial_number, oem_info} fields display arbitrary
+strings that ACPI provides with the meter. The measures/ directory contains
+symlinks to the devices that this meter measures.
+
+Some computers have the ability to enforce a power cap in hardware. If this is
+the case, the power[1-*]_cap and related sysfs files will appear. When the
+average power consumption exceeds the cap, an ACPI event will be broadcast on
+the netlink event socket and a poll notification will be sent to the
+appropriate power[1-*]_alarm file to indicate that capping has begun, and the
+hardware has taken action to reduce power consumption. Most likely this will
+result in reduced performance.
+
+There are a few other ACPI notifications that can be sent by the firmware. In
+all cases the ACPI event will be broadcast on the ACPI netlink event socket as
+well as sent as a poll notification to a sysfs file. The events are as
+follows:
+
+power[1-*]_cap will be notified if the firmware changes the power cap.
+power[1-*]_interval will be notified if the firmware changes the averaging
+interval.
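+
+For example, the current average power reading (in microwatts, per the
+standard hwmon conventions) can be read through sysfs; the exact hwmon
+device path varies from system to system:
+
+# cat /sys/class/hwmon/hwmon0/device/power1_average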
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
index dbbe6c7025b..92267b62db5 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp
@@ -4,7 +4,9 @@ Kernel driver coretemp
Supported chips:
* All Intel Core family
Prefix: 'coretemp'
- CPUID: family 0x6, models 0xe, 0xf, 0x16, 0x17
+ CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
+ 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
+ 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
Volume 3A: System Programming Guide
http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
diff --git a/Documentation/hwmon/fscher b/Documentation/hwmon/fscher
deleted file mode 100644
index 64031659aff..00000000000
--- a/Documentation/hwmon/fscher
+++ /dev/null
@@ -1,169 +0,0 @@
-Kernel driver fscher
-====================
-
-Supported chips:
- * Fujitsu-Siemens Hermes chip
- Prefix: 'fscher'
- Addresses scanned: I2C 0x73
-
-Authors:
- Reinhard Nissl <rnissl@gmx.de> based on work
- from Hermann Jung <hej@odn.de>,
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>
-
-Description
------------
-
-This driver implements support for the Fujitsu-Siemens Hermes chip. It is
-described in the 'Register Set Specification BMC Hermes based Systemboard'
-from Fujitsu-Siemens.
-
-The Hermes chip implements a hardware-based system management, e.g. for
-controlling fan speed and core voltage. There is also a watchdog counter on
-the chip which can trigger an alarm and even shut the system down.
-
-The chip provides three temperature values (CPU, motherboard and
-auxiliary), three voltage values (+12V, +5V and battery) and three fans
-(power supply, CPU and auxiliary).
-
-Temperatures are measured in degrees Celsius. The resolution is 1 degree.
-
-Fan rotation speeds are reported in RPM (rotations per minute). The value
-can be divided by a programmable divider (1, 2 or 4) which is stored on
-the chip.
-
-Voltage sensors (also known as "in" sensors) report their values in volts.
-
-All values are reported as final values from the driver. There is no need
-for further calculations.
-
-
-Detailed description
---------------------
-
-Below you'll find a single line description of all the bit values. With
-this information, you're able to decode e. g. alarms, wdog, etc. To make
-use of the watchdog, you'll need to set the watchdog time and enable the
-watchdog. After that it is necessary to restart the watchdog time within
-the specified period of time, or a system reset will occur.
-
-* revision
- READING & 0xff = 0x??: HERMES revision identification
-
-* alarms
- READING & 0x80 = 0x80: CPU throttling active
- READING & 0x80 = 0x00: CPU running at full speed
-
- READING & 0x10 = 0x10: software event (see control:1)
- READING & 0x10 = 0x00: no software event
-
- READING & 0x08 = 0x08: watchdog event (see wdog:2)
- READING & 0x08 = 0x00: no watchdog event
-
- READING & 0x02 = 0x02: thermal event (see temp*:1)
- READING & 0x02 = 0x00: no thermal event
-
- READING & 0x01 = 0x01: fan event (see fan*:1)
- READING & 0x01 = 0x00: no fan event
-
- READING & 0x13 ! 0x00: ALERT LED is flashing
-
-* control
- READING & 0x01 = 0x01: software event
- READING & 0x01 = 0x00: no software event
-
- WRITING & 0x01 = 0x01: set software event
- WRITING & 0x01 = 0x00: clear software event
-
-* watchdog_control
- READING & 0x80 = 0x80: power off on watchdog event while thermal event
- READING & 0x80 = 0x00: watchdog power off disabled (just system reset enabled)
-
- READING & 0x40 = 0x40: watchdog timebase 60 seconds (see also wdog:1)
- READING & 0x40 = 0x00: watchdog timebase 2 seconds
-
- READING & 0x10 = 0x10: watchdog enabled
- READING & 0x10 = 0x00: watchdog disabled
-
- WRITING & 0x80 = 0x80: enable "power off on watchdog event while thermal event"
- WRITING & 0x80 = 0x00: disable "power off on watchdog event while thermal event"
-
- WRITING & 0x40 = 0x40: set watchdog timebase to 60 seconds
- WRITING & 0x40 = 0x00: set watchdog timebase to 2 seconds
-
- WRITING & 0x20 = 0x20: disable watchdog
-
- WRITING & 0x10 = 0x10: enable watchdog / restart watchdog time
-
-* watchdog_state
- READING & 0x02 = 0x02: watchdog system reset occurred
- READING & 0x02 = 0x00: no watchdog system reset occurred
-
- WRITING & 0x02 = 0x02: clear watchdog event
-
-* watchdog_preset
- READING & 0xff = 0x??: configured watch dog time in units (see wdog:3 0x40)
-
- WRITING & 0xff = 0x??: configure watch dog time in units
-
-* in* (0: +5V, 1: +12V, 2: onboard 3V battery)
- READING: actual voltage value
-
-* temp*_status (1: CPU sensor, 2: onboard sensor, 3: auxiliary sensor)
- READING & 0x02 = 0x02: thermal event (overtemperature)
- READING & 0x02 = 0x00: no thermal event
-
- READING & 0x01 = 0x01: sensor is working
- READING & 0x01 = 0x00: sensor is faulty
-
- WRITING & 0x02 = 0x02: clear thermal event
-
-* temp*_input (1: CPU sensor, 2: onboard sensor, 3: auxiliary sensor)
- READING: actual temperature value
-
-* fan*_status (1: power supply fan, 2: CPU fan, 3: auxiliary fan)
- READING & 0x04 = 0x04: fan event (fan fault)
- READING & 0x04 = 0x00: no fan event
-
- WRITING & 0x04 = 0x04: clear fan event
-
-* fan*_div (1: power supply fan, 2: CPU fan, 3: auxiliary fan)
- Divisors 2,4 and 8 are supported, both for reading and writing
-
-* fan*_pwm (1: power supply fan, 2: CPU fan, 3: auxiliary fan)
- READING & 0xff = 0x00: fan may be switched off
- READING & 0xff = 0x01: fan must run at least at minimum speed (supply: 6V)
- READING & 0xff = 0xff: fan must run at maximum speed (supply: 12V)
- READING & 0xff = 0x??: fan must run at least at given speed (supply: 6V..12V)
-
- WRITING & 0xff = 0x00: fan may be switched off
- WRITING & 0xff = 0x01: fan must run at least at minimum speed (supply: 6V)
- WRITING & 0xff = 0xff: fan must run at maximum speed (supply: 12V)
- WRITING & 0xff = 0x??: fan must run at least at given speed (supply: 6V..12V)
-
-* fan*_input (1: power supply fan, 2: CPU fan, 3: auxiliary fan)
- READING: actual RPM value
-
-
-Limitations
------------
-
-* Measuring fan speed
-It seems that the chip counts "ripples" (typical fans produce 2 ripples per
-rotation while VERAX fans produce 18) in a 9-bit register. This register is
-read out every second, then the ripple prescaler (2, 4 or 8) is applied and
-the result is stored in the 8 bit output register. Due to the limitation of
-the counting register to 9 bits, it is impossible to measure a VERAX fan
-properly (even with a prescaler of 8). At its maximum speed of 3500 RPM the
-fan produces 1080 ripples per second which causes the counting register to
-overflow twice, leading to only 186 RPM.
-
-* Measuring input voltages
-in2 ("battery") reports the voltage of the onboard lithium battery and not
-+3.3V from the power supply.
-
-* Undocumented features
-Fujitsu-Siemens Computers has not documented all features of the chip so
-far. Their software, System Guard, shows that there are a still some
-features which cannot be controlled by this implementation.
diff --git a/Documentation/hwmon/hpfall.c b/Documentation/hwmon/hpfall.c
index bbea1ccfd46..681ec22b9d0 100644
--- a/Documentation/hwmon/hpfall.c
+++ b/Documentation/hwmon/hpfall.c
@@ -16,6 +16,34 @@
#include <stdint.h>
#include <errno.h>
#include <signal.h>
+#include <sys/mman.h>
+#include <sched.h>
+
+char unload_heads_path[64];
+
+int set_unload_heads_path(char *device)
+{
+ char devname[64];
+
+ if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
+ return -EINVAL;
+ strncpy(devname, device + 5, sizeof(devname));
+
+ snprintf(unload_heads_path, sizeof(unload_heads_path),
+ "/sys/block/%s/device/unload_heads", devname);
+ return 0;
+}
+int valid_disk(void)
+{
+ int fd = open(unload_heads_path, O_RDONLY);
+ if (fd < 0) {
+ perror(unload_heads_path);
+ return 0;
+ }
+
+ close(fd);
+ return 1;
+}
void write_int(char *path, int i)
{
@@ -40,7 +68,7 @@ void set_led(int on)
void protect(int seconds)
{
- write_int("/sys/block/sda/device/unload_heads", seconds*1000);
+ write_int(unload_heads_path, seconds*1000);
}
int on_ac(void)
@@ -57,45 +85,62 @@ void ignore_me(void)
{
protect(0);
set_led(0);
-
}
-int main(int argc, char* argv[])
+int main(int argc, char **argv)
{
- int fd, ret;
+ int fd, ret;
+ struct sched_param param;
+
+ if (argc == 1)
+ ret = set_unload_heads_path("/dev/sda");
+ else if (argc == 2)
+ ret = set_unload_heads_path(argv[1]);
+ else
+ ret = -EINVAL;
+
+ if (ret || !valid_disk()) {
+ fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
+ argv[0]);
+ exit(1);
+ }
+
+ fd = open("/dev/freefall", O_RDONLY);
+ if (fd < 0) {
+ perror("/dev/freefall");
+ return EXIT_FAILURE;
+ }
- fd = open("/dev/freefall", O_RDONLY);
- if (fd < 0) {
- perror("open");
- return EXIT_FAILURE;
- }
+ daemon(0, 0);
+ param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+ sched_setscheduler(0, SCHED_FIFO, &param);
+ mlockall(MCL_CURRENT|MCL_FUTURE);
signal(SIGALRM, ignore_me);
- for (;;) {
- unsigned char count;
-
- ret = read(fd, &count, sizeof(count));
- alarm(0);
- if ((ret == -1) && (errno == EINTR)) {
- /* Alarm expired, time to unpark the heads */
- continue;
- }
-
- if (ret != sizeof(count)) {
- perror("read");
- break;
- }
-
- protect(21);
- set_led(1);
- if (1 || on_ac() || lid_open()) {
- alarm(2);
- } else {
- alarm(20);
- }
- }
-
- close(fd);
- return EXIT_SUCCESS;
+ for (;;) {
+ unsigned char count;
+
+ ret = read(fd, &count, sizeof(count));
+ alarm(0);
+ if ((ret == -1) && (errno == EINTR)) {
+ /* Alarm expired, time to unpark the heads */
+ continue;
+ }
+
+ if (ret != sizeof(count)) {
+ perror("read");
+ break;
+ }
+
+ protect(21);
+ set_led(1);
+ if (1 || on_ac() || lid_open())
+ alarm(2);
+ else
+ alarm(20);
+ }
+
+ close(fd);
+ return EXIT_SUCCESS;
}
diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427
index d1ebbe510f3..db5cc1227a8 100644
--- a/Documentation/hwmon/pc87427
+++ b/Documentation/hwmon/pc87427
@@ -34,5 +34,5 @@ Fan rotation speeds are reported as 14-bit values from a gated clock
signal. Speeds down to 83 RPM can be measured.
An alarm is triggered if the rotation speed drops below a programmable
-limit. Another alarm is triggered if the speed is too low to to be measured
+limit. Another alarm is triggered if the speed is too low to be measured
(including stalled or missing fan).
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index f889481762b..c5b37c57055 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -8,6 +8,8 @@ Supported adapters:
Datasheet: Only available via NDA from ServerWorks
* ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
Datasheet: Not publicly available
+ * AMD SB900
+ Datasheet: Not publicly available
* Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
Datasheet: Publicly available at the SMSC website http://www.smsc.com
diff --git a/Documentation/i2c/chips/pca9539 b/Documentation/i2c/chips/pca9539
deleted file mode 100644
index 6aff890088b..00000000000
--- a/Documentation/i2c/chips/pca9539
+++ /dev/null
@@ -1,58 +0,0 @@
-Kernel driver pca9539
-=====================
-
-NOTE: this driver is deprecated and will be dropped soon, use
-drivers/gpio/pca9539.c instead.
-
-Supported chips:
- * Philips PCA9539
- Prefix: 'pca9539'
- Addresses scanned: none
- Datasheet:
- http://www.semiconductors.philips.com/acrobat/datasheets/PCA9539_2.pdf
-
-Author: Ben Gardner <bgardner@wabtec.com>
-
-
-Description
------------
-
-The Philips PCA9539 is a 16 bit low power I/O device.
-All 16 lines can be individually configured as an input or output.
-The input sense can also be inverted.
-The 16 lines are split between two bytes.
-
-
-Detection
----------
-
-The PCA9539 is difficult to detect and not commonly found in PC machines,
-so you have to pass the I2C bus and address of the installed PCA9539
-devices explicitly to the driver at load time via the force=... parameter.
-
-
-Sysfs entries
--------------
-
-Each is a byte that maps to the 8 I/O bits.
-A '0' suffix is for bits 0-7, while '1' is for bits 8-15.
-
-input[01] - read the current value
-output[01] - sets the output value
-direction[01] - direction of each bit: 1=input, 0=output
-invert[01] - toggle the input bit sense
-
-input reads the actual state of the line and is always available.
-The direction defaults to input for all channels.
-
-
-General Remarks
----------------
-
-Note that each output, direction, and invert entry controls 8 lines.
-You should use the read, modify, write sequence.
-For example. to set output bit 0 of 1.
- val=$(cat output0)
- val=$(( $val | 1 ))
- echo $val > output0
-
diff --git a/Documentation/i2c/chips/pcf8574 b/Documentation/i2c/chips/pcf8574
deleted file mode 100644
index 235815c075f..00000000000
--- a/Documentation/i2c/chips/pcf8574
+++ /dev/null
@@ -1,65 +0,0 @@
-Kernel driver pcf8574
-=====================
-
-Supported chips:
- * Philips PCF8574
- Prefix: 'pcf8574'
- Addresses scanned: none
- Datasheet: Publicly available at the Philips Semiconductors website
- http://www.semiconductors.philips.com/pip/PCF8574P.html
-
- * Philips PCF8574A
- Prefix: 'pcf8574a'
- Addresses scanned: none
- Datasheet: Publicly available at the Philips Semiconductors website
- http://www.semiconductors.philips.com/pip/PCF8574P.html
-
-Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Dan Eaton <dan.eaton@rocketlogix.com>,
- Aurelien Jarno <aurelien@aurel32.net>,
- Jean Delvare <khali@linux-fr.org>,
-
-
-Description
------------
-The PCF8574(A) is an 8-bit I/O expander for the I2C bus produced by Philips
-Semiconductors. It is designed to provide a byte I2C interface to up to 16
-separate devices (8 x PCF8574 and 8 x PCF8574A).
-
-This device consists of a quasi-bidirectional port. Each of the eight I/Os
-can be independently used as an input or output. To setup an I/O as an
-input, you have to write a 1 to the corresponding output.
-
-For more informations see the datasheet.
-
-
-Accessing PCF8574(A) via /sys interface
--------------------------------------
-
-The PCF8574(A) is plainly impossible to detect ! Stupid chip.
-So, you have to pass the I2C bus and address of the installed PCF857A
-and PCF8574A devices explicitly to the driver at load time via the
-force=... parameter.
-
-On detection (i.e. insmod, modprobe et al.), directories are being
-created for each detected PCF8574(A):
-
-/sys/bus/i2c/devices/<0>-<1>/
-where <0> is the bus the chip was detected on (e. g. i2c-0)
-and <1> the chip address ([20..27] or [38..3f]):
-
-(example: /sys/bus/i2c/devices/1-0020/)
-
-Inside these directories, there are two files each:
-read and write (and one file with chip name).
-
-The read file is read-only. Reading gives you the current I/O input
-if the corresponding output is set as 1, otherwise the current output
-value, that is to say 0.
-
-The write file is read/write. Writing a value outputs it on the I/O
-port. Reading returns the last written value. As it is not possible
-to read this value from the chip, you need to write at least once to
-this file before you can read back from it.
diff --git a/Documentation/i2c/chips/pcf8575 b/Documentation/i2c/chips/pcf8575
deleted file mode 100644
index 40b268eb276..00000000000
--- a/Documentation/i2c/chips/pcf8575
+++ /dev/null
@@ -1,69 +0,0 @@
-About the PCF8575 chip and the pcf8575 kernel driver
-====================================================
-
-The PCF8575 chip is produced by the following manufacturers:
-
- * Philips NXP
- http://www.nxp.com/#/pip/cb=[type=product,path=50807/41735/41850,final=PCF8575_3]|pip=[pip=PCF8575_3][0]
-
- * Texas Instruments
- http://focus.ti.com/docs/prod/folders/print/pcf8575.html
-
-
-Some vendors sell small PCB's with the PCF8575 mounted on it. You can connect
-such a board to a Linux host via e.g. an USB to I2C interface. Examples of
-PCB boards with a PCF8575:
-
- * SFE Breakout Board for PCF8575 I2C Expander by RobotShop
- http://www.robotshop.ca/home/products/robot-parts/electronics/adapters-converters/sfe-pcf8575-i2c-expander-board.html
-
- * Breakout Board for PCF8575 I2C Expander by Spark Fun Electronics
- http://www.sparkfun.com/commerce/product_info.php?products_id=8130
-
-
-Description
------------
-The PCF8575 chip is a 16-bit I/O expander for the I2C bus. Up to eight of
-these chips can be connected to the same I2C bus. You can find this
-chip on some custom designed hardware, but you won't find it on PC
-motherboards.
-
-The PCF8575 chip consists of a 16-bit quasi-bidirectional port and an I2C-bus
-interface. Each of the sixteen I/O's can be independently used as an input or
-an output. To set up an I/O pin as an input, you have to write a 1 to the
-corresponding output.
-
-For more information please see the datasheet.
-
-
-Detection
----------
-
-There is no method known to detect whether a chip on a given I2C address is
-a PCF8575 or whether it is any other I2C device, so you have to pass the I2C
-bus and address of the installed PCF8575 devices explicitly to the driver at
-load time via the force=... parameter.
-
-/sys interface
---------------
-
-For each address on which a PCF8575 chip was found or forced the following
-files will be created under /sys:
-* /sys/bus/i2c/devices/<bus>-<address>/read
-* /sys/bus/i2c/devices/<bus>-<address>/write
-where bus is the I2C bus number (0, 1, ...) and address is the four-digit
-hexadecimal representation of the 7-bit I2C address of the PCF8575
-(0020 .. 0027).
-
-The read file is read-only. Reading it will trigger an I2C read and will hence
-report the current input state for the pins configured as inputs, and the
-current output value for the pins configured as outputs.
-
-The write file is read-write. Writing a value to it will configure all pins
-as output for which the corresponding bit is zero. Reading the write file will
-return the value last written, or -EAGAIN if no value has yet been written to
-the write file.
-
-On module initialization the configuration of the chip is not changed -- the
-chip is left in the state it was already configured in through either power-up
-or through previous I2C write actions.
diff --git a/Documentation/ia64/aliasing-test.c b/Documentation/ia64/aliasing-test.c
index d23610fb2ff..3dfb76ca693 100644
--- a/Documentation/ia64/aliasing-test.c
+++ b/Documentation/ia64/aliasing-test.c
@@ -24,7 +24,7 @@
int sum;
-int map_mem(char *path, off_t offset, size_t length, int touch)
+static int map_mem(char *path, off_t offset, size_t length, int touch)
{
int fd, rc;
void *addr;
@@ -62,7 +62,7 @@ int map_mem(char *path, off_t offset, size_t length, int touch)
return 0;
}
-int scan_tree(char *path, char *file, off_t offset, size_t length, int touch)
+static int scan_tree(char *path, char *file, off_t offset, size_t length, int touch)
{
struct dirent **namelist;
char *name, *path2;
@@ -119,7 +119,7 @@ skip:
char buf[1024];
-int read_rom(char *path)
+static int read_rom(char *path)
{
int fd, rc;
size_t size = 0;
@@ -146,7 +146,7 @@ int read_rom(char *path)
return size;
}
-int scan_rom(char *path, char *file)
+static int scan_rom(char *path, char *file)
{
struct dirent **namelist;
char *name, *path2;
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index aafca0a8f66..947374977ca 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -135,6 +135,7 @@ Code Seq# Include File Comments
<http://mikonos.dia.unisa.it/tcfs>
'l' 40-7F linux/udf_fs_i.h in development:
<http://sourceforge.net/projects/linux-udf/>
+'m' 00-09 linux/mmtimer.h
'm' all linux/mtio.h conflict!
'm' all linux/soundcard.h conflict!
'm' all linux/synclink.h conflict!
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index f3355b6812d..bb3bf38f03d 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -65,6 +65,22 @@ INSTALL_PATH
INSTALL_PATH specifies where to place the updated kernel and system map
images. Default is /boot, but you can set it to other values.
+INSTALLKERNEL
+--------------------------------------------------
+Install script called when using "make install".
+The default name is "installkernel".
+
+The script will be called with the following arguments:
+ $1 - kernel version
+ $2 - kernel image file
+ $3 - kernel map file
+ $4 - default install path (use root directory if blank)
+
+The implementation of "make install" is architecture specific
+and it may differ from the above.
+
+INSTALLKERNEL is provided so that a custom installer can be
+specified when cross-compiling a kernel.
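+
+For example, a minimal installkernel script could look like the sketch
+below (the destination layout is only an illustration, not a kbuild
+requirement):
+
+	#!/bin/sh
+	# $1=kernel version  $2=kernel image  $3=System.map  $4=install path
+	cp "$2" "${4:-/boot}/vmlinuz-$1"
+	cp "$3" "${4:-/boot}/System.map-$1"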
MODLIB
--------------------------------------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index d76cfd8712e..71c602d6168 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -18,6 +18,7 @@ This document describes the Linux kernel Makefiles.
--- 3.9 Dependency tracking
--- 3.10 Special Rules
--- 3.11 $(CC) support functions
+ --- 3.12 $(LD) support functions
=== 4 Host Program support
--- 4.1 Simple Host Program
@@ -435,14 +436,14 @@ more details, with real examples.
The second argument is optional, and if supplied will be used
if first argument is not supported.
- ld-option
- ld-option is used to check if $(CC) when used to link object files
+ cc-ldoption
+ cc-ldoption is used to check if $(CC) when used to link object files
supports the given option. An optional second option may be
specified if first option are not supported.
Example:
#arch/i386/kernel/Makefile
- vsyscall-flags += $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ vsyscall-flags += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
In the above example, vsyscall-flags will be assigned the option
-Wl$(comma)--hash-style=sysv if it is supported by $(CC).
@@ -570,6 +571,19 @@ more details, with real examples.
endif
endif
+--- 3.12 $(LD) support functions
+
+ ld-option
+ ld-option is used to check if $(LD) supports the supplied option.
+ ld-option takes two options as arguments.
+ The second argument is an optional option that can be used if the
+ first option is not supported by $(LD).
+
+ Example:
+ #Makefile
+	LDFLAGS_vmlinux += $(call ld-option, -X)
+
+
=== 4 Host Program support
Kbuild supports building executables on the host for use during the
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0f17d16dc10..6fa7292947e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -671,7 +671,7 @@ and is between 256 and 4096 characters. It is defined in the file
earlyprintk= [X86,SH,BLACKFIN]
earlyprintk=vga
earlyprintk=serial[,ttySn[,baudrate]]
- earlyprintk=dbgp
+ earlyprintk=dbgp[debugController#]
Append ",keep" to not disable it when the real console
takes over.
@@ -933,7 +933,7 @@ and is between 256 and 4096 characters. It is defined in the file
1 -- enable informational integrity auditing messages.
ima_hash= [IMA]
- Formt: { "sha1" | "md5" }
+ Format: { "sha1" | "md5" }
default: "sha1"
ima_tcb [IMA]
diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt
index 363044609da..c28f82895d6 100644
--- a/Documentation/kmemcheck.txt
+++ b/Documentation/kmemcheck.txt
@@ -43,26 +43,7 @@ feature.
1. Downloading
==============
-kmemcheck can only be downloaded using git. If you want to write patches
-against the current code, you should use the kmemcheck development branch of
-the tip tree. It is also possible to use the linux-next tree, which also
-includes the latest version of kmemcheck.
-
-Assuming that you've already cloned the linux-2.6.git repository, all you
-have to do is add the -tip tree as a remote, like this:
-
- $ git remote add tip git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git
-
-To actually download the tree, fetch the remote:
-
- $ git fetch tip
-
-And to check out a new local branch with the kmemcheck code:
-
- $ git checkout -b kmemcheck tip/kmemcheck
-
-General instructions for the -tip tree can be found here:
-http://people.redhat.com/mingo/tip.git/readme.txt
+As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel.
2. Configuring and compiling
diff --git a/Documentation/laptops/asus-laptop.txt b/Documentation/laptops/asus-laptop.txt
new file mode 100644
index 00000000000..c1c5be84e4b
--- /dev/null
+++ b/Documentation/laptops/asus-laptop.txt
@@ -0,0 +1,258 @@
+Asus Laptop Extras
+
+Version 0.1
+August 6, 2009
+
+Corentin Chary <corentincj@iksaif.net>
+http://acpi4asus.sf.net/
+
+ This driver provides support for extra features of ACPI-compatible ASUS laptops.
+ It may also support some MEDION, JVC or VICTOR laptops (such as MEDION 9675 or
+ VICTOR XP7210 for example). It makes all the extra buttons generate standard
+ ACPI events that go through /proc/acpi/events and input events (like keyboards).
+ On some models it adds support for changing the display brightness and output,
+ switching the LCD backlight on and off, and, most importantly, allows you to
+ blink those fancy LEDs intended for reporting mail and wireless status.
+
+This driver supersedes the old asus_acpi driver.
+
+Requirements
+------------
+
+ Kernel 2.6.X sources, configured for your computer, with ACPI support.
+ You also need CONFIG_INPUT and CONFIG_ACPI.
+
+Status
+------
+
+ The features currently supported are the following (see below for
+ detailed description):
+
+ - Fn key combinations
+ - Bluetooth enable and disable
+ - Wlan enable and disable
+ - GPS enable and disable
+ - Video output switching
+ - Ambient Light Sensor on and off
+ - LED control
+ - LED Display control
+ - LCD brightness control
+ - LCD on and off
+
+ A compatibility table by model and feature is maintained on the web
+ site, http://acpi4asus.sf.net/.
+
+Usage
+-----
+
+ Try "modprobe asus_acpi". Check your dmesg (simply type dmesg). You should
+ see some lines like this :
+
+ Asus Laptop Extras version 0.42
+ L2D model detected.
+
+ If it is not the output you have on your laptop, send it (and the laptop's
+ DSDT) to me.
+
+ That's all, now, all the events generated by the hotkeys of your laptop
+ should be reported in your /proc/acpi/event entry. You can check with
+ "acpi_listen".
+
+ Hotkeys are also reported as input keys (like keyboards); you can check
+ which keys are supported using "xev" under X11.
+
+ You can get information on the version of your DSDT table by reading the
+ /sys/devices/platform/asus-laptop/infos entry. If you have a question or a
+ bug report to file, please include the output of this entry.
+
+LEDs
+----
+
+ You can modify LEDs by echoing values to /sys/class/leds/asus::*/brightness:
+ echo 1 > /sys/class/leds/asus::mail/brightness
+ will switch the mail LED on.
+ You can also check whether they are on or off by reading their contents, and
+ use kernel triggers such as ide-disk or heartbeat.
+
+Backlight
+---------
+
+ You can control LCD backlight power and brightness with
+ /sys/class/backlight/asus-laptop/. Brightness values are between 0 and 15.
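+ For example (an illustration only; the maximum useful value depends on
+ your model):
+   echo 15 > /sys/class/backlight/asus-laptop/brightness
+   cat /sys/class/backlight/asus-laptop/actual_brightness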
+
+Wireless devices
+----------------
+
+ You can turn the internal Bluetooth adapter on/off with the bluetooth entry
+ (only on models with Bluetooth). This usually controls the associated LED.
+ The same applies to the WLAN adapter.
+
+Display switching
+-----------------
+
+ Note: the display switching code is currently considered EXPERIMENTAL.
+
+ Switching works for the following models:
+ L3800C
+ A2500H
+ L5800C
+ M5200N
+ W1000N (albeit with some glitches)
+ M6700R
+ A6JC
+ F3J
+
+ Switching doesn't work for the following:
+ M3700N
+ L2X00D (locks the laptop under certain conditions)
+
+ To switch the displays, echo values from 0 to 15 to
+ /sys/devices/platform/asus-laptop/display. The significance of those values
+ is as follows:
+
+ +-------+-----+-----+-----+-----+-----+
+ | Bin | Val | DVI | TV | CRT | LCD |
+ +-------+-----+-----+-----+-----+-----+
+ + 0000 + 0 + + + + +
+ +-------+-----+-----+-----+-----+-----+
+ + 0001 + 1 + + + + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 0010 + 2 + + + X + +
+ +-------+-----+-----+-----+-----+-----+
+ + 0011 + 3 + + + X + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 0100 + 4 + + X + + +
+ +-------+-----+-----+-----+-----+-----+
+ + 0101 + 5 + + X + + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 0110 + 6 + + X + X + +
+ +-------+-----+-----+-----+-----+-----+
+ + 0111 + 7 + + X + X + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 1000 + 8 + X + + + +
+ +-------+-----+-----+-----+-----+-----+
+ + 1001 + 9 + X + + + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 1010 + 10 + X + + X + +
+ +-------+-----+-----+-----+-----+-----+
+ + 1011 + 11 + X + + X + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 1100 + 12 + X + X + + +
+ +-------+-----+-----+-----+-----+-----+
+ + 1101 + 13 + X + X + + X +
+ +-------+-----+-----+-----+-----+-----+
+ + 1110 + 14 + X + X + X + +
+ +-------+-----+-----+-----+-----+-----+
+ + 1111 + 15 + X + X + X + X +
+ +-------+-----+-----+-----+-----+-----+
+
+ In most cases, the appropriate displays must be plugged in for the above
+ combinations to work. TV-Out may need to be initialized at boot time.
+
+ Debugging:
+ 1) Check whether the Fn+F8 key:
+ a) does not lock the laptop (try disabling CONFIG_X86_UP_APIC or boot with
+ noapic / nolapic if it does)
+ b) generates events (0x6n, where n is the value corresponding to the
+ configuration above)
+ c) actually works
+ Record the disp value at every configuration.
+ 2) Echo values from 0 to 15 to /sys/devices/platform/asus-laptop/display.
+ Record its value, note any change. If nothing changes, try a broader range,
+ up to 65535.
+ 3) Send ANY output (both positive and negative reports are needed, unless your
+ machine is already listed above) to the acpi4asus-user mailing list.
+
+ Note: on some machines (e.g. L3C), after the module has been loaded, only 0x6n
+ events are generated and no actual switching occurs. In such a case, a line
+ like:
+
+ echo $((10#$arg-60)) > /sys/devices/platform/asus-laptop/display
+
+ will usually do the trick ($arg is the 0000006n-like event passed to acpid).
+
+ Note: there is currently no reliable way to read display status on xxN
+ (Centrino) models.
+
+LED display
+-----------
+
+ Some models like the W1N have an LED display that can be used to show
+ several items of information.
+
+ LED display works for the following models:
+ W1000N
+ W1J
+
+ To control the LED display, use the following:
+
+ echo 0x0T000DDD > /sys/devices/platform/asus-laptop/ledd
+
+ where T controls the 3-letter display, and DDD the 3-digit display,
+ according to the tables below.
+
+ DDD (digits)
+ 000 to 999 = display digits
+ AAA = ---
+ BBB to FFF = turn-off
+
+ T (type)
+ 0 = off
+ 1 = dvd
+ 2 = vcd
+ 3 = mp3
+ 4 = cd
+ 5 = tv
+ 6 = cpu
+ 7 = vol
+
+ For example "echo 0x01000001 >/sys/devices/platform/asus-laptop/ledd"
+ would display "DVD001".
+
+Driver options:
+---------------
+
+ Options can be passed to the asus-laptop driver using the standard
+ module argument syntax (<param>=<value> when passing the option to the
+ module or asus-laptop.<param>=<value> on the kernel boot line when
+ asus-laptop is statically linked into the kernel).
+
+ wapf: WAPF defines the behavior of the Fn+Fx wlan key
+ The significance of values is yet to be found, but
+ most of the time:
+ - 0x0 should do nothing
+ - 0x1 should allow controlling the device with the Fn+Fx key.
+ - 0x4 should send an ACPI event (0x88) while pressing the Fn+Fx key
+ - 0x5 like 0x1 or 0x4
+
+ The default value is 0x1.
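+
+ For example, to ask the firmware to generate ACPI events for the wlan
+ key (a sketch only; the useful value depends on your model):
+
+   modprobe asus-laptop wapf=4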
+
+Unsupported models
+------------------
+
+ These models will never be supported by this module, as they use a completely
+ different mechanism to handle LEDs and extra stuff (meaning we have no clue
+ how it works):
+
+ - ASUS A1300 (A1B), A1370D
+ - ASUS L7300G
+ - ASUS L8400
+
+Patches, Errors, Questions:
+--------------------------
+
+ I appreciate any success or failure
+ reports, especially if they add to or correct the compatibility table.
+ Please include the following information in your report:
+
+ - Asus model name
+ - a copy of your ACPI tables, using the "acpidump" utility
+ - a copy of /sys/devices/platform/asus-laptop/infos
+ - which driver features work and which don't
+ - the observed behavior of non-working features
+
+ Any other comments or patches are also more than welcome.
+
+ acpi4asus-user@lists.sourceforge.net
+ http://sourceforge.net/projects/acpi4asus
+
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index e2ddcdeb61b..aafcaa63419 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -199,18 +199,22 @@ kind to allow it (and it often doesn't!).
Not all bits in the mask can be modified. Not all bits that can be
modified do anything. Not all hot keys can be individually controlled
-by the mask. Some models do not support the mask at all, and in those
-models, hot keys cannot be controlled individually. The behaviour of
-the mask is, therefore, highly dependent on the ThinkPad model.
+by the mask. Some models do not support the mask at all. The behaviour
+of the mask is, therefore, highly dependent on the ThinkPad model.
+
+The driver will filter out any unmasked hotkeys, so even if the firmware
+doesn't allow disabling a specific hotkey, the driver will not report
+events for unmasked hotkeys.
Note that unmasking some keys prevents their default behavior. For
example, if Fn+F5 is unmasked, that key will no longer enable/disable
-Bluetooth by itself.
+Bluetooth by itself in firmware.
-Note also that not all Fn key combinations are supported through ACPI.
-For example, on the X40, the brightness, volume and "Access IBM" buttons
-do not generate ACPI events even with this driver. They *can* be used
-through the "ThinkPad Buttons" utility, see http://www.nongnu.org/tpb/
+Note also that not all Fn key combinations are supported through ACPI
+depending on the ThinkPad model and firmware version. On those
+ThinkPads, it is still possible to support some extra hotkeys by
+polling the "CMOS NVRAM" at least 10 times per second. The driver
+attempts to enable this functionality automatically when required.
procfs notes:
@@ -219,7 +223,7 @@ The following commands can be written to the /proc/acpi/ibm/hotkey file:
echo 0xffffffff > /proc/acpi/ibm/hotkey -- enable all hot keys
echo 0 > /proc/acpi/ibm/hotkey -- disable all possible hot keys
... any other 8-hex-digit mask ...
- echo reset > /proc/acpi/ibm/hotkey -- restore the original mask
+ echo reset > /proc/acpi/ibm/hotkey -- restore the recommended mask
The following commands have been deprecated and will cause the kernel
to log a warning:
@@ -240,9 +244,13 @@ sysfs notes:
Returns 0.
hotkey_bios_mask:
+ DEPRECATED, DON'T USE, WILL BE REMOVED IN THE FUTURE.
+
Returns the hot keys mask when thinkpad-acpi was loaded.
Upon module unload, the hot keys mask will be restored
- to this value.
+ to this value. This is always 0x80c, because those are
+ the hotkeys that were supported by ancient firmware
+ without mask support.
hotkey_enable:
DEPRECATED, WILL BE REMOVED SOON.
@@ -251,18 +259,11 @@ sysfs notes:
1: does nothing
hotkey_mask:
- bit mask to enable driver-handling (and depending on
+ bit mask to enable reporting (and depending on
the firmware, ACPI event generation) for each hot key
(see above). Returns the current status of the hot keys
mask, and allows one to modify it.
- Note: when NVRAM polling is active, the firmware mask
- will be different from the value returned by
- hotkey_mask. The driver will retain enabled bits for
- hotkeys that are under NVRAM polling even if the
- firmware refuses them, and will not set these bits on
- the firmware hot key mask.
-
hotkey_all_mask:
bit mask that should enable event reporting for all
supported hot keys, when echoed to hotkey_mask above.
@@ -275,7 +276,8 @@ sysfs notes:
bit mask that should enable event reporting for all
supported hot keys, except those which are always
handled by the firmware anyway. Echo it to
- hotkey_mask above, to use.
+ hotkey_mask above, to use. This is the default mask
+ used by the driver.
hotkey_source_mask:
bit mask that selects which hot keys will the driver
@@ -283,9 +285,10 @@ sysfs notes:
based on the capabilities reported by the ACPI firmware,
but it can be overridden at runtime.
- Hot keys whose bits are set in both hotkey_source_mask
- and also on hotkey_mask are polled for in NVRAM. Only a
- few hot keys are available through CMOS NVRAM polling.
+ Hot keys whose bits are set in hotkey_source_mask are
+ polled for in NVRAM, and reported as hotkey events if
+ enabled in hotkey_mask. Only a few hot keys are
+ available through CMOS NVRAM polling.
Warning: when in NVRAM mode, the volume up/down/mute
keys are synthesized according to changes in the mixer,
@@ -521,6 +524,7 @@ compatibility purposes when hotkey_report_mode is set to 1.
0x2305 System is waking up from suspend to eject bay
0x2404 System is waking up from hibernation to undock
0x2405 System is waking up from hibernation to eject bay
+0x5010 Brightness level changed/control event
The above events are never propagated by the driver.
@@ -528,7 +532,6 @@ The above events are never propagated by the driver.
0x4003 Undocked (see 0x2x04), can sleep again
0x500B Tablet pen inserted into its storage bay
0x500C Tablet pen removed from its storage bay
-0x5010 Brightness level changed (newer Lenovo BIOSes)
The above events are propagated by the driver.
@@ -617,6 +620,8 @@ For Lenovo models *with* ACPI backlight control:
2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN. Process
these keys on userspace somehow (e.g. by calling xbacklight).
+ The driver will do this automatically if it detects that ACPI video
+ has been disabled.
Bluetooth
@@ -1455,3 +1460,8 @@ Sysfs interface changelog:
0x020400: Marker for 16 LEDs support. Also, LEDs that are known
to not exist in a given model are not registered with
the LED sysfs class anymore.
+
+0x020500: Updated hotkey driver, hotkey_mask is always available
+ and it is always able to disable hot keys. Very old
+ thinkpads are properly supported. hotkey_bios_mask
+ is deprecated and marked for removal.
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 6399557cdab..8fd5ca2ae32 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -1,3 +1,4 @@
+
LED handling under Linux
========================
@@ -5,10 +6,10 @@ If you're reading this and thinking about keyboard leds, these are
handled by the input subsystem and the led class is *not* needed.
In its simplest form, the LED class just allows control of LEDs from
-userspace. LEDs appear in /sys/class/leds/. The brightness file will
-set the brightness of the LED (taking a value 0-255). Most LEDs don't
-have hardware brightness support so will just be turned on for non-zero
-brightness settings.
+userspace. LEDs appear in /sys/class/leds/. The maximum brightness of the
+LED is defined in the max_brightness file. The brightness file will set the
+brightness of the LED (taking a value 0-max_brightness). Most LEDs don't have
+hardware brightness support so will just be turned on for non-zero settings.
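+
+For example, to drive an LED at half of whatever range it reports (a
+minimal shell sketch, using a hypothetical "example" LED name):
+
+	max=$(cat /sys/class/leds/example/max_brightness)
+	echo $((max / 2)) > /sys/class/leds/example/brightness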
The class also introduces the optional concept of an LED trigger. A trigger
is a kernel based source of led events. Triggers can either be simple or
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 950cde6d6e5..ba9373f82ab 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -42,6 +42,7 @@
#include <signal.h>
#include "linux/lguest_launcher.h"
#include "linux/virtio_config.h"
+#include <linux/virtio_ids.h>
#include "linux/virtio_net.h"
#include "linux/virtio_blk.h"
#include "linux/virtio_console.h"
@@ -133,6 +134,9 @@ struct device {
/* Is it operational */
bool running;
+ /* Does Guest want an interrupt on empty? */
+ bool irq_on_empty;
+
/* Device-specific data. */
void *priv;
};
@@ -623,10 +627,13 @@ static void trigger_irq(struct virtqueue *vq)
return;
vq->pending_used = 0;
- /* If they don't want an interrupt, don't send one, unless empty. */
- if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && lg_last_avail(vq) != vq->vring.avail->idx)
- return;
+ /* If they don't want an interrupt, don't send one... */
+ if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
+ /* ... unless they've asked us to force one on empty. */
+ if (!vq->dev->irq_on_empty
+ || lg_last_avail(vq) != vq->vring.avail->idx)
+ return;
+ }
/* Send the Guest an interrupt tell them we used something up. */
if (write(lguest_fd, buf, sizeof(buf)) != 0)
@@ -1042,6 +1049,15 @@ static void create_thread(struct virtqueue *vq)
close(vq->eventfd);
}
+static bool accepted_feature(struct device *dev, unsigned int bit)
+{
+ const u8 *features = get_feature_bits(dev) + dev->feature_len;
+
+ if (dev->feature_len < bit / CHAR_BIT)
+ return false;
+ return features[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT));
+}
+
static void start_device(struct device *dev)
{
unsigned int i;
@@ -1055,6 +1071,8 @@ static void start_device(struct device *dev)
verbose(" %02x", get_feature_bits(dev)
[dev->feature_len+i]);
+ dev->irq_on_empty = accepted_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY);
+
for (vq = dev->vq; vq; vq = vq->next) {
if (vq->service)
create_thread(vq);
diff --git a/Documentation/memory.txt b/Documentation/memory.txt
index 2b3dedd3953..802efe58647 100644
--- a/Documentation/memory.txt
+++ b/Documentation/memory.txt
@@ -1,18 +1,7 @@
There are several classic problems related to memory on Linux
systems.
- 1) There are some buggy motherboards which cannot properly
- deal with the memory above 16MB. Consider exchanging
- your motherboard.
-
- 2) You cannot do DMA on the ISA bus to addresses above
- 16M. Most device drivers under Linux allow the use
- of bounce buffers which work around this problem. Drivers
- that don't use bounce buffers will be unstable with
- more than 16M installed. Drivers that use bounce buffers
- will be OK, but may have slightly higher overhead.
-
- 3) There are some motherboards that will not cache above
+ 1) There are some motherboards that will not cache above
a certain quantity of memory. If you have one of these
motherboards, your system will be SLOWER, not faster
as you add more memory. Consider exchanging your
@@ -24,7 +13,7 @@ It can also tell Linux to use less memory than is actually installed.
If you use "mem=" on a machine with PCI, consider using "memmap=" to avoid
physical address space collisions.
-See the documentation of your boot loader (LILO, loadlin, etc.) about
+See the documentation of your boot loader (LILO, grub, loadlin, etc.) about
how to pass options to the kernel.
There are other memory problems which Linux cannot deal with. Random
@@ -42,19 +31,3 @@ Try:
with the vendor. Consider testing it with memtest86 yourself.
* Exchanging your CPU, cache, or motherboard for one that works.
-
- * Disabling the cache from the BIOS.
-
- * Try passing the "mem=4M" option to the kernel to limit
- Linux to using a very small amount of memory. Use "memmap="-option
- together with "mem=" on systems with PCI to avoid physical address
- space collisions.
-
-
-Other tricks:
-
- * Try passing the "no-387" option to the kernel to ignore
- a buggy FPU.
-
- * Try passing the "no-hlt" option to disable the potentially
- buggy HLT instruction in your CPU.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
index eaa1a25946c..ee31369e9e5 100644
--- a/Documentation/networking/regulatory.txt
+++ b/Documentation/networking/regulatory.txt
@@ -96,7 +96,7 @@ Example code - drivers hinting an alpha2:
This example comes from the zd1211rw device driver. You can start
by having a mapping of your device's EEPROM country/regulatory
-domain value to to a specific alpha2 as follows:
+domain value to a specific alpha2 as follows:
static struct zd_reg_alpha2_map reg_alpha2_map[] = {
{ ZD_REGDOMAIN_FCC, "US" },
diff --git a/Documentation/numastat.txt b/Documentation/numastat.txt
index 80133ace1eb..9fcc9a608dc 100644
--- a/Documentation/numastat.txt
+++ b/Documentation/numastat.txt
@@ -7,10 +7,10 @@ All units are pages. Hugepages have separate counters.
numa_hit A process wanted to allocate memory from this node,
and succeeded.
-numa_miss A process wanted to allocate memory from this node,
- but ended up with memory from another.
-numa_foreign A process wanted to allocate on another node,
- but ended up with memory from this one.
+numa_miss A process wanted to allocate memory from another node,
+ but ended up with memory from this node.
+numa_foreign A process wanted to allocate on this node,
+ but ended up with memory from another one.
local_node A process ran on this node and got memory from it.
other_node A process ran on this node and got memory from another node.
interleave_hit Interleaving wanted to allocate from this node
diff --git a/Documentation/pcmcia/crc32hash.c b/Documentation/pcmcia/crc32hash.c
index 4210e5abab8..44f8beea726 100644
--- a/Documentation/pcmcia/crc32hash.c
+++ b/Documentation/pcmcia/crc32hash.c
@@ -8,7 +8,7 @@ $ ./crc32hash "Dual Speed"
#include <ctype.h>
#include <stdlib.h>
-unsigned int crc32(unsigned char const *p, unsigned int len)
+static unsigned int crc32(unsigned char const *p, unsigned int len)
{
int i;
unsigned int crc = 0;
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index c6cd4956047..9f16c5178b6 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -76,6 +76,11 @@ STATUS - this attribute represents operating status (charging, full,
discharging (i.e. powering a load), etc.). This corresponds to
BATTERY_STATUS_* values, as defined in battery.h.
+CHARGE_TYPE - batteries can typically charge at different rates.
+This attribute reports whether a trickle or fast charge is in
+progress. For batteries that are already charged or discharging,
+'n/a' can be displayed (or 'unknown', if the status is not known).
+
HEALTH - represents health of the battery, values corresponds to
POWER_SUPPLY_HEALTH_*, defined in battery.h.
@@ -108,6 +113,8 @@ relative, time-based measurements.
ENERGY_FULL, ENERGY_EMPTY - same as above but for energy.
CAPACITY - capacity in percents.
+CAPACITY_LEVEL - capacity level. This corresponds to
+POWER_SUPPLY_CAPACITY_LEVEL_*.
TEMP - temperature of the power supply.
TEMP_AMBIENT - ambient temperature.
diff --git a/Documentation/power/regulator/design.txt b/Documentation/power/regulator/design.txt
new file mode 100644
index 00000000000..f9b56b72b78
--- /dev/null
+++ b/Documentation/power/regulator/design.txt
@@ -0,0 +1,33 @@
+Regulator API design notes
+==========================
+
+This document provides a brief, partially structured, overview of some
+of the design considerations which impact the regulator API design.
+
+Safety
+------
+
+ - Errors in regulator configuration can have very serious consequences
+ for the system, potentially including lasting hardware damage.
+ - It is not possible to automatically determine the power configuration
+ of the system - software-equivalent variants of the same chip may
+ have different power requirements, and not all components with power
+ requirements are visible to software.
+
+ => The API should make no changes to the hardware state unless it has
+ specific knowledge that these changes are safe to perform on
+ this particular system.
+
+Consumer use cases
+------------------
+
+ - The overwhelming majority of devices in a system will have no
+ requirement to do any runtime configuration of their power beyond
+ being able to turn it on or off.
+
+ - Many of the power supplies in the system will be shared between many
+ different consumers.
+
+ => The consumer API should be structured so that these use cases are
+ very easy to handle and so that consumers will work with shared
+ supplies without any additional effort.
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index ce3487d99ab..63728fed620 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -87,7 +87,7 @@ static struct platform_device regulator_devices[] = {
},
};
/* register regulator 1 device */
-platform_device_register(&wm8350_regulator_devices[0]);
+platform_device_register(&regulator_devices[0]);
/* register regulator 2 device */
-platform_device_register(&wm8350_regulator_devices[1]);
+platform_device_register(&regulator_devices[1]);
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index 0cded696ca0..ffd185bb605 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -29,7 +29,7 @@ Some terms used in this document:-
o PMIC - Power Management IC. An IC that contains numerous regulators
- and often contains other susbsystems.
+ and often contains other subsystems.
o Consumer - Electronic device that is supplied power by a regulator.
@@ -168,4 +168,4 @@ relevant to non SoC devices and is split into the following four interfaces:-
userspace via sysfs. This could be used to help monitor device power
consumption and status.
- See Documentation/ABI/testing/regulator-sysfs.txt
+ See Documentation/ABI/testing/sysfs-class-regulator
diff --git a/Documentation/power/regulator/regulator.txt b/Documentation/power/regulator/regulator.txt
index 4200accb9bb..3f8b528f237 100644
--- a/Documentation/power/regulator/regulator.txt
+++ b/Documentation/power/regulator/regulator.txt
@@ -10,8 +10,9 @@ Registration
Drivers can register a regulator by calling :-
-struct regulator_dev *regulator_register(struct device *dev,
- struct regulator_desc *regulator_desc);
+struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
+ struct device *dev, struct regulator_init_data *init_data,
+ void *driver_data);
This will register the regulators capabilities and operations to the regulator
core.
diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
index 3ed3797b508..8a004073896 100644
--- a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
@@ -10,6 +10,8 @@ Required properties:
- interrupts : should contain eSDHC interrupt.
- interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
+ - sdhci,wp-inverted : (optional) specifies that the eSDHC controller
+ reports inverted write-protect state;
- sdhci,1-bit-only : (optional) specifies that a controller can
only handle 1-bit data transfers.
diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/powerpc/dts-bindings/marvell.txt
index 3708a2fd474..f1533d91953 100644
--- a/Documentation/powerpc/dts-bindings/marvell.txt
+++ b/Documentation/powerpc/dts-bindings/marvell.txt
@@ -32,7 +32,7 @@ prefixed with the string "marvell,", for Marvell Technology Group Ltd.
devices. This field represents the number of cells needed to
represent the address of the memory-mapped registers of devices
within the system controller chip.
- - #size-cells : Size representation for for the memory-mapped
+ - #size-cells : Size representation for the memory-mapped
registers within the system controller chip.
- #interrupt-cells : Defines the width of cells used to represent
interrupts.
diff --git a/Documentation/powerpc/dts-bindings/mtd-physmap.txt b/Documentation/powerpc/dts-bindings/mtd-physmap.txt
index 667c9bde869..80152cb567d 100644
--- a/Documentation/powerpc/dts-bindings/mtd-physmap.txt
+++ b/Documentation/powerpc/dts-bindings/mtd-physmap.txt
@@ -1,18 +1,19 @@
-CFI or JEDEC memory-mapped NOR flash
+CFI or JEDEC memory-mapped NOR flash, MTD-RAM (NVRAM...)
Flash chips (Memory Technology Devices) are often used for solid state
file systems on embedded devices.
- - compatible : should contain the specific model of flash chip(s)
- used, if known, followed by either "cfi-flash" or "jedec-flash"
- - reg : Address range(s) of the flash chip(s)
+ - compatible : should contain the specific model of mtd chip(s)
+ used, if known, followed by either "cfi-flash", "jedec-flash"
+ or "mtd-ram".
+ - reg : Address range(s) of the mtd chip(s)
It's possible to (optionally) define multiple "reg" tuples so that
- non-identical NOR chips can be described in one flash node.
- - bank-width : Width (in bytes) of the flash bank. Equal to the
+ non-identical chips can be described in one node.
+ - bank-width : Width (in bytes) of the bank. Equal to the
device width times the number of interleaved chips.
- - device-width : (optional) Width of a single flash chip. If
+ - device-width : (optional) Width of a single mtd chip. If
omitted, assumed to be equal to 'bank-width'.
- - #address-cells, #size-cells : Must be present if the flash has
+ - #address-cells, #size-cells : Must be present if the device has
sub-nodes representing partitions (see below). In this case
both #address-cells and #size-cells must be equal to 1.
@@ -22,24 +23,24 @@ are defined:
- vendor-id : Contains the flash chip's vendor id (1 byte).
- device-id : Contains the flash chip's device id (1 byte).
-In addition to the information on the flash bank itself, the
+In addition to the information on the mtd bank itself, the
device tree may optionally contain additional information
-describing partitions of the flash address space. This can be
+describing partitions of the address space. This can be
used on platforms which have strong conventions about which
-portions of the flash are used for what purposes, but which don't
+portions of a flash are used for what purposes, but which don't
use an on-flash partition table such as RedBoot.
-Each partition is represented as a sub-node of the flash device.
+Each partition is represented as a sub-node of the mtd device.
Each node's name represents the name of the corresponding
-partition of the flash device.
+partition of the mtd device.
Flash partitions
- - reg : The partition's offset and size within the flash bank.
- - label : (optional) The label / name for this flash partition.
+ - reg : The partition's offset and size within the mtd bank.
+ - label : (optional) The label / name for this partition.
If omitted, the label is taken from the node name (excluding
the unit address).
- read-only : (optional) This parameter, if present, is a hint to
- Linux that this flash partition should only be mounted
+ Linux that this partition should only be mounted
read-only. This is usually used for flash partitions
containing early-boot firmware images or data which should not
be clobbered.
@@ -78,3 +79,12 @@ Here an example with multiple "reg" tuples:
reg = <0 0x04000000>;
};
};
+
+An example using SRAM:
+
+ sram@2,0 {
+ compatible = "samsung,k6f1616u6a", "mtd-ram";
+ reg = <2 0 0x00200000>;
+ bank-width = <2>;
+ };
+
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 8deffcd68cb..9104c106208 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -135,6 +135,30 @@ a high functionality RTC is integrated into the SOC. That system might read
the system clock from the discrete RTC, but use the integrated one for all
other tasks, because of its greater functionality.
+SYSFS INTERFACE
+---------------
+
+The sysfs interface under /sys/class/rtc/rtcN provides access to various
+rtc attributes without requiring the use of ioctls. All dates and times
+are in the RTC's timezone, rather than in system time.
+
+date: RTC-provided date
+hctosys: 1 if the RTC provided the system time at boot via the
+ CONFIG_RTC_HCTOSYS kernel option, 0 otherwise
+max_user_freq: The maximum interrupt rate an unprivileged user may request
+ from this RTC.
+name: The name of the RTC corresponding to this sysfs directory
+since_epoch: The number of seconds since the epoch according to the RTC
+time: RTC-provided time
+wakealarm: The time at which the clock will generate a system wakeup
+ event. This is a one shot wakeup event, so must be reset
+ after wake if a daily wakeup is required. Format is either
+ seconds since the epoch or, if there's a leading +, seconds
+ in the future.
+
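+For example, one way to arm a wakeup alarm 10 minutes from now and read
+it back (a sketch; rtc0 is assumed to be a wakeup-capable RTC):
+
+	echo +600 > /sys/class/rtc/rtc0/wakealarm
+	cat /sys/class/rtc/rtc0/wakealarm
+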
+IOCTL INTERFACE
+---------------
+
The ioctl() calls supported by /dev/rtc are also supported by the RTC class
framework. However, because the chips and systems are not standardized,
some PC/AT functionality might not be provided. And in the same way, some
@@ -185,6 +209,8 @@ driver returns ENOIOCTLCMD. Some common examples:
hardware in the irq_set_freq function. If it isn't, return -EINVAL. If
you cannot actually change the frequency, do not define irq_set_freq.
+ * RTC_PIE_ON, RTC_PIE_OFF: the irq_set_state function will be called.
+
If all else fails, check out the rtc-test.c driver!
diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid
index eaa4801f2ce..38e9e7cadc9 100644
--- a/Documentation/scsi/ChangeLog.megaraid
+++ b/Documentation/scsi/ChangeLog.megaraid
@@ -514,7 +514,7 @@ iv. Remove yield() while mailbox handshake in synchronous commands
v. Remove redundant __megaraid_busywait_mbox routine
-vi. Fix bug in the managment module, which causes a system lockup when the
+vi. Fix bug in the management module, which causes a system lockup when the
IO module is loaded and then unloaded, followed by executing any
management utility. The current version of management module does not
handle the adapter unregister properly.
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index d7f181701dc..aec6549ab09 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -378,7 +378,7 @@ Vport Disable/Enable:
int vport_disable(struct fc_vport *vport, bool disable)
where:
- vport: Is vport to to be enabled or disabled
+ vport: Is vport to be enabled or disabled
disable: If "true", the vport is to be disabled.
If "false", the vport is to be enabled.
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 97eebd63bed..f1708b79f96 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -387,7 +387,7 @@ STAC92HD73*
STAC92HD83*
===========
ref Reference board
- mic-ref Reference board with power managment for ports
+ mic-ref Reference board with power management for ports
dell-s14 Dell laptop
auto BIOS setup (default)
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 4a02d2508bc..deab51ddc33 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -350,7 +350,7 @@ SPI protocol drivers somewhat resemble platform device drivers:
.resume = CHIP_resume,
};
-The driver core will autmatically attempt to bind this driver to any SPI
+The driver core will automatically attempt to bind this driver to any SPI
device whose board_info gave a modalias of "CHIP". Your probe() code
might look like this unless you're creating a device which is managing
a bus (appearing under /sys/class/spi_master).
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index c1a5aad3c75..10abd3773e4 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -69,7 +69,7 @@ static void transfer(int fd)
puts("");
}
-void print_usage(const char *prog)
+static void print_usage(const char *prog)
{
printf("Usage: %s [-DsbdlHOLC3]\n", prog);
puts(" -D --device device to use (default /dev/spidev1.1)\n"
@@ -85,7 +85,7 @@ void print_usage(const char *prog)
exit(1);
}
-void parse_opts(int argc, char *argv[])
+static void parse_opts(int argc, char *argv[])
{
while (1) {
static const struct option lopts[] = {
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 1458448436c..62682500878 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -96,13 +96,16 @@ handles that the Linux kernel will allocate. When you get lots
of error messages about running out of file handles, you might
want to increase this limit.
-The three values in file-nr denote the number of allocated
-file handles, the number of unused file handles and the maximum
-number of file handles. When the allocated file handles come
-close to the maximum, but the number of unused file handles is
-significantly greater than 0, you've encountered a peak in your
-usage of file handles and you don't need to increase the maximum.
-
+Historically, the three values in file-nr denoted the number of
+allocated file handles, the number of allocated but unused file
+handles, and the maximum number of file handles. Linux 2.6 always
+reports 0 as the number of free file handles -- this is not an
+error, it just means that the number of allocated file handles
+exactly matches the number of used file handles.
+
+Attempts to allocate more file descriptors than file-max are
+reported with printk; look for "VFS: file-max limit <number>
+reached".
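+
+For example, the current values can be inspected with either of:
+
+	cat /proc/sys/fs/file-nr
+	sysctl fs.file-nr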
==============================================================
nr_open:
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 2dbff53369d..a028b92001e 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -22,6 +22,7 @@ show up in /proc/sys/kernel:
- callhome [ S390 only ]
- auto_msgmni
- core_pattern
+- core_pipe_limit
- core_uses_pid
- ctrl-alt-del
- dentry-state
@@ -135,6 +136,27 @@ core_pattern is used to specify a core dumpfile pattern name.
==============================================================
+core_pipe_limit:
+
+This sysctl is only applicable when core_pattern is configured to pipe core
+files to a user space helper (when the first character of core_pattern is a '|',
+see above). When collecting cores via a pipe to an application, it is
+occasionally useful for the collecting application to gather data about the
+crashing process from its /proc/pid directory. In order to do this safely, the
+kernel must wait for the collecting process to exit, so as not to remove the
+crashing process's proc files prematurely. This in turn creates the possibility
+that a misbehaving userspace collecting process can block the reaping of a
+crashed process simply by never exiting. This sysctl defends against that. It
+defines how many concurrent crashing processes may be piped to user space
+applications in parallel. If this value is exceeded, then those crashing
+processes above that value are noted via the kernel log and their cores are
+skipped. 0 is a special value, indicating that unlimited processes may be
+captured in parallel, but that no waiting will take place (i.e. the collecting
+process is not guaranteed access to /proc/<crashing pid>/). This value defaults
+to 0.
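+
+As an illustration (the collector path below is hypothetical), a piped
+core collector could be combined with this limit as follows:
+
+	echo "|/usr/local/bin/core-collector %p %s" > /proc/sys/kernel/core_pattern
+	echo 4 > /proc/sys/kernel/core_pipe_limit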
+
+==============================================================
+
core_uses_pid:
The default coredump filename is "core". By setting
@@ -313,31 +335,43 @@ send before ratelimiting kicks in.
==============================================================
+printk_delay:
+
+Delay each printk message by printk_delay milliseconds.
+
+Values from 0 to 10000 are allowed.
+
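+For example, to slow console output down to roughly ten messages per
+second while debugging (the value is only illustrative):
+
+	echo 100 > /proc/sys/kernel/printk_delay
+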
+==============================================================
+
randomize-va-space:
This option can be used to select the type of process address
space randomization that is used in the system, for architectures
that support this feature.
-0 - Turn the process address space randomization off by default.
+0 - Turn the process address space randomization off. This is the
+ default for architectures that do not support this feature anyway,
+ and kernels that are booted with the "norandmaps" parameter.
1 - Make the addresses of mmap base, stack and VDSO page randomized.
This, among other things, implies that shared libraries will be
- loaded to random addresses. Also for PIE-linked binaries, the location
- of code start is randomized.
+ loaded to random addresses. Also for PIE-linked binaries, the
+ location of code start is randomized. This is the default if the
+ CONFIG_COMPAT_BRK option is enabled.
- With heap randomization, the situation is a little bit more
- complicated.
- There a few legacy applications out there (such as some ancient
+2 - Additionally enable heap randomization. This is the default if
+ CONFIG_COMPAT_BRK is disabled.
+
+ There are a few legacy applications out there (such as some ancient
versions of libc.so.5 from 1996) that assume that brk area starts
- just after the end of the code+bss. These applications break when
- start of the brk area is randomized. There are however no known
+ just after the end of the code+bss. These applications break when
+ start of the brk area is randomized. There are however no known
non-legacy applications that would be broken this way, so for most
- systems it is safe to choose full randomization. However there is
- a CONFIG_COMPAT_BRK option for systems with ancient and/or broken
- binaries, that makes heap non-randomized, but keeps all other
- parts of process address space randomized if randomize_va_space
- sysctl is turned on.
+ systems it is safe to choose full randomization.
+
+ Systems with ancient and/or broken binaries should be configured
+ with CONFIG_COMPAT_BRK enabled, which excludes the heap from process
+ address space randomization.
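+
+For example, full randomization can be selected at runtime with either
+of the following (they are equivalent):
+
+	sysctl -w kernel.randomize_va_space=2
+	echo 2 > /proc/sys/kernel/randomize_va_space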
==============================================================
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index c4de6359d44..a6e360d2055 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/vm:
- legacy_va_layout
- lowmem_reserve_ratio
- max_map_count
+- memory_failure_early_kill
+- memory_failure_recovery
- min_free_kbytes
- min_slab_ratio
- min_unmapped_ratio
@@ -53,7 +55,6 @@ Currently, these files are in /proc/sys/vm:
- vfs_cache_pressure
- zone_reclaim_mode
-
==============================================================
block_dump
@@ -275,6 +276,44 @@ e.g., up to one or two maps per allocation.
The default value is 65536.
+=============================================================
+
+memory_failure_early_kill:
+
+Control how to kill processes when an uncorrected memory error (typically
+a 2-bit error in a memory module) that cannot be handled by the kernel
+is detected in the background by hardware. In some cases (like the page
+still having a valid copy on disk) the kernel will handle the failure
+transparently without affecting any applications. But if there is
+no other up-to-date copy of the data, it will kill the affected processes
+to prevent any data corruption from propagating.
+
+1: Kill all processes that have the corrupted, non-reloadable page mapped
+as soon as the corruption is detected. Note this is not supported
+for a few types of pages, like kernel internally allocated data or
+the swap cache, but works for the majority of user pages.
+
+0: Only unmap the corrupted page from all processes and only kill a process
+that tries to access it.
+
+The kill is done using a catchable SIGBUS with BUS_MCEERR_AO, so processes can
+handle this if they want to.
+
+This is only active on architectures/platforms with advanced machine
+check handling and depends on the hardware capabilities.
+
+Applications can override this setting individually with the PR_MCE_KILL prctl.
+
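+For example, early kill can be enabled system wide with (assuming the
+sysctl is available on your platform):
+
+	echo 1 > /proc/sys/vm/memory_failure_early_kill
+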
+==============================================================
+
+memory_failure_recovery
+
+Enable memory failure recovery (when supported by the platform)
+
+1: Attempt recovery.
+
+0: Always panic on a memory failure.
+
==============================================================
min_free_kbytes:
@@ -585,7 +624,9 @@ caching of directory and inode objects.
At the default value of vfs_cache_pressure=100 the kernel will attempt to
reclaim dentries and inodes at a "fair" rate with respect to pagecache and
swapcache reclaim. Decreasing vfs_cache_pressure causes the kernel to prefer
-to retain dentry and inode caches. Increasing vfs_cache_pressure beyond 100
+to retain dentry and inode caches. When vfs_cache_pressure=0, the kernel will
+never reclaim dentries and inodes due to memory pressure and this can easily
+lead to out-of-memory conditions. Increasing vfs_cache_pressure beyond 100
causes the kernel to prefer to reclaim dentries and inodes.
==============================================================
diff --git a/Documentation/trace/events-kmem.txt b/Documentation/trace/events-kmem.txt
new file mode 100644
index 00000000000..6ef2a8652e1
--- /dev/null
+++ b/Documentation/trace/events-kmem.txt
@@ -0,0 +1,107 @@
+ Subsystem Trace Points: kmem
+
+The tracing system kmem captures events related to object and page allocation
+within the kernel. Broadly speaking, there are five major subheadings:
+
+ o Slab allocation of small objects of unknown type (kmalloc)
+ o Slab allocation of small objects of known type
+ o Page allocation
+ o Per-CPU Allocator Activity
+ o External Fragmentation
+
+This document describes what each of the tracepoints is and why it
+might be useful.
+
+1. Slab allocation of small objects of unknown type
+===================================================
+kmalloc call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s
+kmalloc_node call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d
+kfree call_site=%lx ptr=%p
+
+Heavy activity for these events may indicate that a specific cache is
+justified, particularly if kmalloc slab pages are getting significantly
+internally fragmented as a result of the allocation pattern. By correlating
+kmalloc with kfree, it may be possible to identify memory leaks and the
+allocation sites responsible.
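+
+As a rough illustration of checking for internal fragmentation, the following
+sketch tallies bytes_req against bytes_alloc from kmalloc events read on
+stdin (for example from trace_pipe); the field names are those in the event
+format above, everything else is an assumption:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Sums requested vs allocated bytes over all kmalloc/kmalloc_node events. */
+  int main(void)
+  {
+          char line[1024];
+          unsigned long long req = 0, alloc = 0, events = 0;
+
+          while (fgets(line, sizeof(line), stdin)) {
+                  char *r = strstr(line, "bytes_req=");
+                  char *a = strstr(line, "bytes_alloc=");
+                  unsigned long long br, ba;
+
+                  if (!strstr(line, " kmalloc") || !r || !a)
+                          continue;
+                  if (sscanf(r, "bytes_req=%llu", &br) == 1 &&
+                      sscanf(a, "bytes_alloc=%llu", &ba) == 1) {
+                          req += br;
+                          alloc += ba;
+                          events++;
+                  }
+          }
+
+          printf("%llu kmalloc events: %llu bytes requested, %llu allocated",
+                 events, req, alloc);
+          if (req)
+                  printf(" (%.1f%% overhead)", 100.0 * (alloc - req) / req);
+          printf("\n");
+          return 0;
+  }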
+
+
+2. Slab allocation of small objects of known type
+=================================================
+kmem_cache_alloc call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s
+kmem_cache_alloc_node call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d
+kmem_cache_free call_site=%lx ptr=%p
+
+These events are similar in usage to the kmalloc-related events except that
+it is likely easier to pin the event down to a specific cache. At the time
+of writing, no information is available on which slab cache is being allocated
+from, but the call_site can usually be used to extrapolate that information.
+
+3. Page allocation
+==================
+mm_page_alloc page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
+mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
+mm_page_free_direct page=%p pfn=%lu order=%d
+mm_pagevec_free page=%p pfn=%lu order=%d cold=%d
+
+These four events deal with page allocation and freeing. mm_page_alloc is
+a simple indicator of page allocator activity. Pages may be allocated from
+the per-CPU allocator (high performance) or the buddy allocator.
+
+If pages are allocated directly from the buddy allocator, the
+mm_page_alloc_zone_locked event is triggered. This event is important as high
+amounts of activity imply high activity on the zone->lock. Taking this lock
+impairs performance by disabling interrupts, dirtying cache lines between
+CPUs and serialising many CPUs.
+
+When a page is freed directly by the caller, the mm_page_free_direct event
+is triggered. Significant amounts of activity here could indicate that the
+callers should be batching their activities.
+
+When pages are freed using a pagevec, the mm_pagevec_free event is
+triggered. Broadly speaking, pages are taken off the LRU list in bulk and
+freed in batch with a pagevec. Significant amounts of activity here could
+indicate that the system is under memory pressure and can also indicate
+contention on the zone->lru_lock.
+
+4. Per-CPU Allocator Activity
+=============================
+mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
+mm_page_pcpu_drain page=%p pfn=%lu order=%d cpu=%d migratetype=%d
+
+In front of the page allocator is a per-cpu page allocator. It exists only
+for order-0 pages, reduces contention on the zone->lock and reduces the
+amount of writing on struct page.
+
+When a per-CPU list is empty, or contains no pages of the required type,
+the zone->lock will be taken once and the per-CPU list refilled. The
+mm_page_alloc_zone_locked event is triggered for each page allocated, with
+the percpu_refill field indicating whether the allocation was part of a refill.
+
+When the per-CPU list is too full, a number of pages are freed, each of
+which triggers a mm_page_pcpu_drain event.
+
+The events are individual so that pages can be tracked between allocation
+and freeing. A run of drain or refill events that occur consecutively
+implies that the zone->lock was taken once. Large numbers of PCP refills
+and drains could imply an imbalance between CPUs where too much work is
+being concentrated in one place. It could also indicate that the per-CPU
+lists should be a larger size. Finally, large numbers of refills on one CPU
+and drains on another could cause large numbers of cache line bounces due
+to writes between CPUs; it is worth investigating whether pages can be
+allocated and freed on the same CPU through some algorithmic change.
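+
+For illustration, a minimal sketch of that derivation, counting a run of
+consecutive drain or refill events as one high-level event (the
+post-processing script below performs the same derivation per process); it
+expects the text of trace_pipe on stdin:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Counts runs of consecutive pcpu drain/refill tracepoints as single
+   * high-level drain/refill events. */
+  int main(void)
+  {
+          char line[1024];
+          unsigned long drains = 0, refills = 0;
+          int in_drain = 0, in_refill = 0;
+
+          while (fgets(line, sizeof(line), stdin)) {
+                  int drain = strstr(line, " mm_page_pcpu_drain:") != NULL;
+                  int refill = strstr(line, " mm_page_alloc_zone_locked:") != NULL;
+
+                  if (in_drain && !drain)    /* a run of drains just ended */
+                          drains++;
+                  if (in_refill && !refill)  /* a run of refills just ended */
+                          refills++;
+
+                  in_drain = drain;
+                  in_refill = refill;
+          }
+          if (in_drain)
+                  drains++;
+          if (in_refill)
+                  refills++;
+
+          printf("high-level PCP drains:  %lu\n", drains);
+          printf("high-level PCP refills: %lu\n", refills);
+          return 0;
+  }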
+
+5. External Fragmentation
+=========================
+mm_page_alloc_extfrag page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d
+
+External fragmentation affects whether a high-order allocation will be
+successful or not. Some types of hardware require high-order allocations,
+although they are avoided where possible. If the system is using huge pages
+and needs to be able to resize the pool over the lifetime of the system,
+this event is important to watch.
+
+Large numbers of this event imply that memory is fragmenting and that
+high-order allocations will start failing at some time in the future. One
+means of reducing the occurrence of this event is to increase
+min_free_kbytes in increments of 3*pageblock_size*nr_online_nodes, where
+pageblock_size is usually the same as the default huge page size (for
+example, with 2MB pageblocks and one online node, increments of 6144kB).
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 78c45a87be5..02ac6ed38b2 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -72,7 +72,7 @@ To enable all events in sched subsystem:
# echo 1 > /sys/kernel/debug/tracing/events/sched/enable
-To eanble all events:
+To enable all events:
# echo 1 > /sys/kernel/debug/tracing/events/enable
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 1b6292bbdd6..957b22fde2d 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -133,7 +133,7 @@ of ftrace. Here is a list of some of the key files:
than requested, the rest of the page will be used,
making the actual allocation bigger than requested.
( Note, the size may not be a multiple of the page size
- due to buffer managment overhead. )
+ due to buffer management overhead. )
This can only be updated when the current_tracer
is set to "nop".
diff --git a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
new file mode 100644
index 00000000000..7df50e8cf4d
--- /dev/null
+++ b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
@@ -0,0 +1,418 @@
+#!/usr/bin/perl
+# This is a POC (proof of concept or piece of crap, take your pick) for reading the
+# text representation of trace output related to page allocation. It makes an attempt
+# to extract some high-level information on what is going on. The accuracy of the parser
+# may vary considerably
+#
+# Example usage: trace-pagealloc-postprocess.pl < /sys/kernel/debug/tracing/trace_pipe
+# other options
+# --prepend-parent Report on the parent proc and PID
+# --read-procstat If the trace lacks process info, get it from /proc
+# --ignore-pid Aggregate processes of the same name together
+#
+# Copyright (c) IBM Corporation 2009
+# Author: Mel Gorman <mel@csn.ul.ie>
+use strict;
+use Getopt::Long;
+
+# Tracepoint events
+use constant MM_PAGE_ALLOC => 1;
+use constant MM_PAGE_FREE_DIRECT => 2;
+use constant MM_PAGEVEC_FREE => 3;
+use constant MM_PAGE_PCPU_DRAIN => 4;
+use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5;
+use constant MM_PAGE_ALLOC_EXTFRAG => 6;
+use constant EVENT_UNKNOWN => 7;
+
+# Constants used to track state
+use constant STATE_PCPU_PAGES_DRAINED => 8;
+use constant STATE_PCPU_PAGES_REFILLED => 9;
+
+# High-level events extrapolated from tracepoints
+use constant HIGH_PCPU_DRAINS => 10;
+use constant HIGH_PCPU_REFILLS => 11;
+use constant HIGH_EXT_FRAGMENT => 12;
+use constant HIGH_EXT_FRAGMENT_SEVERE => 13;
+use constant HIGH_EXT_FRAGMENT_MODERATE => 14;
+use constant HIGH_EXT_FRAGMENT_CHANGED => 15;
+
+my %perprocesspid;
+my %perprocess;
+my $opt_ignorepid;
+my $opt_read_procstat;
+my $opt_prepend_parent;
+
+# Catch sigint and exit on request
+my $sigint_report = 0;
+my $sigint_exit = 0;
+my $sigint_pending = 0;
+my $sigint_received = 0;
+sub sigint_handler {
+ my $current_time = time;
+ if ($current_time - 2 > $sigint_received) {
+ print "SIGINT received, report pending. Hit ctrl-c again to exit\n";
+ $sigint_report = 1;
+ } else {
+ if (!$sigint_exit) {
+ print "Second SIGINT received quickly, exiting\n";
+ }
+ $sigint_exit++;
+ }
+
+ if ($sigint_exit > 3) {
+ print "Many SIGINTs received, exiting now without report\n";
+ exit;
+ }
+
+ $sigint_received = $current_time;
+ $sigint_pending = 1;
+}
+$SIG{INT} = "sigint_handler";
+
+# Parse command line options
+GetOptions(
+ 'ignore-pid' => \$opt_ignorepid,
+ 'read-procstat' => \$opt_read_procstat,
+ 'prepend-parent' => \$opt_prepend_parent,
+);
+
+# Defaults for dynamically discovered regexes
+my $regex_fragdetails_default = 'page=([0-9a-f]*) pfn=([0-9]*) alloc_order=([-0-9]*) fallback_order=([-0-9]*) pageblock_order=([-0-9]*) alloc_migratetype=([-0-9]*) fallback_migratetype=([-0-9]*) fragmenting=([-0-9]) change_ownership=([-0-9])';
+
+# Dynamically discovered regex
+my $regex_fragdetails;
+
+# Static regex used. Specified like this for readability and for use with /o
+# (process_pid) (cpus ) ( time ) (tpoint ) (details)
+my $regex_traceevent = '\s*([a-zA-Z0-9-]*)\s*(\[[0-9]*\])\s*([0-9.]*):\s*([a-zA-Z_]*):\s*(.*)';
+my $regex_statname = '[-0-9]*\s\((.*)\).*';
+my $regex_statppid = '[-0-9]*\s\(.*\)\s[A-Za-z]\s([0-9]*).*';
+
+sub generate_traceevent_regex {
+ my $event = shift;
+ my $default = shift;
+ my $regex;
+
+ # Read the event format or use the default
+ if (!open (FORMAT, "/sys/kernel/debug/tracing/events/$event/format")) {
+ $regex = $default;
+ } else {
+ my $line;
+ while (!eof(FORMAT)) {
+ $line = <FORMAT>;
+ if ($line =~ /^print fmt:\s"(.*)",.*/) {
+ $regex = $1;
+ $regex =~ s/%p/\([0-9a-f]*\)/g;
+ $regex =~ s/%d/\([-0-9]*\)/g;
+ $regex =~ s/%lu/\([0-9]*\)/g;
+ }
+ }
+ }
+
+ # Verify fields are in the right order
+ my $tuple;
+ foreach $tuple (split /\s/, $regex) {
+ my ($key, $value) = split(/=/, $tuple);
+ my $expected = shift;
+ if ($key ne $expected) {
+ print("WARNING: Format not as expected '$key' != '$expected'");
+ $regex =~ s/$key=\((.*)\)/$key=$1/;
+ }
+ }
+
+ if (defined shift) {
+ die("Fewer fields than expected in format");
+ }
+
+ return $regex;
+}
+$regex_fragdetails = generate_traceevent_regex("kmem/mm_page_alloc_extfrag",
+ $regex_fragdetails_default,
+ "page", "pfn",
+ "alloc_order", "fallback_order", "pageblock_order",
+ "alloc_migratetype", "fallback_migratetype",
+ "fragmenting", "change_ownership");
+
+sub read_statline($) {
+ my $pid = $_[0];
+ my $statline;
+
+ if (open(STAT, "/proc/$pid/stat")) {
+ $statline = <STAT>;
+ close(STAT);
+ }
+
+ if ($statline eq '') {
+ $statline = "-1 (UNKNOWN_PROCESS_NAME) R 0";
+ }
+
+ return $statline;
+}
+
+sub guess_process_pid($$) {
+ my $pid = $_[0];
+ my $statline = $_[1];
+
+ if ($pid == 0) {
+ return "swapper-0";
+ }
+
+ if ($statline !~ /$regex_statname/o) {
+ die("Failed to match stat line for process name :: $statline");
+ }
+ return "$1-$pid";
+}
+
+sub parent_info($$) {
+ my $pid = $_[0];
+ my $statline = $_[1];
+ my $ppid;
+
+ if ($pid == 0) {
+ return "NOPARENT-0";
+ }
+
+ if ($statline !~ /$regex_statppid/o) {
+ die("Failed to match stat line process ppid:: $statline");
+ }
+
+ # Read the ppid stat line
+ $ppid = $1;
+ return guess_process_pid($ppid, read_statline($ppid));
+}
+
+sub process_events {
+ my $traceevent;
+ my $process_pid;
+ my $cpus;
+ my $timestamp;
+ my $tracepoint;
+ my $details;
+ my $statline;
+
+ # Read each line of the event log
+EVENT_PROCESS:
+ while ($traceevent = <STDIN>) {
+ if ($traceevent =~ /$regex_traceevent/o) {
+ $process_pid = $1;
+ $tracepoint = $4;
+
+ if ($opt_read_procstat || $opt_prepend_parent) {
+ $process_pid =~ /(.*)-([0-9]*)$/;
+ my $process = $1;
+ my $pid = $2;
+
+ $statline = read_statline($pid);
+
+ if ($opt_read_procstat && $process eq '') {
+ $process_pid = guess_process_pid($pid, $statline);
+ }
+
+ if ($opt_prepend_parent) {
+ $process_pid = parent_info($pid, $statline) . " :: $process_pid";
+ }
+ }
+
+ # Unnecessary in this script. Uncomment if required
+ # $cpus = $2;
+ # $timestamp = $3;
+ } else {
+ next;
+ }
+
+ # Perl Switch() sucks majorly
+ if ($tracepoint eq "mm_page_alloc") {
+ $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++;
+ } elsif ($tracepoint eq "mm_page_free_direct") {
+ $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}++;
+ } elsif ($tracepoint eq "mm_pagevec_free") {
+ $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}++;
+ } elsif ($tracepoint eq "mm_page_pcpu_drain") {
+ $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++;
+ $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++;
+ } elsif ($tracepoint eq "mm_page_alloc_zone_locked") {
+ $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED}++;
+ $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_REFILLED}++;
+ } elsif ($tracepoint eq "mm_page_alloc_extfrag") {
+
+ # Extract the details of the event now
+ $details = $5;
+
+ my ($page, $pfn);
+ my ($alloc_order, $fallback_order, $pageblock_order);
+ my ($alloc_migratetype, $fallback_migratetype);
+ my ($fragmenting, $change_ownership);
+
+ if ($details !~ /$regex_fragdetails/o) {
+ print "WARNING: Failed to parse mm_page_alloc_extfrag as expected\n";
+ next;
+ }
+
+ $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_EXTFRAG}++;
+ $page = $1;
+ $pfn = $2;
+ $alloc_order = $3;
+ $fallback_order = $4;
+ $pageblock_order = $5;
+ $alloc_migratetype = $6;
+ $fallback_migratetype = $7;
+ $fragmenting = $8;
+ $change_ownership = $9;
+
+ if ($fragmenting) {
+ $perprocesspid{$process_pid}->{HIGH_EXT_FRAG}++;
+ if ($fallback_order <= 3) {
+ $perprocesspid{$process_pid}->{HIGH_EXT_FRAGMENT_SEVERE}++;
+ } else {
+ $perprocesspid{$process_pid}->{HIGH_EXT_FRAGMENT_MODERATE}++;
+ }
+ }
+ if ($change_ownership) {
+ $perprocesspid{$process_pid}->{HIGH_EXT_FRAGMENT_CHANGED}++;
+ }
+ } else {
+ $perprocesspid{$process_pid}->{EVENT_UNKNOWN}++;
+ }
+
+ # Catch a full pcpu drain event
+ if ($perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED} &&
+ $tracepoint ne "mm_page_pcpu_drain") {
+
+ $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS}++;
+ $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED} = 0;
+ }
+
+ # Catch a full pcpu refill event
+ if ($perprocesspid{$process_pid}->{STATE_PCPU_PAGES_REFILLED} &&
+ $tracepoint ne "mm_page_alloc_zone_locked") {
+ $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS}++;
+ $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_REFILLED} = 0;
+ }
+
+ if ($sigint_pending) {
+ last EVENT_PROCESS;
+ }
+ }
+}
+
+sub dump_stats {
+ my $hashref = shift;
+ my %stats = %$hashref;
+
+ # Dump per-process stats
+ my $process_pid;
+ my $max_strlen = 0;
+
+ # Get the maximum process name
+ foreach $process_pid (keys %perprocesspid) {
+ my $len = length($process_pid);
+ if ($len > $max_strlen) {
+ $max_strlen = $len;
+ }
+ }
+ $max_strlen += 2;
+
+ printf("\n");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s\n",
+ "Process", "Pages", "Pages", "Pages", "Pages", "PCPU", "PCPU", "PCPU", "Fragment", "Fragment", "MigType", "Fragment", "Fragment", "Unknown");
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s\n",
+ "details", "allocd", "allocd", "freed", "freed", "pages", "drains", "refills", "Fallback", "Causing", "Changed", "Severe", "Moderate", "");
+
+ printf("%-" . $max_strlen . "s %8s %10s %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s\n",
+ "", "", "under lock", "direct", "pagevec", "drain", "", "", "", "", "", "", "", "");
+
+ foreach $process_pid (keys %stats) {
+ # Dump final aggregates
+ if ($stats{$process_pid}->{STATE_PCPU_PAGES_DRAINED}) {
+ $stats{$process_pid}->{HIGH_PCPU_DRAINS}++;
+ $stats{$process_pid}->{STATE_PCPU_PAGES_DRAINED} = 0;
+ }
+ if ($stats{$process_pid}->{STATE_PCPU_PAGES_REFILLED}) {
+ $stats{$process_pid}->{HIGH_PCPU_REFILLS}++;
+ $stats{$process_pid}->{STATE_PCPU_PAGES_REFILLED} = 0;
+ }
+
+ printf("%-" . $max_strlen . "s %8d %10d %8d %8d %8d %8d %8d %8d %8d %8d %8d %8d %8d\n",
+ $process_pid,
+ $stats{$process_pid}->{MM_PAGE_ALLOC},
+ $stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED},
+ $stats{$process_pid}->{MM_PAGE_FREE_DIRECT},
+ $stats{$process_pid}->{MM_PAGEVEC_FREE},
+ $stats{$process_pid}->{MM_PAGE_PCPU_DRAIN},
+ $stats{$process_pid}->{HIGH_PCPU_DRAINS},
+ $stats{$process_pid}->{HIGH_PCPU_REFILLS},
+ $stats{$process_pid}->{MM_PAGE_ALLOC_EXTFRAG},
+ $stats{$process_pid}->{HIGH_EXT_FRAG},
+ $stats{$process_pid}->{HIGH_EXT_FRAGMENT_CHANGED},
+ $stats{$process_pid}->{HIGH_EXT_FRAGMENT_SEVERE},
+ $stats{$process_pid}->{HIGH_EXT_FRAGMENT_MODERATE},
+ $stats{$process_pid}->{EVENT_UNKNOWN});
+ }
+}
+
+sub aggregate_perprocesspid() {
+ my $process_pid;
+ my $process;
+ undef %perprocess;
+
+ foreach $process_pid (keys %perprocesspid) {
+ $process = $process_pid;
+ $process =~ s/-([0-9])*$//;
+ if ($process eq '') {
+ $process = "NO_PROCESS_NAME";
+ }
+
+ $perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC};
+ $perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED};
+ $perprocess{$process}->{MM_PAGE_FREE_DIRECT} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT};
+ $perprocess{$process}->{MM_PAGEVEC_FREE} += $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE};
+ $perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN};
+ $perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS};
+ $perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS};
+ $perprocess{$process}->{MM_PAGE_ALLOC_EXTFRAG} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_EXTFRAG};
+ $perprocess{$process}->{HIGH_EXT_FRAG} += $perprocesspid{$process_pid}->{HIGH_EXT_FRAG};
+ $perprocess{$process}->{HIGH_EXT_FRAGMENT_CHANGED} += $perprocesspid{$process_pid}->{HIGH_EXT_FRAGMENT_CHANGED};
+ $perprocess{$process}->{HIGH_EXT_FRAGMENT_SEVERE} += $perprocesspid{$process_pid}->{HIGH_EXT_FRAGMENT_SEVERE};
+ $perprocess{$process}->{HIGH_EXT_FRAGMENT_MODERATE} += $perprocesspid{$process_pid}->{HIGH_EXT_FRAGMENT_MODERATE};
+ $perprocess{$process}->{EVENT_UNKNOWN} += $perprocesspid{$process_pid}->{EVENT_UNKNOWN};
+ }
+}
+
+sub report() {
+ if (!$opt_ignorepid) {
+ dump_stats(\%perprocesspid);
+ } else {
+ aggregate_perprocesspid();
+ dump_stats(\%perprocess);
+ }
+}
+
+# Process events or signals until neither is available
+sub signal_loop() {
+ my $sigint_processed;
+ do {
+ $sigint_processed = 0;
+ process_events();
+
+ # Handle pending signals if any
+ if ($sigint_pending) {
+ my $current_time = time;
+
+ if ($sigint_exit) {
+ print "Received exit signal\n";
+ $sigint_pending = 0;
+ }
+ if ($sigint_report) {
+ if ($current_time >= $sigint_received + 2) {
+ report();
+ $sigint_report = 0;
+ $sigint_pending = 0;
+ $sigint_processed = 1;
+ }
+ }
+ }
+ } while ($sigint_pending || $sigint_processed);
+}
+
+signal_loop();
+report();
diff --git a/Documentation/trace/tracepoint-analysis.txt b/Documentation/trace/tracepoint-analysis.txt
new file mode 100644
index 00000000000..5eb4e487e66
--- /dev/null
+++ b/Documentation/trace/tracepoint-analysis.txt
@@ -0,0 +1,327 @@
+ Notes on Analysing Behaviour Using Events and Tracepoints
+
+ Documentation written by Mel Gorman
+ PCL information heavily based on email from Ingo Molnar
+
+1. Introduction
+===============
+
+Tracepoints (see Documentation/trace/tracepoints.txt) can be used without
+creating custom kernel modules to register probe functions using the event
+tracing infrastructure.
+
+Simplistically, a tracepoint represents an important event that can be taken
+in conjunction with other tracepoints to build a "Big Picture" of what is
+going on within the system. There are a large number of methods for
+gathering and interpreting these events. Lacking any current best practices,
+this document describes some of the methods that can be used.
+
+This document assumes that debugfs is mounted on /sys/kernel/debug and that
+the appropriate tracing options have been configured into the kernel. It is
+assumed that the PCL tool tools/perf has been installed and is in your path.
+
+2. Listing Available Events
+===========================
+
+2.1 Standard Utilities
+----------------------
+
+All possible events are visible from /sys/kernel/debug/tracing/events. Simply
+calling
+
+ $ find /sys/kernel/debug/tracing/events -type d
+
+will give a fair indication of the number of events available.
+
+2.2 PCL
+-------
+
+Discovery and enumeration of all counters and events, including tracepoints,
+are available with the perf tool. Getting a list of available events is a
+simple case of:
+
+ $ perf list 2>&1 | grep Tracepoint
+ ext4:ext4_free_inode [Tracepoint event]
+ ext4:ext4_request_inode [Tracepoint event]
+ ext4:ext4_allocate_inode [Tracepoint event]
+ ext4:ext4_write_begin [Tracepoint event]
+ ext4:ext4_ordered_write_end [Tracepoint event]
+ [ .... remaining output snipped .... ]
+
+
+3. Enabling Events
+==================
+
+3.1 System-Wide Event Enabling
+------------------------------
+
+See Documentation/trace/events.txt for a proper description of how events
+can be enabled system-wide. A short example of enabling all events related
+to page allocation would look something like:
+
+ $ for i in `find /sys/kernel/debug/tracing/events -name "enable" | grep mm_`; do echo 1 > $i; done
+
+3.2 System-Wide Event Enabling with SystemTap
+---------------------------------------------
+
+In SystemTap, tracepoints are accessible using the kernel.trace() function
+call. The following is an example that reports every 5 seconds which
+processes were allocating pages:
+
+ global page_allocs
+
+ probe kernel.trace("mm_page_alloc") {
+ page_allocs[execname()]++
+ }
+
+ function print_count() {
+ printf ("%-25s %-s\n", "#Pages Allocated", "Process Name")
+ foreach (proc in page_allocs-)
+ printf("%-25d %s\n", page_allocs[proc], proc)
+ printf ("\n")
+ delete page_allocs
+ }
+
+ probe timer.s(5) {
+ print_count()
+ }
+
+3.3 System-Wide Event Enabling with PCL
+---------------------------------------
+
+By specifying the -a switch and timing a sleep command, the system-wide
+events occurring during that interval can be examined:
+
+ $ perf stat -a \
+ -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
+ -e kmem:mm_pagevec_free \
+ sleep 10
+ Performance counter stats for 'sleep 10':
+
+ 9630 kmem:mm_page_alloc
+ 2143 kmem:mm_page_free_direct
+ 7424 kmem:mm_pagevec_free
+
+ 10.002577764 seconds time elapsed
+
+Similarly, one could execute a shell and exit it as desired to get a report
+at that point.
+
+3.4 Local Event Enabling
+------------------------
+
+Documentation/trace/ftrace.txt describes how to enable events on a per-thread
+basis using set_ftrace_pid.
+
+3.5 Local Event Enablement with PCL
+-----------------------------------
+
+Events can be activated and tracked for the duration of a process on a local
+basis using PCL as follows:
+
+ $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
+ -e kmem:mm_pagevec_free ./hackbench 10
+ Time: 0.909
+
+ Performance counter stats for './hackbench 10':
+
+ 17803 kmem:mm_page_alloc
+ 12398 kmem:mm_page_free_direct
+ 4827 kmem:mm_pagevec_free
+
+ 0.973913387 seconds time elapsed
+
+4. Event Filtering
+==================
+
+Documentation/trace/ftrace.txt covers in depth how to filter events in
+ftrace. Obviously, using grep and awk on trace_pipe is an option, as is
+any script reading trace_pipe.
+
+5. Analysing Event Variances with PCL
+=====================================
+
+Any workload can exhibit variances between runs and it can be important
+to know what the standard deviation is. By and large, this is left to the
+performance analyst to do by hand. In the event that the discrete event
+occurrences are useful to the performance analyst, perf can be used:
+
+ $ perf stat --repeat 5 -e kmem:mm_page_alloc -e kmem:mm_page_free_direct
+ -e kmem:mm_pagevec_free ./hackbench 10
+ Time: 0.890
+ Time: 0.895
+ Time: 0.915
+ Time: 1.001
+ Time: 0.899
+
+ Performance counter stats for './hackbench 10' (5 runs):
+
+ 16630 kmem:mm_page_alloc ( +- 3.542% )
+ 11486 kmem:mm_page_free_direct ( +- 4.771% )
+ 4730 kmem:mm_pagevec_free ( +- 2.325% )
+
+ 0.982653002 seconds time elapsed ( +- 1.448% )
+
+If some higher-level event is required that depends on an aggregation of
+discrete events, then a script would need to be developed.
+
+Using --repeat, it is also possible to view how events are fluctuating over
+time on a system-wide basis using -a and sleep.
+
+ $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
+ -e kmem:mm_pagevec_free \
+ -a --repeat 10 \
+ sleep 1
+ Performance counter stats for 'sleep 1' (10 runs):
+
+ 1066 kmem:mm_page_alloc ( +- 26.148% )
+ 182 kmem:mm_page_free_direct ( +- 5.464% )
+ 890 kmem:mm_pagevec_free ( +- 30.079% )
+
+ 1.002251757 seconds time elapsed ( +- 0.005% )
+
+6. Higher-Level Analysis with Helper Scripts
+============================================
+
+When events are enabled, the events that are triggering can be read from
+/sys/kernel/debug/tracing/trace_pipe in human-readable format, although binary
+options exist as well. By post-processing the output, further information can
+be gathered on-line as appropriate. Examples of post-processing might include:
+
+ o Reading information from /proc for the PID that triggered the event
+ o Deriving a higher-level event from a series of lower-level events
+ o Calculating latencies between two events
+
+Documentation/trace/postprocess/trace-pagealloc-postprocess.pl is an example
+script that can read trace_pipe from STDIN or a copy of a trace. When used
+on-line, it can be interrupted once to generate a report without exiting
+and interrupted twice to exit.
+
+Simplistically, the script just reads STDIN and counts up events, but it
+can also do more, such as:
+
+ o Derive high-level events from many low-level events. If a number of pages
+   are freed to the main allocator from the per-CPU lists, it recognises
+   that as one per-CPU drain even though there is no specific tracepoint
+   for that event
+ o Aggregate events based on the process name or on the individual PID
+ o In the event memory is getting externally fragmented, report
+   on whether the fragmentation event was severe or moderate
+ o When receiving an event about a PID, record who the parent was so
+   that if large numbers of events are coming from very short-lived
+   processes, the parent process responsible for creating all the helpers
+   can be identified
+
+7. Lower-Level Analysis with PCL
+================================
+
+There may also be a requirement to identify what functions within a program
+were generating events within the kernel. To begin this sort of analysis, the
+data must be recorded. At the time of writing, this required root:
+
+ $ perf record -c 1 \
+ -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
+ -e kmem:mm_pagevec_free \
+ ./hackbench 10
+ Time: 0.894
+ [ perf record: Captured and wrote 0.733 MB perf.data (~32010 samples) ]
+
+Note the use of '-c 1' to set the sample period to one event, i.e. every
+event is sampled. The default sample period is quite high to minimise
+overhead, but the information collected can be very coarse as a result.
+
+This recording produced a file called perf.data, which can be analysed using
+perf report:
+
+ $ perf report
+ # Samples: 30922
+ #
+ # Overhead Command Shared Object
+ # ........ ......... ................................
+ #
+ 87.27% hackbench [vdso]
+ 6.85% hackbench /lib/i686/cmov/libc-2.9.so
+ 2.62% hackbench /lib/ld-2.9.so
+ 1.52% perf [vdso]
+ 1.22% hackbench ./hackbench
+ 0.48% hackbench [kernel]
+ 0.02% perf /lib/i686/cmov/libc-2.9.so
+ 0.01% perf /usr/bin/perf
+ 0.01% perf /lib/ld-2.9.so
+ 0.00% hackbench /lib/i686/cmov/libpthread-2.9.so
+ #
+ # (For more details, try: perf report --sort comm,dso,symbol)
+ #
+
+According to this, the vast majority of events were triggered within the
+VDSO. With simple binaries, this will often be the case, so let's take a
+slightly different example. In the course of writing this, it was noticed
+that X was generating an insane number of page allocations, so let's look
+at it:
+
+ $ perf record -c 1 -f \
+ -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
+ -e kmem:mm_pagevec_free \
+ -p `pidof X`
+
+This was interrupted after a few seconds and
+
+ $ perf report
+ # Samples: 27666
+ #
+ # Overhead Command Shared Object
+ # ........ ....... .......................................
+ #
+ 51.95% Xorg [vdso]
+ 47.95% Xorg /opt/gfx-test/lib/libpixman-1.so.0.13.1
+ 0.09% Xorg /lib/i686/cmov/libc-2.9.so
+ 0.01% Xorg [kernel]
+ #
+ # (For more details, try: perf report --sort comm,dso,symbol)
+ #
+
+So, almost half of the events are occurring in a library. To get an idea of
+which symbol, sort the report output by symbol:
+
+ $ perf report --sort comm,dso,symbol
+ # Samples: 27666
+ #
+ # Overhead Command Shared Object Symbol
+ # ........ ....... ....................................... ......
+ #
+ 51.95% Xorg [vdso] [.] 0x000000ffffe424
+ 47.93% Xorg /opt/gfx-test/lib/libpixman-1.so.0.13.1 [.] pixmanFillsse2
+ 0.09% Xorg /lib/i686/cmov/libc-2.9.so [.] _int_malloc
+ 0.01% Xorg /opt/gfx-test/lib/libpixman-1.so.0.13.1 [.] pixman_region32_copy_f
+ 0.01% Xorg [kernel] [k] read_hpet
+ 0.01% Xorg /opt/gfx-test/lib/libpixman-1.so.0.13.1 [.] get_fast_path
+ 0.00% Xorg [kernel] [k] ftrace_trace_userstack
+
+To see where within the function pixmanFillsse2 things are going wrong:
+
+ $ perf annotate pixmanFillsse2
+ [ ... ]
+ 0.00 : 34eeb: 0f 18 08 prefetcht0 (%eax)
+ : }
+ :
+ : extern __inline void __attribute__((__gnu_inline__, __always_inline__, _
+ : _mm_store_si128 (__m128i *__P, __m128i __B) : {
+ : *__P = __B;
+ 12.40 : 34eee: 66 0f 7f 80 40 ff ff movdqa %xmm0,-0xc0(%eax)
+ 0.00 : 34ef5: ff
+ 12.40 : 34ef6: 66 0f 7f 80 50 ff ff movdqa %xmm0,-0xb0(%eax)
+ 0.00 : 34efd: ff
+ 12.39 : 34efe: 66 0f 7f 80 60 ff ff movdqa %xmm0,-0xa0(%eax)
+ 0.00 : 34f05: ff
+ 12.67 : 34f06: 66 0f 7f 80 70 ff ff movdqa %xmm0,-0x90(%eax)
+ 0.00 : 34f0d: ff
+ 12.58 : 34f0e: 66 0f 7f 40 80 movdqa %xmm0,-0x80(%eax)
+ 12.31 : 34f13: 66 0f 7f 40 90 movdqa %xmm0,-0x70(%eax)
+ 12.40 : 34f18: 66 0f 7f 40 a0 movdqa %xmm0,-0x60(%eax)
+ 12.31 : 34f1d: 66 0f 7f 40 b0 movdqa %xmm0,-0x50(%eax)
+
+At a glance, it looks like the time is being spent copying pixmaps to
+the card. Further investigation would be needed to determine why pixmaps
+are being copied around so much, but a starting point would be to take the
+ancient build of libpixman out of the library path where it had been totally
+forgotten about for months!
diff --git a/Documentation/usb/authorization.txt b/Documentation/usb/authorization.txt
index 381b22ee783..c069b6884c7 100644
--- a/Documentation/usb/authorization.txt
+++ b/Documentation/usb/authorization.txt
@@ -16,20 +16,20 @@ Usage:
Authorize a device to connect:
-$ echo 1 > /sys/usb/devices/DEVICE/authorized
+$ echo 1 > /sys/bus/usb/devices/DEVICE/authorized
Deauthorize a device:
-$ echo 0 > /sys/usb/devices/DEVICE/authorized
+$ echo 0 > /sys/bus/usb/devices/DEVICE/authorized
Set new devices connected to hostX to be deauthorized by default (ie:
lock down):
-$ echo 0 > /sys/bus/devices/usbX/authorized_default
+$ echo 0 > /sys/bus/usb/devices/usbX/authorized_default
Remove the lock down:
-$ echo 1 > /sys/bus/devices/usbX/authorized_default
+$ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
By default, Wired USB devices are authorized by default to
connect. Wireless USB hosts deauthorize by default all new connected
@@ -47,7 +47,7 @@ USB port):
boot up
rc.local ->
- for host in /sys/bus/devices/usb*
+ for host in /sys/bus/usb/devices/usb*
do
echo 0 > $host/authorized_default
done
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index 6c3c625b7f3..66f92d1194c 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -33,7 +33,7 @@ if usbmon is built into the kernel.
Verify that bus sockets are present.
-# ls /sys/kernel/debug/usbmon
+# ls /sys/kernel/debug/usb/usbmon
0s 0u 1s 1t 1u 2s 2t 2u 3s 3t 3u 4s 4t 4u
#
@@ -58,11 +58,11 @@ Bus=03 means it's bus 3.
3. Start 'cat'
-# cat /sys/kernel/debug/usbmon/3u > /tmp/1.mon.out
+# cat /sys/kernel/debug/usb/usbmon/3u > /tmp/1.mon.out
to listen on a single bus, otherwise, to listen on all buses, type:
-# cat /sys/kernel/debug/usbmon/0u > /tmp/1.mon.out
+# cat /sys/kernel/debug/usb/usbmon/0u > /tmp/1.mon.out
This process will be reading until killed. Naturally, the output can be
redirected to a desirable location. This is preferred, because it is going
@@ -305,7 +305,7 @@ Before the call, hdr, data, and alloc should be filled. Upon return, the area
pointed by hdr contains the next event structure, and the data buffer contains
the data, if any. The event is removed from the kernel buffer.
-The MON_IOCX_GET copies 48 bytes, MON_IOCX_GETX copies 64 bytes.
+The MON_IOCX_GET copies 48 bytes to hdr area, MON_IOCX_GETX copies 64 bytes.
MON_IOCX_MFETCH, defined as _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)
diff --git a/Documentation/video4linux/v4lgrab.c b/Documentation/video4linux/v4lgrab.c
index 05769cff100..c8ded175796 100644
--- a/Documentation/video4linux/v4lgrab.c
+++ b/Documentation/video4linux/v4lgrab.c
@@ -89,7 +89,7 @@
} \
}
-int get_brightness_adj(unsigned char *image, long size, int *brightness) {
+static int get_brightness_adj(unsigned char *image, long size, int *brightness) {
long i, tot = 0;
for (i=0;i<size*3;i++)
tot += image[i];
diff --git a/Documentation/vm/.gitignore b/Documentation/vm/.gitignore
index 33e8a023df0..09b164a5700 100644
--- a/Documentation/vm/.gitignore
+++ b/Documentation/vm/.gitignore
@@ -1 +1,2 @@
+page-types
slabinfo
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index 2f77ced35df..e57d6a9dd32 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -6,6 +6,8 @@ balance
- various information on memory balancing.
hugetlbpage.txt
- a brief summary of hugetlbpage support in the Linux kernel.
+ksm.txt
+ - how to use the Kernel Samepage Merging feature.
locking
- info on how locking and synchronization is done in the Linux vm code.
numa
@@ -20,3 +22,5 @@ slabinfo.c
- source code for a tool to get reports about slabs.
slub.txt
- a short users guide for SLUB.
+map_hugetlb.c
+ - an example program that uses the MAP_HUGETLB mmap flag.
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index ea8714fcc3a..82a7bd1800b 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -18,13 +18,13 @@ First the Linux kernel needs to be built with the CONFIG_HUGETLBFS
automatically when CONFIG_HUGETLBFS is selected) configuration
options.
-The kernel built with hugepage support should show the number of configured
-hugepages in the system by running the "cat /proc/meminfo" command.
+The kernel built with huge page support should show the number of configured
+huge pages in the system by running the "cat /proc/meminfo" command.
/proc/meminfo also provides information about the total number of hugetlb
pages configured in the kernel. It also displays information about the
number of free hugetlb pages at any time. It also displays information about
-the configured hugepage size - this is needed for generating the proper
+the configured huge page size - this is needed for generating the proper
alignment and size of the arguments to the above system calls.
The output of "cat /proc/meminfo" will have lines like:
@@ -37,25 +37,27 @@ HugePages_Surp: yyy
Hugepagesize: zzz kB
where:
-HugePages_Total is the size of the pool of hugepages.
-HugePages_Free is the number of hugepages in the pool that are not yet
-allocated.
-HugePages_Rsvd is short for "reserved," and is the number of hugepages
-for which a commitment to allocate from the pool has been made, but no
-allocation has yet been made. It's vaguely analogous to overcommit.
-HugePages_Surp is short for "surplus," and is the number of hugepages in
-the pool above the value in /proc/sys/vm/nr_hugepages. The maximum
-number of surplus hugepages is controlled by
-/proc/sys/vm/nr_overcommit_hugepages.
+HugePages_Total is the size of the pool of huge pages.
+HugePages_Free is the number of huge pages in the pool that are not yet
+ allocated.
+HugePages_Rsvd is short for "reserved," and is the number of huge pages for
+ which a commitment to allocate from the pool has been made,
+ but no allocation has yet been made. Reserved huge pages
+ guarantee that an application will be able to allocate a
+ huge page from the pool of huge pages at fault time.
+HugePages_Surp is short for "surplus," and is the number of huge pages in
+ the pool above the value in /proc/sys/vm/nr_hugepages. The
+ maximum number of surplus huge pages is controlled by
+ /proc/sys/vm/nr_overcommit_hugepages.
/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
in the kernel.
/proc/sys/vm/nr_hugepages indicates the current number of configured hugetlb
pages in the kernel. Super user can dynamically request more (or free some
-pre-configured) hugepages.
+pre-configured) huge pages.
The allocation (or deallocation) of hugetlb pages is possible only if there are
-enough physically contiguous free pages in system (freeing of hugepages is
+enough physically contiguous free pages in system (freeing of huge pages is
possible only if there are enough hugetlb pages free that can be transferred
back to regular memory pool).
@@ -67,43 +69,82 @@ use either the mmap system call or shared memory system calls to start using
the huge pages. It is required that the system administrator preallocate
enough memory for huge page purposes.
-Use the following command to dynamically allocate/deallocate hugepages:
+The administrator can preallocate huge pages on the kernel boot command line by
+specifying the "hugepages=N" parameter, where 'N' is the number of huge pages
+requested. This is the most reliable method for preallocating huge pages as
+memory has not yet become fragmented.
+
+Some platforms support multiple huge page sizes. To preallocate huge pages
+of a specific size, one must precede the huge pages boot command parameters
+with a huge page size selection parameter "hugepagesz=<size>". <size> must
+be specified in bytes with optional scale suffix [kKmMgG]. The default huge
+page size may be selected with the "default_hugepagesz=<size>" boot parameter.
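+
+For example, a boot command line preallocating 512 huge pages of 2MB each
+(illustrative values only) might look like:
+
+  hugepagesz=2M hugepages=512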
+
+/proc/sys/vm/nr_hugepages indicates the current number of configured [default
+size] hugetlb pages in the kernel. Super user can dynamically request more
+(or free some pre-configured) huge pages.
+
+Use the following command to dynamically allocate/deallocate default sized
+huge pages:
echo 20 > /proc/sys/vm/nr_hugepages
-This command will try to configure 20 hugepages in the system. The success
-or failure of allocation depends on the amount of physically contiguous
-memory that is preset in system at this time. System administrators may want
-to put this command in one of the local rc init files. This will enable the
-kernel to request huge pages early in the boot process (when the possibility
-of getting physical contiguous pages is still very high). In either
-case, administrators will want to verify the number of hugepages actually
-allocated by checking the sysctl or meminfo.
-
-/proc/sys/vm/nr_overcommit_hugepages indicates how large the pool of
-hugepages can grow, if more hugepages than /proc/sys/vm/nr_hugepages are
-requested by applications. echo'ing any non-zero value into this file
-indicates that the hugetlb subsystem is allowed to try to obtain
-hugepages from the buddy allocator, if the normal pool is exhausted. As
-these surplus hugepages go out of use, they are freed back to the buddy
+This command will try to configure 20 default sized huge pages in the system.
+On a NUMA platform, the kernel will attempt to distribute the huge page pool
+over all the on-line nodes. These huge pages, allocated when nr_hugepages
+is increased, are called "persistent huge pages".
+
+The success or failure of huge page allocation depends on the amount of
+physically contiguous memory that is present in the system at the time of the
+allocation attempt. If the kernel is unable to allocate huge pages from
+some nodes in a NUMA system, it will attempt to make up the difference by
+allocating extra pages on other nodes with sufficient available contiguous
+memory, if any.
+
+System administrators may want to put this command in one of the local rc init
+files. This will enable the kernel to request huge pages early in the boot
+process when the possibility of getting physically contiguous pages is still
+very high. Administrators can verify the number of huge pages actually
+allocated by checking the sysctl or meminfo. To check the per node
+distribution of huge pages in a NUMA system, use:
+
+ cat /sys/devices/system/node/node*/meminfo | fgrep Huge
+
+/proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of
+huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are
+requested by applications. Writing any non-zero value into this file
+indicates that the hugetlb subsystem is allowed to try to obtain "surplus"
+huge pages from the buddy allocator, when the normal pool is exhausted. As
+these surplus huge pages go out of use, they are freed back to the buddy
allocator.
+When increasing the huge page pool size via nr_hugepages, any surplus
+pages will first be promoted to persistent huge pages. Then, additional
+huge pages will be allocated, if necessary and if possible, to fulfill
+the new huge page pool size.
+
+The administrator may shrink the pool of preallocated huge pages for
+the default huge page size by setting the nr_hugepages sysctl to a
+smaller value. The kernel will attempt to balance the freeing of huge pages
+across all on-line nodes. Any free huge pages on the selected nodes will
+be freed back to the buddy allocator.
+
Caveat: Shrinking the pool via nr_hugepages such that it becomes less
-than the number of hugepages in use will convert the balance to surplus
+than the number of huge pages in use will convert the balance to surplus
huge pages even if it would exceed the overcommit value. As long as
this condition holds, however, no more surplus huge pages will be
allowed on the system until one of the two sysctls are increased
sufficiently, or the surplus huge pages go out of use and are freed.
-With support for multiple hugepage pools at run-time available, much of
-the hugepage userspace interface has been duplicated in sysfs. The above
-information applies to the default hugepage size (which will be
-controlled by the proc interfaces for backwards compatibility). The root
-hugepage control directory is
+With support for multiple huge page pools at run-time available, much of
+the huge page userspace interface has been duplicated in sysfs. The above
+information applies to the default huge page size which will be
+controlled by the /proc interfaces for backwards compatibility. The root
+huge page control directory in sysfs is:
/sys/kernel/mm/hugepages
-For each hugepage size supported by the running kernel, a subdirectory
+For each huge page size supported by the running kernel, a subdirectory
will exist, of the form
hugepages-${size}kB
@@ -116,9 +157,9 @@ Inside each of these directories, the same set of files will exist:
resv_hugepages
surplus_hugepages
-which function as described above for the default hugepage-sized case.
+which function as described above for the default huge page-sized case.
-If the user applications are going to request hugepages using mmap system
+If the user applications are going to request huge pages using the mmap system
call, then it is required that system administrator mount a file system of
type hugetlbfs:
@@ -127,7 +168,7 @@ type hugetlbfs:
none /mnt/huge
This command mounts a (pseudo) filesystem of type hugetlbfs on the directory
+/mnt/huge. Any files created on /mnt/huge use huge pages. The uid and gid
+/mnt/huge. Any files created on /mnt/huge uses huge pages. The uid and gid
options sets the owner and group of the root of the file system. By default
the uid and gid of the current process are taken. The mode option sets the
mode of root of file system to value & 0777. This value is given in octal.
@@ -146,24 +187,26 @@ Regular chown, chgrp, and chmod commands (with right permissions) could be
used to change the file attributes on hugetlbfs.
Also, it is important to note that no such mount command is required if the
-applications are going to use only shmat/shmget system calls. Users who
-wish to use hugetlb page via shared memory segment should be a member of
-a supplementary group and system admin needs to configure that gid into
-/proc/sys/vm/hugetlb_shm_group. It is possible for same or different
-applications to use any combination of mmaps and shm* calls, though the
-mount of filesystem will be required for using mmap calls.
+applications are going to use only shmat/shmget system calls or mmap with
+MAP_HUGETLB. Users who wish to use hugetlb pages via shared memory segments
+should be members of a supplementary group, and the system admin needs to
+configure that gid into /proc/sys/vm/hugetlb_shm_group. It is possible for
+the same or different applications to use any combination of mmap and shm*
+calls, though mounting the filesystem will be required for using mmap calls
+without MAP_HUGETLB. For an example of how to use mmap with MAP_HUGETLB see
+map_hugetlb.c.
*******************************************************************
/*
- * Example of using hugepage memory in a user application using Sys V shared
+ * Example of using huge page memory in a user application using Sys V shared
* memory system calls. In this example the app is requesting 256MB of
* memory that is backed by huge pages. The application uses the flag
* SHM_HUGETLB in the shmget system call to inform the kernel that it is
- * requesting hugepages.
+ * requesting huge pages.
*
* For the ia64 architecture, the Linux kernel reserves Region number 4 for
- * hugepages. That means the addresses starting with 0x800000... will need
+ * huge pages. That means the addresses starting with 0x800000... will need
* to be specified. Specifying a fixed address is not required on ppc64,
* i386 or x86_64.
*
@@ -252,14 +295,14 @@ int main(void)
*******************************************************************
/*
- * Example of using hugepage memory in a user application using the mmap
+ * Example of using huge page memory in a user application using the mmap
* system call. Before running this application, make sure that the
* administrator has mounted the hugetlbfs filesystem (on some directory
* like /mnt) using the command mount -t hugetlbfs nodev /mnt. In this
* example, the app is requesting memory of size 256MB that is backed by
* huge pages.
*
- * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
+ * For ia64 architecture, Linux kernel reserves Region number 4 for huge pages.
* That means the addresses starting with 0x800000... will need to be
* specified. Specifying a fixed address is not required on ppc64, i386
* or x86_64.
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
new file mode 100644
index 00000000000..72a22f65960
--- /dev/null
+++ b/Documentation/vm/ksm.txt
@@ -0,0 +1,89 @@
+How to use the Kernel Samepage Merging feature
+----------------------------------------------
+
+KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y,
+added to the Linux kernel in 2.6.32. See mm/ksm.c for its implementation,
+and http://lwn.net/Articles/306704/ and http://lwn.net/Articles/330589/
+
+The KSM daemon ksmd periodically scans those areas of user memory which
+have been registered with it, looking for pages of identical content which
+can be replaced by a single write-protected page (which is automatically
+copied if a process later wants to update its content).
+
+KSM was originally developed for use with KVM (where it was known as
+Kernel Shared Memory), to fit more virtual machines into physical memory,
+by sharing the data common between them. But it can be useful to any
+application which generates many instances of the same data.
+
+KSM only merges anonymous (private) pages, never pagecache (file) pages.
+KSM's merged pages are at present locked into kernel memory for as long
+as they are shared, and so cannot be swapped out like the user pages they
+replace (but swapping of KSM pages should follow in a later release).
+
+KSM only operates on those areas of address space which an application
+has advised to be likely candidates for merging, by using the madvise(2)
+system call: int madvise(addr, length, MADV_MERGEABLE).
+
+The app may call int madvise(addr, length, MADV_UNMERGEABLE) to cancel
+that advice and restore unshared pages: whereupon KSM unmerges whatever
+it merged in that range. Note: this unmerging call may suddenly require
+more memory than is available - possibly failing with EAGAIN, but more
+probably arousing the Out-Of-Memory killer.
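+
+For illustration, a minimal sketch of an application registering an anonymous
+region with KSM and later cancelling the advice; the fallback
+MADV_MERGEABLE/MADV_UNMERGEABLE values are assumptions for libcs whose
+headers do not yet define them:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+  #include <sys/mman.h>
+
+  #ifndef MADV_MERGEABLE  /* assumed values from asm-generic/mman-common.h */
+  #define MADV_MERGEABLE   12
+  #define MADV_UNMERGEABLE 13
+  #endif
+
+  #define LENGTH (16UL * 1024 * 1024)
+
+  int main(void)
+  {
+          char *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
+                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+          if (addr == MAP_FAILED) {
+                  perror("mmap");
+                  exit(1);
+          }
+
+          /* Many identical pages: good candidates for merging. */
+          memset(addr, 0x55, LENGTH);
+
+          /* Advise KSM that this range may be merged. */
+          if (madvise(addr, LENGTH, MADV_MERGEABLE))
+                  perror("madvise(MADV_MERGEABLE)");
+
+          /* ... do work; ksmd merges identical pages in the background ... */
+
+          /* Optionally cancel the advice and restore unshared pages. */
+          if (madvise(addr, LENGTH, MADV_UNMERGEABLE))
+                  perror("madvise(MADV_UNMERGEABLE)");
+
+          munmap(addr, LENGTH);
+          return 0;
+  }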
+
+If KSM is not configured into the running kernel, madvise MADV_MERGEABLE
+and MADV_UNMERGEABLE simply fail with EINVAL. If the running kernel was
+built with CONFIG_KSM=y, those calls will normally succeed: even if the
+KSM daemon is not currently running, MADV_MERGEABLE still registers
+the range for whenever the KSM daemon is started; even if the range
+cannot contain any pages which KSM could actually merge; even if
+MADV_UNMERGEABLE is applied to a range which was never MADV_MERGEABLE.
+
+Like other madvise calls, they are intended for use on mapped areas of
+the user address space: they will report ENOMEM if the specified range
+includes unmapped gaps (though working on the intervening mapped areas),
+and might fail with EAGAIN if there is not enough memory for internal structures.
+
+Applications should be considerate in their use of MADV_MERGEABLE,
+restricting its use to areas likely to benefit. KSM's scans may use
+a lot of processing power, and its kernel-resident pages are a limited
+resource. Some installations will disable KSM for these reasons.
+
+The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
+readable by all but writable only by root:
+
+max_kernel_pages - set to maximum number of kernel pages that KSM may use
+ e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages"
+ Value 0 imposes no limit on the kernel pages KSM may use;
+ but note that any process using MADV_MERGEABLE can cause
+ KSM to allocate these pages, unswappable until it exits.
+ Default: 2000 (chosen for demonstration purposes)
+
+pages_to_scan - how many present pages to scan before ksmd goes to sleep
+ e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan"
+ Default: 200 (chosen for demonstration purposes)
+
+sleep_millisecs - how many milliseconds ksmd should sleep before next scan
+ e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs"
+ Default: 20 (chosen for demonstration purposes)
+
+run - set 0 to stop ksmd from running but keep merged pages,
+ set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run",
+ set 2 to stop ksmd and unmerge all pages currently merged,
+ but leave mergeable areas registered for next run
+ Default: 1 (for immediate use by apps which register)
+
+The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
+
+pages_shared - how many shared unswappable kernel pages KSM is using
+pages_sharing - how many more sites are sharing them i.e. how much saved
+pages_unshared - how many pages are unique but repeatedly checked for merging
+pages_volatile - how many pages changing too fast to be placed in a tree
+full_scans - how many times all mergeable areas have been scanned
+
+A high ratio of pages_sharing to pages_shared indicates good sharing, but
+a high ratio of pages_unshared to pages_sharing indicates wasted effort.
+pages_volatile embraces several different kinds of activity, but a high
+proportion there would also indicate poor use of madvise MADV_MERGEABLE.
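+
+As a small illustrative sketch (only the file names are taken from the list
+above), these ratios can be computed directly from sysfs:
+
+  #include <stdio.h>
+
+  /* Reads one numeric value from a KSM sysfs file, 0 on any failure. */
+  static double ksm_value(const char *name)
+  {
+          char path[128];
+          double v = 0;
+          FILE *f;
+
+          snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
+          f = fopen(path, "r");
+          if (f) {
+                  if (fscanf(f, "%lf", &v) != 1)
+                          v = 0;
+                  fclose(f);
+          }
+          return v;
+  }
+
+  int main(void)
+  {
+          double shared = ksm_value("pages_shared");
+          double sharing = ksm_value("pages_sharing");
+          double unshared = ksm_value("pages_unshared");
+
+          if (shared > 0)
+                  printf("pages_sharing/pages_shared:   %.2f (higher is better)\n",
+                         sharing / shared);
+          if (sharing > 0)
+                  printf("pages_unshared/pages_sharing: %.2f (lower is better)\n",
+                         unshared / sharing);
+          return 0;
+  }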
+
+Izik Eidus,
+Hugh Dickins, 30 July 2009
diff --git a/Documentation/vm/locking b/Documentation/vm/locking
index f366fa95617..25fadb44876 100644
--- a/Documentation/vm/locking
+++ b/Documentation/vm/locking
@@ -80,7 +80,7 @@ Note: PTL can also be used to guarantee that no new clones using the
mm start up ... this is a loose form of stability on mm_users. For
example, it is used in copy_mm to protect against a racing tlb_gather_mmu
single address space optimization, so that the zap_page_range (from
-vmtruncate) does not lose sending ipi's to cloned threads that might
+truncate) does not lose sending ipi's to cloned threads that might
be spawned underneath it and go to user mode to drag in pte's into tlbs.
swap_lock
diff --git a/Documentation/vm/map_hugetlb.c b/Documentation/vm/map_hugetlb.c
new file mode 100644
index 00000000000..e2bdae37f49
--- /dev/null
+++ b/Documentation/vm/map_hugetlb.c
@@ -0,0 +1,77 @@
+/*
+ * Example of using hugepage memory in a user application using the mmap
+ * system call with MAP_HUGETLB flag. Before running this program make
+ * sure the administrator has allocated enough default sized huge pages
+ * to cover the 256 MB allocation.
+ *
+ * For ia64 architecture, Linux kernel reserves Region number 4 for hugepages.
+ * That means the addresses starting with 0x800000... will need to be
+ * specified. Specifying a fixed address is not required on ppc64, i386
+ * or x86_64.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+
+#define LENGTH (256UL*1024*1024)
+#define PROTECTION (PROT_READ | PROT_WRITE)
+
+#ifndef MAP_HUGETLB
+#define MAP_HUGETLB 0x40
+#endif
+
+/* Only ia64 requires this */
+#ifdef __ia64__
+#define ADDR (void *)(0x8000000000000000UL)
+#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED)
+#else
+#define ADDR (void *)(0x0UL)
+#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
+#endif
+
+void check_bytes(char *addr)
+{
+ printf("First hex is %x\n", *((unsigned int *)addr));
+}
+
+void write_bytes(char *addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < LENGTH; i++)
+ *(addr + i) = (char)i;
+}
+
+void read_bytes(char *addr)
+{
+ unsigned long i;
+
+ check_bytes(addr);
+ for (i = 0; i < LENGTH; i++)
+ if (*(addr + i) != (char)i) {
+ printf("Mismatch at %lu\n", i);
+ break;
+ }
+}
+
+int main(void)
+{
+ void *addr;
+
+ addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, 0, 0);
+ if (addr == MAP_FAILED) {
+ perror("mmap");
+ exit(1);
+ }
+
+ printf("Returned address is %p\n", addr);
+ check_bytes(addr);
+ write_bytes(addr);
+ read_bytes(addr);
+
+ munmap(addr, LENGTH);
+
+ return 0;
+}
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index 0833f44ba16..fa1a30d9e9d 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -5,6 +5,7 @@
* Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com>
*/
+#define _LARGEFILE64_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -13,12 +14,33 @@
#include <string.h>
#include <getopt.h>
#include <limits.h>
+#include <assert.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
/*
+ * pagemap kernel ABI bits
+ */
+
+#define PM_ENTRY_BYTES sizeof(uint64_t)
+#define PM_STATUS_BITS 3
+#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
+#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
+#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
+#define PM_PSHIFT_BITS 6
+#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
+#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
+#define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
+#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
+#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)
+
+#define PM_PRESENT PM_STATUS(4LL)
+#define PM_SWAP PM_STATUS(2LL)
+
+
+/*
* kernel page flags
*/
@@ -126,6 +148,14 @@ static int nr_addr_ranges;
static unsigned long opt_offset[MAX_ADDR_RANGES];
static unsigned long opt_size[MAX_ADDR_RANGES];
+#define MAX_VMAS 10240
+static int nr_vmas;
+static unsigned long pg_start[MAX_VMAS];
+static unsigned long pg_end[MAX_VMAS];
+static unsigned long voffset;
+
+static int pagemap_fd;
+
#define MAX_BIT_FILTERS 64
static int nr_bit_filters;
static uint64_t opt_mask[MAX_BIT_FILTERS];
@@ -135,7 +165,6 @@ static int page_size;
#define PAGES_BATCH (64 << 10) /* 64k pages */
static int kpageflags_fd;
-static uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH];
#define HASH_SHIFT 13
#define HASH_SIZE (1 << HASH_SHIFT)
@@ -158,12 +187,17 @@ static uint64_t page_flags[HASH_SIZE];
type __min2 = (y); \
__min1 < __min2 ? __min1 : __min2; })
-unsigned long pages2mb(unsigned long pages)
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+static unsigned long pages2mb(unsigned long pages)
{
return (pages * page_size) >> 20;
}
-void fatal(const char *x, ...)
+static void fatal(const char *x, ...)
{
va_list ap;
@@ -178,7 +212,7 @@ void fatal(const char *x, ...)
* page flag names
*/
-char *page_flag_name(uint64_t flags)
+static char *page_flag_name(uint64_t flags)
{
static char buf[65];
int present;
@@ -197,7 +231,7 @@ char *page_flag_name(uint64_t flags)
return buf;
}
-char *page_flag_longname(uint64_t flags)
+static char *page_flag_longname(uint64_t flags)
{
static char buf[1024];
int i, n;
@@ -221,32 +255,40 @@ char *page_flag_longname(uint64_t flags)
* page list and summary
*/
-void show_page_range(unsigned long offset, uint64_t flags)
+static void show_page_range(unsigned long offset, uint64_t flags)
{
static uint64_t flags0;
+ static unsigned long voff;
static unsigned long index;
static unsigned long count;
- if (flags == flags0 && offset == index + count) {
+ if (flags == flags0 && offset == index + count &&
+ (!opt_pid || voffset == voff + count)) {
count++;
return;
}
- if (count)
- printf("%lu\t%lu\t%s\n",
+ if (count) {
+ if (opt_pid)
+ printf("%lx\t", voff);
+ printf("%lx\t%lx\t%s\n",
index, count, page_flag_name(flags0));
+ }
flags0 = flags;
index = offset;
+ voff = voffset;
count = 1;
}
-void show_page(unsigned long offset, uint64_t flags)
+static void show_page(unsigned long offset, uint64_t flags)
{
- printf("%lu\t%s\n", offset, page_flag_name(flags));
+ if (opt_pid)
+ printf("%lx\t", voffset);
+ printf("%lx\t%s\n", offset, page_flag_name(flags));
}
-void show_summary(void)
+static void show_summary(void)
{
int i;
@@ -272,7 +314,7 @@ void show_summary(void)
* page flag filters
*/
-int bit_mask_ok(uint64_t flags)
+static int bit_mask_ok(uint64_t flags)
{
int i;
@@ -289,7 +331,7 @@ int bit_mask_ok(uint64_t flags)
return 1;
}
-uint64_t expand_overloaded_flags(uint64_t flags)
+static uint64_t expand_overloaded_flags(uint64_t flags)
{
/* SLOB/SLUB overload several page flags */
if (flags & BIT(SLAB)) {
@@ -308,7 +350,7 @@ uint64_t expand_overloaded_flags(uint64_t flags)
return flags;
}
-uint64_t well_known_flags(uint64_t flags)
+static uint64_t well_known_flags(uint64_t flags)
{
/* hide flags intended only for kernel hacker */
flags &= ~KPF_HACKERS_BITS;
@@ -325,7 +367,7 @@ uint64_t well_known_flags(uint64_t flags)
* page frame walker
*/
-int hash_slot(uint64_t flags)
+static int hash_slot(uint64_t flags)
{
int k = HASH_KEY(flags);
int i;
@@ -352,7 +394,7 @@ int hash_slot(uint64_t flags)
exit(EXIT_FAILURE);
}
-void add_page(unsigned long offset, uint64_t flags)
+static void add_page(unsigned long offset, uint64_t flags)
{
flags = expand_overloaded_flags(flags);
@@ -371,7 +413,7 @@ void add_page(unsigned long offset, uint64_t flags)
total_pages++;
}
-void walk_pfn(unsigned long index, unsigned long count)
+static void walk_pfn(unsigned long index, unsigned long count)
{
unsigned long batch;
unsigned long n;
@@ -383,6 +425,8 @@ void walk_pfn(unsigned long index, unsigned long count)
lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
while (count) {
+ uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH];
+
batch = min_t(unsigned long, count, PAGES_BATCH);
n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
if (n == 0)
@@ -404,7 +448,82 @@ void walk_pfn(unsigned long index, unsigned long count)
}
}
-void walk_addr_ranges(void)
+
+#define PAGEMAP_BATCH 4096
+static unsigned long task_pfn(unsigned long pgoff)
+{
+ static uint64_t buf[PAGEMAP_BATCH];
+ static unsigned long start;
+ static long count;
+ uint64_t pfn;
+
+ if (pgoff < start || pgoff >= start + count) {
+ if (lseek64(pagemap_fd,
+ (uint64_t)pgoff * PM_ENTRY_BYTES,
+ SEEK_SET) < 0) {
+ perror("pagemap seek");
+ exit(EXIT_FAILURE);
+ }
+ count = read(pagemap_fd, buf, sizeof(buf));
+ if (count == 0)
+ return 0;
+ if (count < 0) {
+ perror("pagemap read");
+ exit(EXIT_FAILURE);
+ }
+ if (count % PM_ENTRY_BYTES) {
+ fatal("pagemap read not aligned.\n");
+ exit(EXIT_FAILURE);
+ }
+ count /= PM_ENTRY_BYTES;
+ start = pgoff;
+ }
+
+ pfn = buf[pgoff - start];
+ if (pfn & PM_PRESENT)
+ pfn = PM_PFRAME(pfn);
+ else
+ pfn = 0;
+
+ return pfn;
+}
+
+static void walk_task(unsigned long index, unsigned long count)
+{
+ int i = 0;
+ const unsigned long end = index + count;
+
+ while (index < end) {
+
+ while (pg_end[i] <= index)
+ if (++i >= nr_vmas)
+ return;
+ if (pg_start[i] >= end)
+ return;
+
+ voffset = max_t(unsigned long, pg_start[i], index);
+ index = min_t(unsigned long, pg_end[i], end);
+
+ assert(voffset < index);
+ for (; voffset < index; voffset++) {
+ unsigned long pfn = task_pfn(voffset);
+ if (pfn)
+ walk_pfn(pfn, 1);
+ }
+ }
+}
+
+static void add_addr_range(unsigned long offset, unsigned long size)
+{
+ if (nr_addr_ranges >= MAX_ADDR_RANGES)
+ fatal("too many addr ranges\n");
+
+ opt_offset[nr_addr_ranges] = offset;
+ opt_size[nr_addr_ranges] = min_t(unsigned long, size, ULONG_MAX-offset);
+ nr_addr_ranges++;
+}
+
+static void walk_addr_ranges(void)
{
int i;
@@ -415,10 +534,13 @@ void walk_addr_ranges(void)
}
if (!nr_addr_ranges)
- walk_pfn(0, ULONG_MAX);
+ add_addr_range(0, ULONG_MAX);
for (i = 0; i < nr_addr_ranges; i++)
- walk_pfn(opt_offset[i], opt_size[i]);
+ if (!opt_pid)
+ walk_pfn(opt_offset[i], opt_size[i]);
+ else
+ walk_task(opt_offset[i], opt_size[i]);
close(kpageflags_fd);
}
@@ -428,7 +550,7 @@ void walk_addr_ranges(void)
* user interface
*/
-const char *page_flag_type(uint64_t flag)
+static const char *page_flag_type(uint64_t flag)
{
if (flag & KPF_HACKERS_BITS)
return "(r)";
@@ -437,7 +559,7 @@ const char *page_flag_type(uint64_t flag)
return " ";
}
-void usage(void)
+static void usage(void)
{
int i, j;
@@ -446,8 +568,8 @@ void usage(void)
" -r|--raw Raw mode, for kernel developers\n"
" -a|--addr addr-spec Walk a range of pages\n"
" -b|--bits bits-spec Walk pages with specified bits\n"
-#if 0 /* planned features */
" -p|--pid pid Walk process address space\n"
+#if 0 /* planned features */
" -f|--file filename Walk file address space\n"
#endif
" -l|--list Show page details in ranges\n"
@@ -459,7 +581,7 @@ void usage(void)
" N+M pages range from N to N+M-1\n"
" N,M pages range from N to M-1\n"
" N, pages range from N to end\n"
-" ,M pages range from 0 to M\n"
+" ,M pages range from 0 to M-1\n"
"bits-spec:\n"
" bit1,bit2 (flags & (bit1|bit2)) != 0\n"
" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n"
@@ -482,7 +604,7 @@ void usage(void)
"(r) raw mode bits (o) overloaded bits\n");
}
-unsigned long long parse_number(const char *str)
+static unsigned long long parse_number(const char *str)
{
unsigned long long n;
@@ -494,26 +616,62 @@ unsigned long long parse_number(const char *str)
return n;
}
-void parse_pid(const char *str)
+static void parse_pid(const char *str)
{
+ FILE *file;
+ char buf[5000];
+
opt_pid = parse_number(str);
-}
-void parse_file(const char *name)
-{
+ sprintf(buf, "/proc/%d/pagemap", opt_pid);
+ pagemap_fd = open(buf, O_RDONLY);
+ if (pagemap_fd < 0) {
+ perror(buf);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(buf, "/proc/%d/maps", opt_pid);
+ file = fopen(buf, "r");
+ if (!file) {
+ perror(buf);
+ exit(EXIT_FAILURE);
+ }
+
+ while (fgets(buf, sizeof(buf), file) != NULL) {
+ unsigned long vm_start;
+ unsigned long vm_end;
+ unsigned long long pgoff;
+ int major, minor;
+ char r, w, x, s;
+ unsigned long ino;
+ int n;
+
+ n = sscanf(buf, "%lx-%lx %c%c%c%c %llx %x:%x %lu",
+ &vm_start,
+ &vm_end,
+ &r, &w, &x, &s,
+ &pgoff,
+ &major, &minor,
+ &ino);
+ if (n < 10) {
+ fprintf(stderr, "unexpected line: %s\n", buf);
+ continue;
+ }
+ pg_start[nr_vmas] = vm_start / page_size;
+ pg_end[nr_vmas] = vm_end / page_size;
+ if (++nr_vmas >= MAX_VMAS) {
+ fprintf(stderr, "too many VMAs\n");
+ break;
+ }
+ }
+ fclose(file);
}
-void add_addr_range(unsigned long offset, unsigned long size)
+static void parse_file(const char *name)
{
- if (nr_addr_ranges >= MAX_ADDR_RANGES)
- fatal("too much addr ranges\n");
-
- opt_offset[nr_addr_ranges] = offset;
- opt_size[nr_addr_ranges] = size;
- nr_addr_ranges++;
}
-void parse_addr_range(const char *optarg)
+static void parse_addr_range(const char *optarg)
{
unsigned long offset;
unsigned long size;
@@ -547,7 +705,7 @@ void parse_addr_range(const char *optarg)
add_addr_range(offset, size);
}
-void add_bits_filter(uint64_t mask, uint64_t bits)
+static void add_bits_filter(uint64_t mask, uint64_t bits)
{
if (nr_bit_filters >= MAX_BIT_FILTERS)
fatal("too much bit filters\n");
@@ -557,7 +715,7 @@ void add_bits_filter(uint64_t mask, uint64_t bits)
nr_bit_filters++;
}
-uint64_t parse_flag_name(const char *str, int len)
+static uint64_t parse_flag_name(const char *str, int len)
{
int i;
@@ -577,7 +735,7 @@ uint64_t parse_flag_name(const char *str, int len)
return parse_number(str);
}
-uint64_t parse_flag_names(const char *str, int all)
+static uint64_t parse_flag_names(const char *str, int all)
{
const char *p = str;
uint64_t flags = 0;
@@ -596,7 +754,7 @@ uint64_t parse_flag_names(const char *str, int all)
return flags;
}
-void parse_bits_mask(const char *optarg)
+static void parse_bits_mask(const char *optarg)
{
uint64_t mask;
uint64_t bits;
@@ -621,7 +779,7 @@ void parse_bits_mask(const char *optarg)
}
-struct option opts[] = {
+static struct option opts[] = {
{ "raw" , 0, NULL, 'r' },
{ "pid" , 1, NULL, 'p' },
{ "file" , 1, NULL, 'f' },
@@ -676,8 +834,10 @@ int main(int argc, char *argv[])
}
}
+ if (opt_list && opt_pid)
+ printf("voffset\t");
if (opt_list == 1)
- printf("offset\tcount\tflags\n");
+ printf("offset\tlen\tflags\n");
if (opt_list == 2)
printf("offset\tflags\n");
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index df3227605d5..92e729f4b67 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -87,7 +87,7 @@ int page_size;
regex_t pattern;
-void fatal(const char *x, ...)
+static void fatal(const char *x, ...)
{
va_list ap;
@@ -97,7 +97,7 @@ void fatal(const char *x, ...)
exit(EXIT_FAILURE);
}
-void usage(void)
+static void usage(void)
{
printf("slabinfo 5/7/2007. (c) 2007 sgi.\n\n"
"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
@@ -131,7 +131,7 @@ void usage(void)
);
}
-unsigned long read_obj(const char *name)
+static unsigned long read_obj(const char *name)
{
FILE *f = fopen(name, "r");
@@ -151,7 +151,7 @@ unsigned long read_obj(const char *name)
/*
* Get the contents of an attribute
*/
-unsigned long get_obj(const char *name)
+static unsigned long get_obj(const char *name)
{
if (!read_obj(name))
return 0;
@@ -159,7 +159,7 @@ unsigned long get_obj(const char *name)
return atol(buffer);
}
-unsigned long get_obj_and_str(const char *name, char **x)
+static unsigned long get_obj_and_str(const char *name, char **x)
{
unsigned long result = 0;
char *p;
@@ -178,7 +178,7 @@ unsigned long get_obj_and_str(const char *name, char **x)
return result;
}
-void set_obj(struct slabinfo *s, const char *name, int n)
+static void set_obj(struct slabinfo *s, const char *name, int n)
{
char x[100];
FILE *f;
@@ -192,7 +192,7 @@ void set_obj(struct slabinfo *s, const char *name, int n)
fclose(f);
}
-unsigned long read_slab_obj(struct slabinfo *s, const char *name)
+static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
{
char x[100];
FILE *f;
@@ -215,7 +215,7 @@ unsigned long read_slab_obj(struct slabinfo *s, const char *name)
/*
* Put a size string together
*/
-int store_size(char *buffer, unsigned long value)
+static int store_size(char *buffer, unsigned long value)
{
unsigned long divisor = 1;
char trailer = 0;
@@ -247,7 +247,7 @@ int store_size(char *buffer, unsigned long value)
return n;
}
-void decode_numa_list(int *numa, char *t)
+static void decode_numa_list(int *numa, char *t)
{
int node;
int nr;
@@ -272,7 +272,7 @@ void decode_numa_list(int *numa, char *t)
}
}
-void slab_validate(struct slabinfo *s)
+static void slab_validate(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
@@ -280,7 +280,7 @@ void slab_validate(struct slabinfo *s)
set_obj(s, "validate", 1);
}
-void slab_shrink(struct slabinfo *s)
+static void slab_shrink(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
@@ -290,7 +290,7 @@ void slab_shrink(struct slabinfo *s)
int line = 0;
-void first_line(void)
+static void first_line(void)
{
if (show_activity)
printf("Name Objects Alloc Free %%Fast Fallb O\n");
@@ -302,7 +302,7 @@ void first_line(void)
/*
* Find the shortest alias of a slab
*/
-struct aliasinfo *find_one_alias(struct slabinfo *find)
+static struct aliasinfo *find_one_alias(struct slabinfo *find)
{
struct aliasinfo *a;
struct aliasinfo *best = NULL;
@@ -318,18 +318,18 @@ struct aliasinfo *find_one_alias(struct slabinfo *find)
return best;
}
-unsigned long slab_size(struct slabinfo *s)
+static unsigned long slab_size(struct slabinfo *s)
{
return s->slabs * (page_size << s->order);
}
-unsigned long slab_activity(struct slabinfo *s)
+static unsigned long slab_activity(struct slabinfo *s)
{
return s->alloc_fastpath + s->free_fastpath +
s->alloc_slowpath + s->free_slowpath;
}
-void slab_numa(struct slabinfo *s, int mode)
+static void slab_numa(struct slabinfo *s, int mode)
{
int node;
@@ -374,7 +374,7 @@ void slab_numa(struct slabinfo *s, int mode)
line++;
}
-void show_tracking(struct slabinfo *s)
+static void show_tracking(struct slabinfo *s)
{
printf("\n%s: Kernel object allocation\n", s->name);
printf("-----------------------------------------------------------------------\n");
@@ -392,7 +392,7 @@ void show_tracking(struct slabinfo *s)
}
-void ops(struct slabinfo *s)
+static void ops(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
@@ -405,14 +405,14 @@ void ops(struct slabinfo *s)
printf("\n%s has no kmem_cache operations\n", s->name);
}
-const char *onoff(int x)
+static const char *onoff(int x)
{
if (x)
return "On ";
return "Off";
}
-void slab_stats(struct slabinfo *s)
+static void slab_stats(struct slabinfo *s)
{
unsigned long total_alloc;
unsigned long total_free;
@@ -477,7 +477,7 @@ void slab_stats(struct slabinfo *s)
s->deactivate_to_tail, (s->deactivate_to_tail * 100) / total);
}
-void report(struct slabinfo *s)
+static void report(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
@@ -518,7 +518,7 @@ void report(struct slabinfo *s)
slab_stats(s);
}
-void slabcache(struct slabinfo *s)
+static void slabcache(struct slabinfo *s)
{
char size_str[20];
char dist_str[40];
@@ -593,7 +593,7 @@ void slabcache(struct slabinfo *s)
/*
* Analyze debug options. Return false if something is amiss.
*/
-int debug_opt_scan(char *opt)
+static int debug_opt_scan(char *opt)
{
if (!opt || !opt[0] || strcmp(opt, "-") == 0)
return 1;
@@ -642,7 +642,7 @@ int debug_opt_scan(char *opt)
return 1;
}
-int slab_empty(struct slabinfo *s)
+static int slab_empty(struct slabinfo *s)
{
if (s->objects > 0)
return 0;
@@ -657,7 +657,7 @@ int slab_empty(struct slabinfo *s)
return 1;
}
-void slab_debug(struct slabinfo *s)
+static void slab_debug(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
@@ -717,7 +717,7 @@ void slab_debug(struct slabinfo *s)
set_obj(s, "trace", 1);
}
-void totals(void)
+static void totals(void)
{
struct slabinfo *s;
@@ -976,7 +976,7 @@ void totals(void)
b1, b2, b3);
}
-void sort_slabs(void)
+static void sort_slabs(void)
{
struct slabinfo *s1,*s2;
@@ -1005,7 +1005,7 @@ void sort_slabs(void)
}
}
-void sort_aliases(void)
+static void sort_aliases(void)
{
struct aliasinfo *a1,*a2;
@@ -1030,7 +1030,7 @@ void sort_aliases(void)
}
}
-void link_slabs(void)
+static void link_slabs(void)
{
struct aliasinfo *a;
struct slabinfo *s;
@@ -1048,7 +1048,7 @@ void link_slabs(void)
}
}
-void alias(void)
+static void alias(void)
{
struct aliasinfo *a;
char *active = NULL;
@@ -1079,7 +1079,7 @@ void alias(void)
}
-void rename_slabs(void)
+static void rename_slabs(void)
{
struct slabinfo *s;
struct aliasinfo *a;
@@ -1102,12 +1102,12 @@ void rename_slabs(void)
}
}
-int slab_mismatch(char *slab)
+static int slab_mismatch(char *slab)
{
return regexec(&pattern, slab, 0, NULL, 0);
}
-void read_slab_dir(void)
+static void read_slab_dir(void)
{
DIR *dir;
struct dirent *de;
@@ -1209,7 +1209,7 @@ void read_slab_dir(void)
fatal("Too many aliases\n");
}
-void output_slabs(void)
+static void output_slabs(void)
{
struct slabinfo *slab;
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 65f6c19cb86..a750532ffcf 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -18,7 +18,7 @@ int fd;
* the PC Watchdog card to reset its internal timer so it doesn't trigger
* a computer reset.
*/
-void keep_alive(void)
+static void keep_alive(void)
{
int dummy;
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt
index 607b1a01606..f19802c0f48 100644
--- a/Documentation/x86/earlyprintk.txt
+++ b/Documentation/x86/earlyprintk.txt
@@ -7,7 +7,7 @@ and two USB cables, connected like this:
[host/target] <-------> [USB debug key] <-------> [client/console]
-1. There are three specific hardware requirements:
+1. There are a number of specific hardware requirements:
a.) Host/target system needs to have USB debug port capability.
@@ -42,7 +42,35 @@ and two USB cables, connected like this:
This is a small blue plastic connector with two USB connections,
it draws power from its USB connections.
- c.) Thirdly, you need a second client/console system with a regular USB port.
+ c.) You need a second client/console system with a high-speed USB 2.0
+ port.
+
+ d.) The Netchip device must be plugged directly into the physical
+ debug port on the "host/target" system. You cannot use a USB hub
+ between the Netchip device and the physical debug port.
+
+ The EHCI debug controller is bound to a specific physical USB
+ port and the Netchip device will only work as an early printk
+ device in this port. The EHCI host controllers are electrically
+ wired such that the EHCI debug controller is hooked up to the
+ first physical port, and there is no way to change this via
+ software. You can find the right port by experimentation: try
+ each physical port on the system and reboot. Alternatively, use
+ lsusb or watch the kernel messages emitted by the USB stack when
+ you plug a USB device into the various ports on the
+ "host/target" system.
+
+ Some hardware vendors do not expose the USB debug port with a
+ physical connector; if you find such a device, send a complaint
+ to the hardware vendor, because there is no reason not to wire
+ this port to one of the physically accessible connectors.
+
+ e.) It is also important to note that many versions of the Netchip
+ device require the "client/console" system to be plugged into the
+ right-hand side of the device (with the product logo facing up and
+ readable left to right). The reason is that the 5 volt
+ power supply is taken from only one side of the device, and it
+ must be the side that does not get rebooted.
2. Software requirements:
@@ -56,6 +84,13 @@ and two USB cables, connected like this:
(If you are using Grub, append it to the 'kernel' line in
/etc/grub.conf)
+ On systems with more than one EHCI debug controller you must
+ specify the correct EHCI debug controller number. The ordering
+ comes from the PCI bus enumeration of the EHCI controllers. The
+ default with no number argument is "0", the first EHCI debug
+ controller. To use the second EHCI debug controller, you would
+ use the command line: "earlyprintk=dbgp1"
+
NOTE: normally earlyprintk console gets turned off once the
regular console is alive - use "earlyprintk=dbgp,keep" to keep
this channel open beyond early bootup. This can be useful for
diff --git a/MAINTAINERS b/MAINTAINERS
index 43761a00e3f..c450f3abb8c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -233,6 +233,7 @@ S: Supported
F: drivers/acpi/
F: drivers/pnp/pnpacpi/
F: include/linux/acpi.h
+F: include/acpi/
ACPI BATTERY DRIVERS
M: Alexey Starikovskiy <astarikovskiy@suse.de>
@@ -256,12 +257,6 @@ W: http://www.lesswatts.org/projects/acpi/
S: Supported
F: drivers/acpi/fan.c
-ACPI PCI HOTPLUG DRIVER
-M: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: drivers/pci/hotplug/acpi*
-
ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
@@ -497,7 +492,7 @@ F: arch/arm/include/asm/floppy.h
ARM PORT
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/
@@ -508,36 +503,36 @@ F: drivers/mmc/host/mmci.*
ARM/ADI ROADRUNNER MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ixp23xx/
F: arch/arm/mach-ixp23xx/include/mach/
ARM/ADS SPHERE MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/AFEB9260 MACHINE SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/AJECO 1ARM MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/ATMEL AT91RM9200 ARM ARCHITECTURE
M: Andrew Victor <linux@maxim.org.za>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://maxim.org.za/at91_26.html
S: Maintained
ARM/BCMRING ARM ARCHITECTURE
M: Leo Chen <leochen@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-bcmring
@@ -554,25 +549,25 @@ F: drivers/mtd/nand/nand_bcm_umi.h
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
M: Ryan Mallon <ryan@bluewatersys.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ep93xx/
F: arch/arm/mach-ep93xx/include/mach/
ARM/CIRRUS LOGIC EDB9315A MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/CLKDEV SUPPORT
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F: arch/arm/common/clkdev.c
F: arch/arm/include/asm/clkdev.h
ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT
M: Mike Rapoport <mike@compulab.co.il>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/CORGI MACHINE SUPPORT
@@ -581,14 +576,14 @@ S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://gitorious.org/linux-gemini/mainline.git
S: Maintained
F: arch/arm/mach-gemini/
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/mach-ebsa110/
@@ -606,13 +601,13 @@ F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mm/*-fa*
ARM/FOOTBRIDGE ARCHITECTURE
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/include/asm/hardware/dec21285.h
@@ -620,17 +615,17 @@ F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Sascha Hauer <kernel@pengutronix.de>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/GUMSTIX MACHINE SUPPORT
M: Steve Sakoman <sakoman@gmail.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/H4700 (HP IPAQ HX4700) MACHINE SUPPORT
@@ -650,55 +645,62 @@ F: arch/arm/mach-sa1100/include/mach/jornada720.h
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
ARM/INTEL IOP33X ARM ARCHITECTURE
M: Dan Williams <dan.j.williams@intel.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
ARM/INTEL IOP13XX ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
ARM/INTEL IQ81342EX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
ARM/INTEL IXP2000 ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IXDP2850 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IXP23XX ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+
+ARM/INTEL IXP4XX ARM ARCHITECTURE
+M: Imre Kaloz <kaloz@openwrt.org>
+M: Krzysztof Halasa <khc@pm.waw.pl>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: arch/arm/mach-ixp4xx/
ARM/INTEL XSC3 (MANZANO) ARM CORE
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/MAGICIAN MACHINE SUPPORT
@@ -708,7 +710,7 @@ S: Maintained
ARM/Marvell Loki/Kirkwood/MV78xx0/Orion SOC support
M: Lennert Buytenhek <buytenh@marvell.com>
M: Nicolas Pitre <nico@marvell.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.marvell.com/orion
S: Maintained
F: arch/arm/mach-loki/
@@ -719,7 +721,7 @@ F: arch/arm/plat-orion/
ARM/MIOA701 MACHINE SUPPORT
M: Robert Jarzmik <robert.jarzmik@free.fr>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
F: arch/arm/mach-pxa/mioa701.c
S: Maintained
@@ -738,18 +740,22 @@ M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Dirk Opfer <dirk@opfer-online.de>
S: Maintained
-ARM/PALMTX,PALMT5,PALMLD,PALMTE2 SUPPORT
-M: Marek Vasut <marek.vasut@gmail.com>
+ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
+P: Marek Vasut
+M: marek.vasut@gmail.com
+L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
ARM/PALM TREO 680 SUPPORT
M: Tomas Cech <sleep_walker@suse.cz>
+L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
ARM/PALMZ72 SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
+L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
@@ -760,18 +766,18 @@ S: Maintained
ARM/PT DIGITAL BOARD PORT
M: Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
ARM/RADISYS ENP2611 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/RISCPC ARCHITECTURE
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/common/time-acorn.c
@@ -790,7 +796,7 @@ S: Maintained
ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-s3c/
@@ -798,65 +804,65 @@ F: arch/arm/plat-s3c24xx/
ARM/S3C2410 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2410/
ARM/S3C2440 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2440/
ARM/S3C2442 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2442/
ARM/S3C2443 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2443/
ARM/S3C6400 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c6400/
ARM/S3C6410 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c6410/
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/THECUS N2100 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/NUVOTON W90X900 ARM ARCHITECTURE
M: Wan ZongShun <mcuos.com@gmail.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.mcuos.com
S: Maintained
ARM/VFP SUPPORT
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/vfp/
@@ -894,6 +900,13 @@ F: drivers/dma/
F: include/linux/dmaengine.h
F: include/linux/async_tx.h
+AT24 EEPROM DRIVER
+M: Wolfram Sang <w.sang@pengutronix.de>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/misc/eeprom/at24.c
+F: include/linux/i2c/at24.h
+
ATA OVER ETHERNET (AOE) DRIVER
M: "Ed L. Cashin" <ecashin@coraid.com>
W: http://www.coraid.com/support/linux
@@ -963,7 +976,7 @@ F: include/linux/atm*
ATMEL AT91 MCI DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.atmel.com/products/AT91/
W: http://www.at91.com/
S: Maintained
@@ -1541,7 +1554,7 @@ F: drivers/infiniband/hw/cxgb3/
CYBERPRO FB DRIVER
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.arm.linux.org.uk/
S: Maintained
F: drivers/video/cyber2000fb.*
@@ -2085,7 +2098,7 @@ F: drivers/i2c/busses/i2c-cpm.c
FREESCALE IMX / MXC FRAMEBUFFER DRIVER
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/plat-mxc/include/mach/imxfb.h
F: drivers/video/imxfb.c
@@ -2106,12 +2119,12 @@ S: Supported
F: arch/powerpc/sysdev/qe_lib/
F: arch/powerpc/include/asm/*qe.h
-FREESCALE HIGHSPEED USB DEVICE DRIVER
+FREESCALE USB PERIPHERAL DRIVERS
M: Li Yang <leoli@freescale.com>
L: linux-usb@vger.kernel.org
L: linuxppc-dev@ozlabs.org
S: Maintained
-F: drivers/usb/gadget/fsl_usb2_udc.c
+F: drivers/usb/gadget/fsl*
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
M: Li Yang <leoli@freescale.com>
@@ -2316,7 +2329,9 @@ S: Orphan
F: drivers/hwmon/
HARDWARE RANDOM NUMBER GENERATOR CORE
-S: Orphan
+M: Matt Mackall <mpm@selenic.com>
+M: Herbert Xu <herbert@gondor.apana.org.au>
+S: Odd fixes
F: Documentation/hw_random.txt
F: drivers/char/hw_random/
F: include/linux/hw_random.h
@@ -2803,6 +2818,8 @@ L: netdev@vger.kernel.org
L: lvs-devel@vger.kernel.org
S: Maintained
F: Documentation/networking/ipvs-sysctl.txt
+F: include/net/ip_vs.h
+F: include/linux/ip_vs.h
F: net/netfilter/ipvs/
IPWIRELESS DRIVER
@@ -2955,7 +2972,7 @@ F: scripts/Makefile.*
KERNEL JANITORS
L: kernel-janitors@vger.kernel.org
W: http://www.kerneljanitors.org/
-S: Odd fixes
+S: Maintained
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
M: "J. Bruce Fields" <bfields@fieldses.org>
@@ -3449,7 +3466,7 @@ F: include/linux/meye.h
MOTOROLA IMX MMC/SD HOST CONTROLLER INTERFACE DRIVER
M: Pavel Pisa <ppisa@pikron.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/mmc/host/imxmmc.*
@@ -3524,7 +3541,6 @@ F: drivers/net/natsemi.c
NCP FILESYSTEM
M: Petr Vandrovec <vandrove@vc.cvut.cz>
-L: linware@sh.cvut.cz
S: Maintained
F: fs/ncpfs/
@@ -3734,7 +3750,7 @@ W: http://www.muru.com/linux/omap/
W: http://linux.omap.com/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6.git
S: Maintained
-F: arch/arm/*omap*
+F: arch/arm/*omap*/
OMAP CLOCK FRAMEWORK SUPPORT
M: Paul Walmsley <paul@pwsan.com>
@@ -3766,7 +3782,13 @@ OMAP MMC SUPPORT
M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
L: linux-omap@vger.kernel.org
S: Maintained
-F: drivers/mmc/host/*omap*
+F: drivers/mmc/host/omap.c
+
+OMAP HS MMC SUPPORT
+M: Madhusudhan Chikkature <madhu.cr@ti.com>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/omap_hsmmc.c
OMAP RANDOM NUMBER GENERATOR SUPPORT
M: Deepak Saxena <dsaxena@plexity.net>
@@ -3956,6 +3978,15 @@ S: Maintained
F: drivers/leds/leds-pca9532.c
F: include/linux/leds-pca9532.h
+PCA9564/PCA9665 I2C BUS DRIVER
+M: Wolfram Sang <w.sang@pengutronix.de>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/algos/i2c-algo-pca.c
+F: drivers/i2c/busses/i2c-pca-*
+F: include/linux/i2c-algo-pca.h
+F: include/linux/i2c-pca-platform.h
+
PCI ERROR RECOVERY
M: Linas Vepstas <linas@austin.ibm.com>
L: linux-pci@vger.kernel.org
@@ -3972,11 +4003,11 @@ F: Documentation/PCI/
F: drivers/pci/
F: include/linux/pci*
-PCIE HOTPLUG DRIVER
-M: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+PCI HOTPLUG
+M: Jesse Barnes <jbarnes@virtuousgeek.org>
L: linux-pci@vger.kernel.org
S: Supported
-F: drivers/pci/pcie/
+F: drivers/pci/hotplug
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
@@ -4000,7 +4031,7 @@ S: Maintained
F: include/linux/delayacct.h
F: kernel/delayacct.c
-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
@@ -4025,8 +4056,7 @@ F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
PMC SIERRA MaxRAID DRIVER
-P: Anil Ravindranath
-M: anil_ravindranath@pmc-sierra.com
+M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
L: linux-scsi@vger.kernel.org
W: http://www.pmc-sierra.com/
S: Supported
@@ -4168,7 +4198,7 @@ F: drivers/media/video/pvrusb2/
PXA2xx/PXA3xx SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
M: Russell King <linux@arm.linux.org.uk>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-pxa/
F: drivers/pcmcia/pxa2xx*
@@ -4181,13 +4211,13 @@ F: sound/soc/pxa
PXA168 SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
M: Jason Chagas <jason.chagas@marvell.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git
S: Maintained
PXA910 SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git
S: Maintained
@@ -4428,7 +4458,7 @@ F: net/iucv/
S3C24XX SD/MMC Driver
M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/mmc/host/s3cmci.*
@@ -4458,7 +4488,7 @@ SCORE ARCHITECTURE
P: Chen Liqin
M: liqin.chen@sunplusct.com
P: Lennox Wu
-M: lennox.wu@sunplusct.com
+M: lennox.wu@gmail.com
W: http://www.sunplusct.com
S: Supported
@@ -4533,20 +4563,20 @@ S: Maintained
F: drivers/mmc/host/sdricoh_cs.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-M: Pierre Ossman <pierre@ossman.eu>
-L: sdhci-devel@lists.ossman.eu
-S: Maintained
+S: Orphan
+L: linux-mmc@vger.kernel.org
+F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
M: Anton Vorontsov <avorontsov@ru.mvista.com>
L: linuxppc-dev@ozlabs.org
-L: sdhci-devel@lists.ossman.eu
+L: linux-mmc@vger.kernel.org
S: Maintained
-F: drivers/mmc/host/sdhci.*
+F: drivers/mmc/host/sdhci-of.*
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
M: Ben Dooks <ben-linux@fluff.org>
-L: sdhci-devel@lists.ossman.eu
+L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-s3c.c
@@ -4632,7 +4662,7 @@ F: drivers/misc/sgi-xp/
SHARP LH SUPPORT (LH7952X & LH7A40X)
M: Marc Singer <elf@buici.com>
W: http://projects.buici.com/arm
-L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
F: arch/arm/mach-lh7a40x/
@@ -4640,11 +4670,16 @@ F: drivers/serial/serial_lh7a40x.c
F: drivers/usb/gadget/lh7a40*
F: drivers/usb/host/ohci-lh7a40*
-SHPC HOTPLUG DRIVER
-M: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
-L: linux-pci@vger.kernel.org
+SIMPLE FIRMWARE INTERFACE (SFI)
+P: Len Brown
+M: lenb@kernel.org
+L: sfi-devel@simplefirmware.org
+W: http://simplefirmware.org/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-sfi-2.6.git
S: Supported
-F: drivers/pci/hotplug/shpchp*
+F: arch/x86/kernel/*sfi*
+F: drivers/sfi/
+F: include/linux/sfi*.h
SIMTEC EB110ATX (Chalice CATS)
P: Ben Dooks
@@ -5046,6 +5081,11 @@ T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.2.x/tomoyo-lsm/patches
S: Maintained
F: security/tomoyo/
+TOPSTAR LAPTOP EXTRAS DRIVER
+M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+S: Maintained
+F: drivers/platform/x86/topstar-laptop.c
+
TOSHIBA ACPI EXTRAS DRIVER
S: Orphan
F: drivers/platform/x86/toshiba_acpi.c
@@ -5638,6 +5678,12 @@ L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/wd7000.c
+WINBOND CIR DRIVER
+P: David Härdeman
+M: david@hardeman.nu
+S: Maintained
+F: drivers/input/misc/winbond-cir.c
+
WIMAX STACK
M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
M: linux-wimax@intel.com
@@ -5657,8 +5703,7 @@ S: Maintained
F: drivers/input/misc/wistron_btns.c
WL1251 WIRELESS DRIVER
-P: Kalle Valo
-M: kalle.valo@nokia.com
+M: Kalle Valo <kalle.valo@nokia.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
diff --git a/Makefile b/Makefile
index 433493a2b77..f908accd332 100644
--- a/Makefile
+++ b/Makefile
@@ -179,9 +179,46 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
# Alternatively CROSS_COMPILE can be set in the environment.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
+#
+# To force ARCH and CROSS_COMPILE settings, include kernel.* files
+# in the kernel tree - do not patch this file.
export KBUILD_BUILDHOST := $(SUBARCH)
-ARCH ?= $(SUBARCH)
-CROSS_COMPILE ?=
+
+# Kbuild saves the ARCH and CROSS_COMPILE settings in kernel.* files.
+# Restore these settings and check that the user did not specify
+# conflicting values.
+
+saved_arch := $(shell cat include/generated/kernel.arch 2> /dev/null)
+saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)
+
+ifneq ($(CROSS_COMPILE),)
+ ifneq ($(saved_cross),)
+ ifneq ($(CROSS_COMPILE),$(saved_cross))
+ $(error CROSS_COMPILE changed from \
+ "$(saved_cross)" to \
+ to "$(CROSS_COMPILE)". \
+ Use "make mrproper" to fix it up)
+ endif
+ endif
+else
+ CROSS_COMPILE := $(saved_cross)
+endif
+
+ifneq ($(ARCH),)
+ ifneq ($(saved_arch),)
+ ifneq ($(saved_arch),$(ARCH))
+ $(error ARCH changed from \
+ "$(saved_arch)" to "$(ARCH)". \
+ Use "make mrproper" to fix it up)
+ endif
+ endif
+else
+ ifneq ($(saved_arch),)
+ ARCH := $(saved_arch)
+ else
+ ARCH := $(SUBARCH)
+ endif
+endif
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
@@ -315,6 +352,7 @@ OBJCOPY = $(CROSS_COMPILE)objcopy
OBJDUMP = $(CROSS_COMPILE)objdump
AWK = awk
GENKSYMS = scripts/genksyms/genksyms
+INSTALLKERNEL := installkernel
DEPMOD = /sbin/depmod
KALLSYMS = scripts/kallsyms
PERL = perl
@@ -353,7 +391,8 @@ KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE
+export CPP AR NM STRIP OBJCOPY OBJDUMP
+export MAKE AWK GENKSYMS INSTALLKERNEL PERL UTS_MACHINE
export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
@@ -444,6 +483,11 @@ ifeq ($(config-targets),1)
include $(srctree)/arch/$(SRCARCH)/Makefile
export KBUILD_DEFCONFIG KBUILD_KCONFIG
+# save ARCH & CROSS_COMPILE settings
+$(shell mkdir -p include/generated && \
+ echo $(ARCH) > include/generated/kernel.arch && \
+ echo $(CROSS_COMPILE) > include/generated/kernel.cross)
+
config: scripts_basic outputmakefile FORCE
$(Q)mkdir -p include/linux include/config
$(Q)$(MAKE) $(build)=scripts/kconfig $@
@@ -571,6 +615,9 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
# revert to pre-gcc-4.4 behaviour of .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+# conserve stack if available
+KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
+
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
# But warn user when we do so
warn-assign = \
@@ -591,12 +638,12 @@ endif
# Use --build-id when available.
LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
- $(call ld-option, -Wl$(comma)--build-id,))
+ $(call cc-ldoption, -Wl$(comma)--build-id,))
LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
-LDFLAGS_vmlinux += -X
+LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
# Default kernel image to build when no specific target is given.
@@ -980,11 +1027,6 @@ prepare0: archprepare FORCE
# All the preparing..
prepare: prepare0
-# Leave this as default for preprocessing vmlinux.lds.S, which is now
-# done in arch/$(ARCH)/kernel/Makefile
-
-export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH)
-
# The asm symlink changes when $(ARCH) changes.
# Detect this and ask user to run make mrproper
# If asm is a stale symlink (point to dir that does not exist) remove it
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9fb8aae5c39..443448154f3 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -45,6 +45,14 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
+config GENERIC_TIME
+ bool
+ default y
+
+config ARCH_USES_GETTIMEOFFSET
+ bool
+ default y
+
config ZONE_DMA
bool
default y
diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
index ef183823029..9d0727d18ae 100644
--- a/arch/alpha/boot/tools/objstrip.c
+++ b/arch/alpha/boot/tools/objstrip.c
@@ -93,7 +93,7 @@ main (int argc, char *argv[])
ofd = 1;
if (i < argc) {
ofd = open(argv[i++], O_WRONLY | O_CREAT | O_TRUNC, 0666);
- if (fd == -1) {
+ if (ofd == -1) {
perror("open");
exit(1);
}
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 25da0017ec8..e42823e954a 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
@@ -26,6 +26,8 @@
#define F_GETOWN 6 /* for sockets. */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
+#define F_SETOWN_EX 12
+#define F_GETOWN_EX 13
/* for posix fcntl() and lockf() */
#define F_RDLCK 1
diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h
index 88971460fa6..242c09ba98c 100644
--- a/arch/alpha/include/asm/hardirq.h
+++ b/arch/alpha/include/asm/hardirq.h
@@ -1,17 +1,9 @@
#ifndef _ALPHA_HARDIRQ_H
#define _ALPHA_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned long __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
void ack_bad_irq(unsigned int irq);
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif /* _ALPHA_HARDIRQ_H */
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h
index 90d7c35d286..99c56d47879 100644
--- a/arch/alpha/include/asm/mman.h
+++ b/arch/alpha/include/asm/mman.h
@@ -28,6 +28,8 @@
#define MAP_NORESERVE 0x10000 /* don't check for reservations */
#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x40000 /* do not block on IO */
+#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x100000 /* create a huge page mapping */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_SYNC 2 /* synchronous memory sync */
@@ -48,6 +50,9 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 547e90951ce..3f390e8cc0b 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -47,7 +47,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
extern int smp_num_cpus;
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#else /* CONFIG_SMP */
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index b4f284c72ff..36b3a30ba0e 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -22,23 +22,6 @@ static inline int cpu_to_node(int cpu)
return node;
}
-static inline cpumask_t node_to_cpumask(int node)
-{
- cpumask_t node_cpu_mask = CPU_MASK_NONE;
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpu_set(cpu, node_cpu_mask);
- }
-
-#ifdef DEBUG_NUMA
- printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
-#endif
-
- return node_cpu_mask;
-}
-
extern struct cpumask node_to_cpumask_map[];
/* FIXME: This is dumb, recalculating every time. But simple. */
static const struct cpumask *cpumask_of_node(int node)
@@ -55,7 +38,6 @@ static const struct cpumask *cpumask_of_node(int node)
return &node_to_cpumask_map[node];
}
-#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
#endif /* !CONFIG_NUMA */
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index e302daecbe5..8e059e58b0a 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -1016,7 +1016,7 @@ marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *m
{
struct marvel_agp_aperture *aper = agp->aperture.sysdata;
return iommu_bind(aper->arena, aper->pg_start + pg_start,
- mem->page_count, mem->memory);
+ mem->page_count, mem->pages);
}
static int
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 319fcb74611..76686497b1e 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -680,7 +680,7 @@ titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *me
{
struct titan_agp_aperture *aper = agp->aperture.sysdata;
return iommu_bind(aper->arena, aper->pg_start + pg_start,
- mem->page_count, mem->memory);
+ mem->page_count, mem->pages);
}
static int
diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c
index 19b86328ffd..6f80ca4f976 100644
--- a/arch/alpha/kernel/init_task.c
+++ b/arch/alpha/kernel/init_task.c
@@ -13,6 +13,5 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
-union thread_union init_thread_union
- __attribute__((section(".data.init_thread")))
- = { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index 00edd04b585..85457b2d451 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -198,7 +198,7 @@ extern unsigned long size_for_memory(unsigned long max);
extern int iommu_reserve(struct pci_iommu_arena *, long, long);
extern int iommu_release(struct pci_iommu_arena *, long, long);
-extern int iommu_bind(struct pci_iommu_arena *, long, long, unsigned long *);
+extern int iommu_bind(struct pci_iommu_arena *, long, long, struct page **);
extern int iommu_unbind(struct pci_iommu_arena *, long, long);
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index bfb880af959..8449504f5e0 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -268,11 +268,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
assume it doesn't support sg mapping, and, since we tried to
use direct_map above, it now must be considered an error. */
if (! alpha_mv.mv_pci_tbi) {
- static int been_here = 0; /* Only print the message once. */
- if (!been_here) {
- printk(KERN_WARNING "pci_map_single: no HW sg\n");
- been_here = 1;
- }
+ printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
return 0;
}
@@ -880,7 +876,7 @@ iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
- unsigned long *physaddrs)
+ struct page **pages)
{
unsigned long flags;
unsigned long *ptes;
@@ -900,7 +896,7 @@ iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
}
for(i = 0, j = pg_start; i < pg_count; i++, j++)
- ptes[j] = mk_iommu_pte(physaddrs[i]);
+ ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
spin_unlock_irqrestore(&arena->lock, flags);
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 3a2fb7a02db..289039bb6bb 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -19,7 +19,6 @@
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
-#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/major.h>
#include <linux/stat.h>
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index b1fe5674c3a..42aa078a5e4 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -548,16 +548,16 @@ setup_profiling_timer(unsigned int multiplier)
static void
-send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
int i;
mb();
- for_each_cpu_mask(i, to_whom)
+ for_each_cpu(i, to_whom)
set_bit(operation, &ipi_data[i].bits);
mb();
- for_each_cpu_mask(i, to_whom)
+ for_each_cpu(i, to_whom)
wripir(i);
}
@@ -624,7 +624,7 @@ smp_send_reschedule(int cpu)
printk(KERN_WARNING
"smp_send_reschedule: Sending IPI to self.\n");
#endif
- send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void
@@ -636,17 +636,17 @@ smp_send_stop(void)
if (hard_smp_processor_id() != boot_cpu_id)
printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
- send_ipi_message(to_whom, IPI_CPU_STOP);
+ send_ipi_message(&to_whom, IPI_CPU_STOP);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
static void
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index b04e2cbf23a..5d0826654c6 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -408,28 +408,17 @@ time_init(void)
* part. So we can't do the "find absolute time in terms of cycles" thing
* that the other ports do.
*/
-void
-do_gettimeofday(struct timeval *tv)
+u32 arch_gettimeoffset(void)
{
- unsigned long flags;
- unsigned long sec, usec, seq;
- unsigned long delta_cycles, delta_usec, partial_tick;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
-
- delta_cycles = rpcc() - state.last_time;
- sec = xtime.tv_sec;
- usec = (xtime.tv_nsec / 1000);
- partial_tick = state.partial_tick;
-
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
#ifdef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
in sync and keep them there, we can't use the rpcc tricks. */
- delta_usec = 0;
+ return 0;
#else
+ unsigned long delta_cycles, delta_usec, partial_tick;
+
+ delta_cycles = rpcc() - state.last_time;
+ partial_tick = state.partial_tick;
/*
* usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
* = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
@@ -446,64 +435,10 @@ do_gettimeofday(struct timeval *tv)
delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
+ partial_tick) * 15625;
delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
+ return delta_usec * 1000;
#endif
-
- usec += delta_usec;
- if (usec >= 1000000) {
- sec += 1;
- usec -= 1000000;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
}
-EXPORT_SYMBOL(do_gettimeofday);
-
-int
-do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
- unsigned long delta_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
-
- /* The offset that is added into time in do_gettimeofday above
- must be subtracted out here to keep a coherent view of the
- time. Without this, a full-tick error is possible. */
-
-#ifdef CONFIG_SMP
- delta_nsec = 0;
-#else
- delta_nsec = rpcc() - state.last_time;
- delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
- + state.partial_tick) * 15625;
- delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
- delta_nsec *= 1000;
-#endif
-
- nsec -= delta_nsec;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
-
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-
/*
* In order to set the CMOS clock precisely, set_rtc_mmss has to be
* called 500 ms after the second nowtime has started, because when
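
With do_gettimeofday()/do_settimeofday() gone, the Alpha port now only reports sub-tick progress through arch_gettimeoffset(), which returns nanoseconds since the last tick (0 on SMP, where the cycle counters cannot be trusted); the caller adds that offset to the tick-granular time base, much as the removed usec-rollover code used to do by hand. A simplified sketch of that combination, using a hypothetical timespec-like pair — this is the shape of the logic, not the real generic timekeeping code:

    #include <linux/types.h>

    struct wall_time_sketch {
            long tv_sec;
            long tv_nsec;
    };

    /* Combine a tick-granular base time with the sub-tick offset an
     * arch_gettimeoffset()-style hook returns, normalising nanosecond
     * overflow into the seconds field as the removed code did for usec. */
    static struct wall_time_sketch add_offset(struct wall_time_sketch base,
                                              u32 offset_ns)
    {
            base.tv_nsec += offset_ns;
            while (base.tv_nsec >= 1000000000L) {
                    base.tv_nsec -= 1000000000L;
                    base.tv_sec++;
            }
            return base;
    }
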
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 6dc03c35caa..2906665b1c1 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
OUTPUT_FORMAT("elf64-alpha")
OUTPUT_ARCH(alpha)
@@ -31,88 +32,21 @@ SECTIONS
} :kernel
RODATA
-
- /* Exception table */
- . = ALIGN(16);
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
/* Will be freed after init */
- . = ALIGN(PAGE_SIZE);
- /* Init code and data */
- __init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : {
- INIT_DATA
- }
-
- . = ALIGN(16);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
-
- . = ALIGN(8);
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
-
- . = ALIGN(8);
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
-
- . = ALIGN(8);
- SECURITY_INIT
-
+ __init_begin = ALIGN(PAGE_SIZE);
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
PERCPU(PAGE_SIZE)
-
- . = ALIGN(2 * PAGE_SIZE);
+ /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
+ needed for the THREAD_SIZE aligned init_task gets freed after init */
+ . = ALIGN(THREAD_SIZE);
__init_end = .;
/* Freed after init ends here */
- /* Note 2 page alignment above. */
- .data.init_thread : {
- *(.data.init_thread)
- }
-
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : {
- *(.data.page_aligned)
- }
-
- . = ALIGN(64);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
-
_data = .;
- /* Data */
- .data : {
- DATA_DATA
- CONSTRUCTORS
- }
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
.got : {
*(.got)
@@ -122,16 +56,7 @@ SECTIONS
}
_edata = .; /* End of data section */
- __bss_start = .;
- .sbss : {
- *(.sbss)
- *(.scommon)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
+ BSS_SECTION(0, 0, 0)
_end = .;
.mdebug 0 : {
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index af71d38c8e4..a0902c20d67 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -299,7 +299,7 @@ printk_memory_info(void)
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 0eab5574942..10b403554b6 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -349,7 +349,7 @@ void __init mem_init(void)
printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
"%luk data, %luk init)\n",
- (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d778a699f57..1c4119c6004 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -46,6 +46,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
depends on GENERIC_CLOCKEVENTS
default y if SMP && !LOCAL_TIMERS
+config HAVE_TCM
+ bool
+ select GENERIC_ALLOCATOR
+
config NO_IOPORT
bool
@@ -649,6 +653,7 @@ config ARCH_U300
bool "ST-Ericsson U300 Series"
depends on MMU
select CPU_ARM926T
+ select HAVE_TCM
select ARM_AMBA
select ARM_VIC
select GENERIC_TIME
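
HAVE_TCM selects GENERIC_ALLOCATOR because the ARM tightly-coupled-memory support parcels out TCM with the lib/genalloc.c pool allocator. A minimal sketch of that allocator's API, assuming a pool over an already-mapped window; the base address and sizes below are placeholders, not values from the patch:

    #include <linux/genalloc.h>

    /* Create a pool, hand it a memory window, and carve allocations
     * out of it -- the pattern the TCM code relies on. */
    static unsigned long tcm_pool_sketch(void)
    {
            struct gen_pool *pool;
            unsigned long addr;

            pool = gen_pool_create(2, -1);          /* 4-byte granularity, any node */
            if (!pool)
                    return 0;
            if (gen_pool_add(pool, 0xfffe0000UL, 0x8000, -1)) {
                    gen_pool_destroy(pool);
                    return 0;
            }

            addr = gen_pool_alloc(pool, 128);       /* 128 bytes from the window */
            if (addr)
                    gen_pool_free(pool, addr, 128);
            return addr;
    }
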
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7350557a81e..a73caaf6676 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -14,7 +14,7 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
-CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
+
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
@@ -25,7 +25,7 @@ KBUILD_CFLAGS +=$(call cc-option,-marm,)
# Select a platform tht is kept up-to-date
KBUILD_DEFCONFIG := versatile_defconfig
-# defines filename extension depending memory manement type.
+# defines filename extension depending memory management type.
ifeq ($(CONFIG_MMU),)
MMUEXT := -nommu
endif
@@ -279,7 +279,7 @@ define archhelp
echo ' (supply initrd image via make variable INITRD=<path>)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
- echo ' Install using (your) ~/bin/installkernel or'
- echo ' (distribution) /sbin/installkernel or'
+ echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
endef
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 9f9bed20734..06ea7d42ce8 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -21,8 +21,8 @@
#
# User may have a custom install script
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
if [ "$(basename $2)" = "zImage" ]; then
# Compressed install
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 2293f0ce061..bd36c778c81 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -865,6 +865,7 @@ void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir
spin_unlock_irqrestore(&lchip->lock, flags);
}
+EXPORT_SYMBOL(locomo_gpio_set_dir);
int locomo_gpio_read_level(struct device *dev, unsigned int bits)
{
@@ -882,6 +883,7 @@ int locomo_gpio_read_level(struct device *dev, unsigned int bits)
ret &= bits;
return ret;
}
+EXPORT_SYMBOL(locomo_gpio_read_level);
int locomo_gpio_read_output(struct device *dev, unsigned int bits)
{
@@ -899,6 +901,7 @@ int locomo_gpio_read_output(struct device *dev, unsigned int bits)
ret &= bits;
return ret;
}
+EXPORT_SYMBOL(locomo_gpio_read_output);
void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set)
{
@@ -920,6 +923,7 @@ void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set)
spin_unlock_irqrestore(&lchip->lock, flags);
}
+EXPORT_SYMBOL(locomo_gpio_write);
static void locomo_m62332_sendbit(void *mapbase, int bit)
{
@@ -1084,13 +1088,12 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
spin_unlock_irqrestore(&lchip->lock, flags);
}
+EXPORT_SYMBOL(locomo_m62332_senddata);
/*
* Frontlight control
*/
-static struct locomo *locomo_chip_driver(struct locomo_dev *ldev);
-
void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf)
{
unsigned long flags;
@@ -1182,11 +1185,13 @@ int locomo_driver_register(struct locomo_driver *driver)
driver->drv.bus = &locomo_bus_type;
return driver_register(&driver->drv);
}
+EXPORT_SYMBOL(locomo_driver_register);
void locomo_driver_unregister(struct locomo_driver *driver)
{
driver_unregister(&driver->drv);
}
+EXPORT_SYMBOL(locomo_driver_unregister);
static int __init locomo_init(void)
{
@@ -1208,11 +1213,3 @@ module_exit(locomo_exit);
MODULE_DESCRIPTION("Sharp LoCoMo core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
-
-EXPORT_SYMBOL(locomo_driver_register);
-EXPORT_SYMBOL(locomo_driver_unregister);
-EXPORT_SYMBOL(locomo_gpio_set_dir);
-EXPORT_SYMBOL(locomo_gpio_read_level);
-EXPORT_SYMBOL(locomo_gpio_read_output);
-EXPORT_SYMBOL(locomo_gpio_write);
-EXPORT_SYMBOL(locomo_m62332_senddata);
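
The locomo.c change above is purely a placement cleanup: each EXPORT_SYMBOL() now sits directly after the function it exports instead of being collected at the bottom of the file, which matches the usual kernel style. Schematically (locomo_style_example() is a placeholder name):

    #include <linux/module.h>

    /* The function body is irrelevant; the point is where the export goes. */
    int locomo_style_example(void)
    {
            return 0;
    }
    EXPORT_SYMBOL(locomo_style_example);    /* immediately after the definition */
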
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 920ced0b73c..f232941de8a 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/configs/littleton_defconfig b/arch/arm/configs/littleton_defconfig
deleted file mode 100644
index 1db49690805..00000000000
--- a/arch/arm/configs/littleton_defconfig
+++ /dev/null
@@ -1,783 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24-rc5
-# Fri Dec 21 11:06:19 2007
-#
-CONFIG_ARM=y
-CONFIG_SYS_SUPPORTS_APM_EMULATION=y
-CONFIG_GENERIC_GPIO=y
-CONFIG_GENERIC_TIME=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ZONE_DMA=y
-CONFIG_ARCH_MTD_XIP=y
-CONFIG_VECTORS_BASE=0xffff0000
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_LOCK_KERNEL=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_AUDIT is not set
-# CONFIG_IKCONFIG is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-# CONFIG_RELAY is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL=y
-# CONFIG_EMBEDDED is not set
-CONFIG_UID16=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
-# CONFIG_SLOB is not set
-CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_KMOD is not set
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
-# CONFIG_BLK_DEV_BSG is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-
-#
-# System Type
-#
-# CONFIG_ARCH_AAEC2000 is not set
-# CONFIG_ARCH_INTEGRATOR is not set
-# CONFIG_ARCH_REALVIEW is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_AT91 is not set
-# CONFIG_ARCH_CLPS7500 is not set
-# CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_CO285 is not set
-# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_EP93XX is not set
-# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_NETX is not set
-# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_IOP13XX is not set
-# CONFIG_ARCH_IOP32X is not set
-# CONFIG_ARCH_IOP33X is not set
-# CONFIG_ARCH_IXP23XX is not set
-# CONFIG_ARCH_IXP2000 is not set
-# CONFIG_ARCH_IXP4XX is not set
-# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_KS8695 is not set
-# CONFIG_ARCH_NS9XXX is not set
-# CONFIG_ARCH_MXC is not set
-# CONFIG_ARCH_PNX4008 is not set
-CONFIG_ARCH_PXA=y
-# CONFIG_ARCH_RPC is not set
-# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_S3C2410 is not set
-# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_DAVINCI is not set
-# CONFIG_ARCH_OMAP is not set
-
-#
-# Intel PXA2xx/PXA3xx Implementations
-#
-
-#
-# Supported PXA3xx Processor Variants
-#
-CONFIG_CPU_PXA300=y
-CONFIG_CPU_PXA310=y
-# CONFIG_CPU_PXA320 is not set
-# CONFIG_ARCH_LUBBOCK is not set
-# CONFIG_MACH_LOGICPD_PXA270 is not set
-# CONFIG_MACH_MAINSTONE is not set
-# CONFIG_ARCH_PXA_IDP is not set
-# CONFIG_PXA_SHARPSL is not set
-# CONFIG_MACH_TRIZEPS4 is not set
-# CONFIG_MACH_EM_X270 is not set
-# CONFIG_MACH_ZYLONITE is not set
-CONFIG_MACH_LITTLETON=y
-# CONFIG_MACH_ARMCORE is not set
-CONFIG_PXA3xx=y
-CONFIG_PXA_SSP=y
-
-#
-# Boot options
-#
-
-#
-# Power management
-#
-
-#
-# Processor Type
-#
-CONFIG_CPU_32=y
-CONFIG_CPU_XSC3=y
-CONFIG_CPU_32v5=y
-CONFIG_CPU_ABRT_EV5T=y
-CONFIG_CPU_CACHE_VIVT=y
-CONFIG_CPU_TLB_V4WBI=y
-CONFIG_CPU_CP15=y
-CONFIG_CPU_CP15_MMU=y
-CONFIG_IO_36=y
-
-#
-# Processor Features
-#
-# CONFIG_ARM_THUMB is not set
-# CONFIG_CPU_DCACHE_DISABLE is not set
-# CONFIG_CPU_BPREDICT_DISABLE is not set
-# CONFIG_OUTER_CACHE is not set
-CONFIG_IWMMXT=y
-
-#
-# Bus support
-#
-# CONFIG_PCI_SYSCALL is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCCARD is not set
-
-#
-# Kernel Features
-#
-CONFIG_TICK_ONESHOT=y
-# CONFIG_NO_HZ is not set
-# CONFIG_HIGH_RES_TIMERS is not set
-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_PREEMPT=y
-CONFIG_HZ=100
-CONFIG_AEABI=y
-CONFIG_OABI_COMPAT=y
-# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_ALIGNMENT_TRAP=y
-
-#
-# Boot options
-#
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS2,38400 mem=64M"
-# CONFIG_XIP_KERNEL is not set
-# CONFIG_KEXEC is not set
-
-#
-# CPU Frequency scaling
-#
-# CONFIG_CPU_FREQ is not set
-
-#
-# Floating point emulation
-#
-
-#
-# At least one emulation must be selected
-#
-CONFIG_FPE_NWFPE=y
-# CONFIG_FPE_NWFPE_XP is not set
-# CONFIG_FPE_FASTFPE is not set
-
-#
-# Userspace binary formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Power management options
-#
-# CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
-
-#
-# Networking
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-# CONFIG_NETWORK_SECMARK is not set
-# CONFIG_NETFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_FW_LOADER=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
-# CONFIG_PARPORT is not set
-# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-# CONFIG_SCSI is not set
-# CONFIG_SCSI_DMA is not set
-# CONFIG_SCSI_NETLINK is not set
-# CONFIG_ATA is not set
-# CONFIG_MD is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_MACVLAN is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_VETH is not set
-# CONFIG_PHYLIB is not set
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_AX88796 is not set
-CONFIG_SMC91X=y
-# CONFIG_DM9000 is not set
-# CONFIG_SMC911X is not set
-# CONFIG_IBM_NEW_EMAC_ZMII is not set
-# CONFIG_IBM_NEW_EMAC_RGMII is not set
-# CONFIG_IBM_NEW_EMAC_TAH is not set
-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_ISDN is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-# CONFIG_SERIO is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_IPMI_HANDLER is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_NVRAM is not set
-# CONFIG_R3964 is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-
-#
-# SPI support
-#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-# CONFIG_WATCHDOG is not set
-
-#
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
-
-#
-# Graphics support
-#
-# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-CONFIG_FB=y
-# CONFIG_FIRMWARE_EDID is not set
-# CONFIG_FB_DDC is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_S1D13XXX is not set
-CONFIG_FB_PXA=y
-# CONFIG_FB_PXA_PARAMETERS is not set
-# CONFIG_FB_MBX is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-CONFIG_FONTS=y
-# CONFIG_FONT_8x8 is not set
-CONFIG_FONT_8x16=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_7x14 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-# CONFIG_FONT_10x18 is not set
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
-# CONFIG_NEW_LEDS is not set
-CONFIG_RTC_LIB=y
-# CONFIG_RTC_CLASS is not set
-
-#
-# File systems
-#
-# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_INOTIFY is not set
-# CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=y
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=y
-CONFIG_SUNRPC_GSS=y
-# CONFIG_SUNRPC_BIND34 is not set
-CONFIG_RPCSEC_GSS_KRB5=y
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
-# CONFIG_DLM is not set
-# CONFIG_INSTRUMENTATION is not set
-
-#
-# Kernel hacking
-#
-CONFIG_PRINTK_TIME=y
-CONFIG_ENABLE_WARN_DEPRECATED=y
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-CONFIG_FRAME_POINTER=y
-CONFIG_FORCED_INLINING=y
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_SAMPLES is not set
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
-# CONFIG_DEBUG_ICEDCC is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_MANAGER=y
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-# CONFIG_CRYPTO_ECB is not set
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-# CONFIG_CRYPTO_AUTHENC is not set
-CONFIG_CRYPTO_HW=y
-
-#
-# Library routines
-#
-CONFIG_BITREVERSE=y
-CONFIG_CRC_CCITT=y
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
-CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/n770_defconfig b/arch/arm/configs/n770_defconfig
index 672f6db06a5..a1657b73683 100644
--- a/arch/arm/configs/n770_defconfig
+++ b/arch/arm/configs/n770_defconfig
@@ -875,7 +875,7 @@ CONFIG_FB_OMAP_LCDC_EXTERNAL=y
CONFIG_FB_OMAP_LCDC_HWA742=y
# CONFIG_FB_OMAP_LCDC_BLIZZARD is not set
CONFIG_FB_OMAP_MANUAL_UPDATE=y
-# CONFIG_FB_OMAP_LCD_MIPID is not set
+CONFIG_FB_OMAP_LCD_MIPID=y
# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
# CONFIG_FB_OMAP_DMA_TUNE is not set
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 9bb45b932f0..600cb270f2b 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -498,7 +498,7 @@ CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_DOC2001PLUS is not set
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_VERIFY_WRITE=y
-# CONFIG_MTD_NAND_ECC_SMC is not set
+CONFIG_MTD_NAND_ECC_SMC=y
# CONFIG_MTD_NAND_MUSEUM_IDS is not set
# CONFIG_MTD_NAND_GPIO is not set
CONFIG_MTD_NAND_IDS=y
diff --git a/arch/arm/configs/omap3_beagle_defconfig b/arch/arm/configs/omap3_beagle_defconfig
index 51c0fa8897c..357d4021e2d 100644
--- a/arch/arm/configs/omap3_beagle_defconfig
+++ b/arch/arm/configs/omap3_beagle_defconfig
@@ -778,7 +778,33 @@ CONFIG_DAB=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -791,6 +817,25 @@ CONFIG_DAB=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+# CONFIG_LOGO is not set
+
+#
+# Sound
+#
# CONFIG_SOUND is not set
# CONFIG_HID_SUPPORT is not set
CONFIG_USB_SUPPORT=y
diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig
index 9a510eab75a..8a4a7e2ba87 100644
--- a/arch/arm/configs/omap_3430sdp_defconfig
+++ b/arch/arm/configs/omap_3430sdp_defconfig
@@ -1313,8 +1313,33 @@ CONFIG_DVB_ISL6421=m
# Graphics support
#
# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -1331,6 +1356,16 @@ CONFIG_DISPLAY_SUPPORT=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
CONFIG_SND=y
diff --git a/arch/arm/configs/omap_ldp_defconfig b/arch/arm/configs/omap_ldp_defconfig
index 679a4a3e265..b9c48919a68 100644
--- a/arch/arm/configs/omap_ldp_defconfig
+++ b/arch/arm/configs/omap_ldp_defconfig
@@ -690,6 +690,7 @@ CONFIG_GPIOLIB=y
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
#
# PCI GPIO expanders:
@@ -742,6 +743,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+CONFIG_TWL4030_CORE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_TC6387XB is not set
@@ -767,8 +769,46 @@ CONFIG_DAB=y
#
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+CONFIG_FB_OMAP=y
+CONFIG_FB_OMAP_LCD_VGA=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=4
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
#
# Display device support
@@ -780,6 +820,16 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SEQUENCER is not set
diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig
new file mode 100644
index 00000000000..733b851e5b7
--- /dev/null
+++ b/arch/arm/configs/pxa3xx_defconfig
@@ -0,0 +1,1332 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.31-rc1
+# Mon Jul 13 22:48:49 2009
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_CLASSIC_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+# CONFIG_MODULE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# Intel PXA2xx/PXA3xx Implementations
+#
+
+#
+# Supported PXA3xx Processor Variants
+#
+CONFIG_CPU_PXA300=y
+CONFIG_CPU_PXA310=y
+CONFIG_CPU_PXA320=y
+CONFIG_CPU_PXA930=y
+CONFIG_CPU_PXA935=y
+# CONFIG_ARCH_GUMSTIX is not set
+# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
+# CONFIG_ARCH_LUBBOCK is not set
+# CONFIG_MACH_LOGICPD_PXA270 is not set
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_MACH_MP900C is not set
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_PXA_SHARPSL is not set
+# CONFIG_ARCH_VIPER is not set
+# CONFIG_ARCH_PXA_ESERIES is not set
+# CONFIG_TRIZEPS_PXA is not set
+# CONFIG_MACH_H5000 is not set
+# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
+# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
+CONFIG_MACH_ZYLONITE=y
+CONFIG_MACH_LITTLETON=y
+CONFIG_MACH_TAVOREVB=y
+CONFIG_MACH_SAAR=y
+# CONFIG_MACH_ARMCORE is not set
+# CONFIG_MACH_CM_X300 is not set
+# CONFIG_MACH_H4700 is not set
+# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_HIMALAYA is not set
+# CONFIG_MACH_MIOA701 is not set
+# CONFIG_MACH_PCM027 is not set
+# CONFIG_ARCH_PXA_PALM is not set
+# CONFIG_MACH_CSB726 is not set
+# CONFIG_PXA_EZX is not set
+CONFIG_PXA3xx=y
+CONFIG_PXA_SSP=y
+CONFIG_PXA_HAVE_BOARD_IRQS=y
+CONFIG_PLAT_PXA=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSC3=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_IO_36=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_CACHE_XSC3L2=y
+CONFIG_IWMMXT=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS0,115200 mem=64M debug"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_H1900 is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_SHARPSL is not set
+CONFIG_MTD_NAND_PXA3xx=y
+CONFIG_MTD_NAND_PXA3xx_BUILTIN=y
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+CONFIG_MTD_ONENAND_GENERIC=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+CONFIG_SMC91X=y
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_PXA27x=y
+CONFIG_KEYBOARD_PXA930_ROTARY=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_PXA930_TRKBALL=y
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+CONFIG_TOUCHSCREEN_DA9034=y
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_PXA=y
+CONFIG_SERIAL_PXA_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_CHARDEV is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PXA=y
+# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_PXA2XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+CONFIG_GPIO_MAX732X=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCF857X=y
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+CONFIG_GPIO_MAX7301=y
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_SUPPLY_DEBUG=y
+CONFIG_PDA_POWER=y
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+CONFIG_BATTERY_DA9030=y
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+CONFIG_PMIC_DA903X=y
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_PXA=y
+# CONFIG_FB_PXA_OVERLAY is not set
+# CONFIG_FB_PXA_SMARTPANEL is not set
+# CONFIG_FB_PXA_PARAMETERS is not set
+# CONFIG_FB_MBX is not set
+# CONFIG_FB_W100 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+CONFIG_LCD_TDO24M=y
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_DA903X=y
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+CONFIG_FONT_6x11=y
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_PXA=y
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_PCA955X is not set
+CONFIG_LEDS_DA903X=m
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DEBUG=y
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+CONFIG_REGULATOR_DA903X=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_FS_WBUF_VERIFY=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_KMEMCHECK is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
new file mode 100644
index 00000000000..33bb7250946
--- /dev/null
+++ b/arch/arm/configs/xcep_defconfig
@@ -0,0 +1,1129 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.31-rc6
+# Thu Aug 20 09:02:37 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=".xcep-itech"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_UID16 is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+# CONFIG_SHMEM is not set
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+# CONFIG_SLUB is not set
+CONFIG_SLOB=y
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_MARKERS=y
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLOCK is not set
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# Intel PXA2xx/PXA3xx Implementations
+#
+# CONFIG_ARCH_GUMSTIX is not set
+# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
+# CONFIG_ARCH_LUBBOCK is not set
+# CONFIG_MACH_LOGICPD_PXA270 is not set
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_MACH_MP900C is not set
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_PXA_SHARPSL is not set
+# CONFIG_ARCH_VIPER is not set
+# CONFIG_ARCH_PXA_ESERIES is not set
+# CONFIG_TRIZEPS_PXA is not set
+# CONFIG_MACH_H5000 is not set
+# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
+# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
+# CONFIG_MACH_ZYLONITE is not set
+# CONFIG_MACH_LITTLETON is not set
+# CONFIG_MACH_TAVOREVB is not set
+# CONFIG_MACH_SAAR is not set
+# CONFIG_MACH_ARMCORE is not set
+# CONFIG_MACH_CM_X300 is not set
+# CONFIG_MACH_H4700 is not set
+# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_HIMALAYA is not set
+# CONFIG_MACH_MIOA701 is not set
+# CONFIG_MACH_PCM027 is not set
+# CONFIG_ARCH_PXA_PALM is not set
+# CONFIG_MACH_CSB726 is not set
+# CONFIG_PXA_EZX is not set
+CONFIG_MACH_XCEP=y
+CONFIG_PXA25x=y
+CONFIG_PXA_SSP=y
+CONFIG_PLAT_PXA=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
+CONFIG_IWMMXT=y
+CONFIG_XSCALE_PMU=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=mtd4 rootfstype=jffs2 ro console=ttyS0,115200"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=m
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+CONFIG_MTD_PXA2XX=y
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+
+#
+# SCSI device support
+#
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+CONFIG_SMC91X=y
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_PXA=y
+CONFIG_SERIAL_PXA_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PXA=m
+# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=m
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+CONFIG_SENSORS_ADM1021=m
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+CONFIG_SENSORS_MAX6650=m
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_NEW_LEDS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=m
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_SA1100=m
+# CONFIG_RTC_DRV_PXA is not set
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_FILE_LOCKING=y
+# CONFIG_FSNOTIFY is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+CONFIG_JFFS2_FS_WBUF_VERIFY=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="utf8"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=m
+CONFIG_CRYPTO_ALGAPI2=m
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=m
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/zylonite_defconfig b/arch/arm/configs/zylonite_defconfig
deleted file mode 100644
index 7949d04a360..00000000000
--- a/arch/arm/configs/zylonite_defconfig
+++ /dev/null
@@ -1,736 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23
-# Tue Oct 23 13:33:20 2007
-#
-CONFIG_ARM=y
-CONFIG_SYS_SUPPORTS_APM_EMULATION=y
-CONFIG_GENERIC_GPIO=y
-CONFIG_GENERIC_TIME=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-CONFIG_HARDIRQS_SW_RESEND=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ZONE_DMA=y
-CONFIG_ARCH_MTD_XIP=y
-CONFIG_VECTORS_BASE=0xffff0000
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_AUDIT is not set
-# CONFIG_IKCONFIG is not set
-CONFIG_LOG_BUF_SHIFT=18
-# CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-# CONFIG_RELAY is not set
-# CONFIG_BLK_DEV_INITRD is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SYSCTL=y
-# CONFIG_EMBEDDED is not set
-CONFIG_UID16=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-# CONFIG_SLOB is not set
-CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-# CONFIG_MODULE_UNLOAD is not set
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_KMOD is not set
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
-# CONFIG_BLK_DEV_BSG is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-
-#
-# System Type
-#
-# CONFIG_ARCH_AAEC2000 is not set
-# CONFIG_ARCH_INTEGRATOR is not set
-# CONFIG_ARCH_REALVIEW is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_AT91 is not set
-# CONFIG_ARCH_CLPS7500 is not set
-# CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_CO285 is not set
-# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_EP93XX is not set
-# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_NETX is not set
-# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_IOP13XX is not set
-# CONFIG_ARCH_IOP32X is not set
-# CONFIG_ARCH_IOP33X is not set
-# CONFIG_ARCH_IXP23XX is not set
-# CONFIG_ARCH_IXP2000 is not set
-# CONFIG_ARCH_IXP4XX is not set
-# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_KS8695 is not set
-# CONFIG_ARCH_NS9XXX is not set
-# CONFIG_ARCH_MXC is not set
-# CONFIG_ARCH_PNX4008 is not set
-CONFIG_ARCH_PXA=y
-# CONFIG_ARCH_RPC is not set
-# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_S3C2410 is not set
-# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_DAVINCI is not set
-# CONFIG_ARCH_OMAP is not set
-
-#
-# Intel PXA2xx/PXA3xx Implementations
-#
-
-#
-# Supported PXA3xx Processor Variants
-#
-CONFIG_CPU_PXA300=y
-CONFIG_CPU_PXA310=y
-CONFIG_CPU_PXA320=y
-# CONFIG_ARCH_LUBBOCK is not set
-# CONFIG_MACH_LOGICPD_PXA270 is not set
-# CONFIG_MACH_MAINSTONE is not set
-# CONFIG_ARCH_PXA_IDP is not set
-# CONFIG_PXA_SHARPSL is not set
-# CONFIG_MACH_TRIZEPS4 is not set
-# CONFIG_MACH_EM_X270 is not set
-CONFIG_MACH_ZYLONITE=y
-# CONFIG_MACH_ARMCORE is not set
-CONFIG_PXA3xx=y
-
-#
-# Boot options
-#
-
-#
-# Power management
-#
-
-#
-# Processor Type
-#
-CONFIG_CPU_32=y
-CONFIG_CPU_XSC3=y
-CONFIG_CPU_32v5=y
-CONFIG_CPU_ABRT_EV5T=y
-CONFIG_CPU_CACHE_VIVT=y
-CONFIG_CPU_TLB_V4WBI=y
-CONFIG_CPU_CP15=y
-CONFIG_CPU_CP15_MMU=y
-CONFIG_IO_36=y
-
-#
-# Processor Features
-#
-# CONFIG_ARM_THUMB is not set
-# CONFIG_CPU_DCACHE_DISABLE is not set
-# CONFIG_CPU_BPREDICT_DISABLE is not set
-# CONFIG_OUTER_CACHE is not set
-CONFIG_IWMMXT=y
-
-#
-# Bus support
-#
-# CONFIG_PCI_SYSCALL is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCCARD is not set
-
-#
-# Kernel Features
-#
-# CONFIG_TICK_ONESHOT is not set
-# CONFIG_NO_HZ is not set
-# CONFIG_HIGH_RES_TIMERS is not set
-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-# CONFIG_PREEMPT is not set
-CONFIG_HZ=100
-CONFIG_AEABI=y
-CONFIG_OABI_COMPAT=y
-# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4096
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_ALIGNMENT_TRAP=y
-
-#
-# Boot options
-#
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfs/rootfs/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS0,38400 mem=64M debug"
-# CONFIG_XIP_KERNEL is not set
-# CONFIG_KEXEC is not set
-
-#
-# Floating point emulation
-#
-
-#
-# At least one emulation must be selected
-#
-CONFIG_FPE_NWFPE=y
-# CONFIG_FPE_NWFPE_XP is not set
-# CONFIG_FPE_FASTFPE is not set
-
-#
-# Userspace binary formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-
-#
-# Power management options
-#
-# CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
-
-#
-# Networking
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-# CONFIG_NETWORK_SECMARK is not set
-# CONFIG_NETFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
-# CONFIG_PARPORT is not set
-# CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-# CONFIG_SCSI is not set
-# CONFIG_SCSI_DMA is not set
-# CONFIG_SCSI_NETLINK is not set
-# CONFIG_ATA is not set
-# CONFIG_MD is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_MACVLAN is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-# CONFIG_VETH is not set
-# CONFIG_PHYLIB is not set
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_AX88796 is not set
-CONFIG_SMC91X=y
-# CONFIG_DM9000 is not set
-# CONFIG_SMC911X is not set
-# CONFIG_IBM_NEW_EMAC_ZMII is not set
-# CONFIG_IBM_NEW_EMAC_RGMII is not set
-# CONFIG_IBM_NEW_EMAC_TAH is not set
-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_WAN is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_ISDN is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-# CONFIG_SERIO is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-# CONFIG_SERIAL_8250 is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_IPMI_HANDLER is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_NVRAM is not set
-# CONFIG_R3964 is not set
-# CONFIG_RAW_DRIVER is not set
-# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-
-#
-# SPI support
-#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-
-#
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
-
-#
-# Graphics support
-#
-# CONFIG_VGASTATE is not set
-# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-CONFIG_FB=y
-# CONFIG_FIRMWARE_EDID is not set
-# CONFIG_FB_DDC is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_S1D13XXX is not set
-CONFIG_FB_PXA=y
-# CONFIG_FB_PXA_PARAMETERS is not set
-# CONFIG_FB_MBX is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-CONFIG_FONTS=y
-# CONFIG_FONT_8x8 is not set
-# CONFIG_FONT_8x16 is not set
-CONFIG_FONT_6x11=y
-# CONFIG_FONT_7x14 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-# CONFIG_FONT_10x18 is not set
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
-# CONFIG_NEW_LEDS is not set
-CONFIG_RTC_LIB=y
-# CONFIG_RTC_CLASS is not set
-
-#
-# File systems
-#
-# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_INOTIFY is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=y
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=y
-CONFIG_SUNRPC_GSS=y
-# CONFIG_SUNRPC_BIND34 is not set
-CONFIG_RPCSEC_GSS_KRB5=y
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
-# CONFIG_DLM is not set
-# CONFIG_INSTRUMENTATION is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_SLUB_DEBUG_ON is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_FRAME_POINTER=y
-# CONFIG_SAMPLES is not set
-CONFIG_DEBUG_USER=y
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_MANAGER=y
-# CONFIG_CRYPTO_HMAC is not set
-# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-# CONFIG_CRYPTO_ECB is not set
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-# CONFIG_CRYPTO_AUTHENC is not set
-# CONFIG_CRYPTO_HW is not set
-
-#
-# Library routines
-#
-CONFIG_BITREVERSE=y
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
-CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_DMA=y
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e..d0daeab2234 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
#ifdef __KERNEL__
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
#if __LINUX_ARM_ARCH__ >= 6
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens. Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
*/
-static inline void atomic_set(atomic_t *v, int i)
-{
- unsigned long tmp;
-
- __asm__ __volatile__("@ atomic_set\n"
-"1: ldrex %0, [%1]\n"
-" strex %0, %2, [%1]\n"
-" teq %0, #0\n"
-" bne 1b"
- : "=&r" (tmp)
- : "r" (&v->counter), "r" (i)
- : "cc");
-}
-
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#define atomic_set(v,i) (((v)->counter) = (i))
-
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
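
From a caller's point of view the atomic API is unchanged by the hunk above: atomic_set() becomes a plain store and relies on the exclusive monitor being cleared on every exception return (see the svc_exit/restore_user_regs changes further down in this patch). A minimal usage sketch, with a hypothetical counter; atomic_read/atomic_set/atomic_add/atomic_add_return are the real kernel API, everything else here is illustrative only:

#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic_t example_users = ATOMIC_INIT(0);	/* hypothetical counter */

static void example_use(void)
{
	/* Plain store; safe because the ldrex/strex monitor is cleared
	 * by clrex (or a dummy strex) on every exception return. */
	atomic_set(&example_users, 0);

	atomic_add(2, &example_users);		/* ldrex/strex loop on ARMv6+ */

	if (atomic_add_return(-2, &example_users) == 0)
		pr_debug("no users left, value=%d\n",
			 atomic_read(&example_users));
}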
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index feaa75f0013..66c160b8547 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,7 +4,7 @@
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H
-#define L1_CACHE_SHIFT 5
+#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418..fd03fb63a33 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
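
The cpu_isset()-on-cpu_vm_mask pattern is converted to cpumask_test_cpu()/mm_cpumask() here and again in the mmu_context, tlbflush and smp changes below. A short sketch of the new accessor style; the helper name is invented and the header choices are approximate:

#include <linux/sched.h>	/* struct mm_struct, mm_cpumask() */
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical helper: has this mm been active on the current CPU? */
static inline int mm_active_on_this_cpu(struct mm_struct *mm)
{
	/* mm_cpumask(mm) replaces direct &mm->cpu_vm_mask access */
	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}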
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b3e656c6fb7..20ae96cc002 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -63,6 +63,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
return read_cpuid(CPUID_CACHETYPE);
}
+static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
+{
+ return read_cpuid(CPUID_TCM);
+}
+
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
@@ -73,7 +78,10 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
#else
static inline int cpu_is_xsc3(void)
{
- if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+ unsigned int id;
+ id = read_cpuid_id() & 0xffffe000;
+ /* This covers both the Intel and Marvell IDs */
+ if ((id == 0x69056000) || (id == 0x56056000))
return 1;
return 0;
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 83e6ba338e2..1a8c7279a28 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -187,11 +187,74 @@ union iop3xx_desc {
void *ptr;
};
+/* No support for p+q operations */
+static inline int
+iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+ dma_addr_t addr, unsigned char coef)
+{
+ BUG();
+}
+
+static inline int
+iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+ BUG();
+}
+
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+ dma_addr_t *src)
+{
+ BUG();
+}
+
static inline int iop_adma_get_max_xor(void)
{
return 32;
}
+static inline int iop_adma_get_max_pq(void)
+{
+ BUG();
+ return 0;
+}
+
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
int id = chan->device->id;
@@ -332,6 +395,11 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt;
}
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+ return 0;
+}
+
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@@ -349,6 +417,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
return 0;
}
+
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ BUG();
+ return 0;
+}
+
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@@ -756,13 +832,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
hw_desc->src[0] = val;
}
-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
- return desc_ctrl.zero_result_err;
+ return desc_ctrl.zero_result_err << SUM_CHECK_P;
}
static inline void iop_chan_append(struct iop_adma_chan *chan)
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 385c6e8cbbd..59b8c3892f7 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -86,6 +86,7 @@ struct iop_adma_chan {
* @idx: pool index
* @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount
+ * @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
* for example transfer lengths larger than the supported hw max
@@ -102,10 +103,12 @@ struct iop_adma_desc_slot {
u16 idx;
u16 unmap_src_cnt;
size_t unmap_len;
+ struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
u32 *xor_check_result;
u32 *crc32_result;
+ u32 *pq_check_result;
};
};
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index fc26976d8e3..8eebf89f5ab 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __ARM_MMAN_H__
-#define __ARM_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __ARM_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index bcdb9291ef0..de6cefb329d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#ifdef CONFIG_SMP
/* check for possible thread migration */
- if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+ if (!cpumask_empty(mm_cpumask(next)) &&
+ !cpumask_test_cpu(cpu, mm_cpumask(next)))
__flush_icache_all();
#endif
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
- cpu_clear(cpu, prev->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
#endif
}
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a06e735b262..e0d763be184 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* show local interrupt info
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
new file mode 100644
index 00000000000..5929ef5d927
--- /dev/null
+++ b/arch/arm/include/asm/tcm.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ */
+#ifndef __ASMARM_TCM_H
+#define __ASMARM_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(.tcm.data)
+/* Tag constants with this */
+#define __tcmconst __section(.tcm.rodata)
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
+/* Tag function inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(.tcm.text)
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
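
The three tagging macros above only take effect together with the .tcm.* output sections added to vmlinux.lds.S later in this patch, and the header can only be included where CONFIG_HAVE_TCM is set and the DTCM/ITCM offsets are defined. A hypothetical tagging example; the variable, constant and function names are invented for illustration:

#include <asm/tcm.h>

/* Placed in DTCM via the .tcm.data section */
static int sample_state __tcmdata;

/* Placed in ITCM via the .tcm.rodata section */
static const int sample_scale __tcmconst = 4;

/* Runs from ITCM; long_call + noinline keep it callable from RAM text */
int __tcmfunc sample_fastpath(int x)
{
	sample_state += x;
	return sample_state * sample_scale;
}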
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc..a45ab5dd825 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
if (tlb_flag(TLB_V3_FULL))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
if (tlb_flag(TLB_V3_PAGE))
asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 073e85b9b96..bc631161e9c 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -35,7 +35,9 @@
#define ARM(x...)
#define THUMB(x...) x
+#ifdef __ASSEMBLY__
#define W(instr) instr.w
+#endif
#define BSYM(sym) sym + 1
#else /* !CONFIG_THUMB2_KERNEL */
@@ -45,7 +47,9 @@
#define ARM(x...) x
#define THUMB(x...)
+#ifdef __ASSEMBLY__
#define W(instr) instr
+#endif
#define BSYM(sym) sym
#endif /* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 9122c9ee18f..89f7eade20a 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -390,7 +390,7 @@
#define __NR_preadv (__NR_SYSCALL_BASE+361)
#define __NR_pwritev (__NR_SYSCALL_BASE+362)
#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 3213c9382b1..79087dd6d86 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -2,7 +2,8 @@
# Makefile for the linux kernel.
#
-AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
+AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
ifdef CONFIG_DYNAMIC_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
@@ -34,6 +35,7 @@ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
+obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index ecfa98954d1..fafce1b5c69 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -373,7 +373,7 @@
CALL(sys_preadv)
CALL(sys_pwritev)
CALL(sys_rt_tgsigqueueinfo)
- CALL(sys_perf_counter_open)
+ CALL(sys_perf_event_open)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3d727a8a23b..0a2ba51cf35 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -272,7 +272,15 @@ __und_svc:
@
@ r0 - instruction
@
+#ifndef CONFIG_THUMB2_KERNEL
ldr r0, [r2, #-4]
+#else
+ ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2
+ and r9, r0, #0xf800
+ cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
+ ldrhhs r9, [r2] @ bottom 16 bits
+ orrhs r0, r9, r0, lsl #16
+#endif
adr r9, BSYM(1f)
bl call_fpe
@@ -678,7 +686,9 @@ ENTRY(fp_enter)
.word no_fp
.previous
-no_fp: mov pc, lr
+ENTRY(no_fp)
+ mov pc, lr
+ENDPROC(no_fp)
__und_usr_unknown:
enable_irq
@@ -734,13 +744,6 @@ ENTRY(__switch_to)
#ifdef CONFIG_MMU
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
- clrex
-#else
- strex r5, r4, [ip] @ Clear exclusive monitor
-#endif
-#endif
#if defined(CONFIG_HAS_TLS_REG)
mcr p15, 0, r3, c13, c0, 3 @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a4eaf4f920c..ac34c0d9384 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -76,13 +76,27 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
msr spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+ ldr r0, [sp]
+ strex r1, r2, [sp] @ clear the exclusive monitor
+ ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
+#else
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#endif
.endm
.macro restore_user_regs, fast = 0, offset = 0
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+ strex r1, r2, [sp] @ clear the exclusive monitor
+#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
.else
@@ -98,6 +112,7 @@
.endm
#else /* CONFIG_THUMB2_KERNEL */
.macro svc_exit, rpsr
+ clrex @ clear the exclusive monitor
ldr r0, [sp, #S_SP] @ top of the stack
ldr r1, [sp, #S_PC] @ return address
tst r0, #4 @ orig stack 8-byte aligned?
@@ -110,6 +125,7 @@
.endm
.macro restore_user_regs, fast = 0, offset = 0
+ clrex @ clear the exclusive monitor
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index 3f470866bb8..e7cbb50dc35 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -24,9 +24,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
*
* The things we do for performance..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index f692efddd44..60c62c377fa 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
+#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/cacheflush.h>
@@ -83,10 +84,24 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
flush_insns(p->addr, 1);
}
+/*
+ * The actual disarming is done here on each CPU and synchronized using
+ * stop_machine. This synchronization is necessary on SMP to avoid removing
+ * a probe between the moment the 'Undefined Instruction' exception is raised
+ * and the moment the exception handler reads the faulting instruction from
+ * memory.
+ */
+int __kprobes __arch_disarm_kprobe(void *p)
+{
+ struct kprobe *kp = p;
+ *kp->addr = kp->opcode;
+ flush_insns(kp->addr, 1);
+ return 0;
+}
+
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- *p->addr = p->opcode;
- flush_insns(p->addr, 1);
+ stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
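
stop_machine() is what gives __arch_disarm_kprobe() its guarantee: the callback runs while every other online CPU is held with interrupts disabled, so no CPU can be caught between raising the undefined-instruction exception and re-reading the instruction. A generic sketch of the same pattern, not the kprobes code itself; the structure and function names are invented, and a real text patcher would also flush caches as flush_insns() does above:

#include <linux/types.h>
#include <linux/stop_machine.h>

struct patch_request {
	u32 *addr;
	u32 new_insn;
};

/* Runs while all other online CPUs are parked by stop_machine() */
static int do_patch_insn(void *data)
{
	struct patch_request *req = data;

	*req->addr = req->new_insn;	/* no other CPU is executing kernel code here */
	return 0;
}

static void patch_insn(u32 *addr, u32 new_insn)
{
	struct patch_request req = { .addr = addr, .new_insn = new_insn };

	/* NULL runs the callback on one CPU while the rest are held;
	 * the kprobes code passes &cpu_online_map to run it on every CPU. */
	stop_machine(do_patch_insn, &req, NULL);
}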
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d4d4f77c91b..c6c57b640b6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -45,6 +45,7 @@
#include "compat.h"
#include "atags.h"
+#include "tcm.h"
#ifndef MEM_SIZE
#define MEM_SIZE (16*1024*1024)
@@ -749,6 +750,7 @@ void __init setup_arch(char **cmdline_p)
#endif
cpu_init();
+ tcm_init();
/*
* Set up various architecture-specific pointers
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c..e0d32770bb3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->mm)
- cpu_clear(cpu, p->mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
}
read_unlock(&tasklist_lock);
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- cpu_set(cpu, mm->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu_switch_mm(mm->pgd, mm);
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
if (tlb_ops_need_broadcast())
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
else
local_flush_tlb_mm(mm);
}
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_page(vma, uaddr);
}
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_range(vma, start, end);
}
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index b3ec641b5cf..78ecaac6520 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -25,7 +25,6 @@
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
new file mode 100644
index 00000000000..e50303868f1
--- /dev/null
+++ b/arch/arm/kernel/tcm.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/genalloc.h>
+#include <linux/string.h> /* memcpy */
+#include <asm/page.h> /* PAGE_SHIFT */
+#include <asm/cputype.h>
+#include <asm/mach/map.h>
+#include <mach/memory.h>
+#include "tcm.h"
+
+/* Scream and warn about misuse */
+#if !defined(ITCM_OFFSET) || !defined(ITCM_END) || \
+ !defined(DTCM_OFFSET) || !defined(DTCM_END)
+#error "TCM support selected but offsets not defined!"
+#endif
+
+static struct gen_pool *tcm_pool;
+
+/* TCM section definitions from the linker */
+extern char __itcm_start, __sitcm_text, __eitcm_text;
+extern char __dtcm_start, __sdtcm_data, __edtcm_data;
+
+/*
+ * TCM memory resources
+ */
+static struct resource dtcm_res = {
+ .name = "DTCM RAM",
+ .start = DTCM_OFFSET,
+ .end = DTCM_END,
+ .flags = IORESOURCE_MEM
+};
+
+static struct resource itcm_res = {
+ .name = "ITCM RAM",
+ .start = ITCM_OFFSET,
+ .end = ITCM_END,
+ .flags = IORESOURCE_MEM
+};
+
+static struct map_desc dtcm_iomap[] __initdata = {
+ {
+ .virtual = DTCM_OFFSET,
+ .pfn = __phys_to_pfn(DTCM_OFFSET),
+ .length = (DTCM_END - DTCM_OFFSET + 1),
+ .type = MT_UNCACHED
+ }
+};
+
+static struct map_desc itcm_iomap[] __initdata = {
+ {
+ .virtual = ITCM_OFFSET,
+ .pfn = __phys_to_pfn(ITCM_OFFSET),
+ .length = (ITCM_END - ITCM_OFFSET + 1),
+ .type = MT_UNCACHED
+ }
+};
+
+/*
+ * Allocate a chunk of TCM memory
+ */
+void *tcm_alloc(size_t len)
+{
+ unsigned long vaddr;
+
+ if (!tcm_pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc(tcm_pool, len);
+ if (!vaddr)
+ return NULL;
+
+ return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+/*
+ * Free a chunk of TCM memory
+ */
+void tcm_free(void *addr, size_t len)
+{
+ gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+
+static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size)
+{
+ const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128,
+ 256, 512, 1024, -1, -1, -1, -1 };
+ u32 tcm_region;
+ int tcm_size;
+
+ /* Read the special TCM region register c9, 0 */
+ if (!type)
+ asm("mrc p15, 0, %0, c9, c1, 0"
+ : "=r" (tcm_region));
+ else
+ asm("mrc p15, 0, %0, c9, c1, 1"
+ : "=r" (tcm_region));
+
+ tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f];
+ if (tcm_size < 0) {
+ pr_err("CPU: %sTCM of unknown size!\n",
+ type ? "I" : "D");
+ } else {
+ pr_info("CPU: found %sTCM %dk @ %08x, %senabled\n",
+ type ? "I" : "D",
+ tcm_size,
+ (tcm_region & 0xfffff000U),
+ (tcm_region & 1) ? "" : "not ");
+ }
+
+ if (tcm_size != expected_size) {
+ pr_crit("CPU: %sTCM was detected %dk but expected %dk!\n",
+ type ? "I" : "D",
+ tcm_size,
+ expected_size);
+ /* Adjust to the expected size? what can we do... */
+ }
+
+ /* Force move the TCM bank to where we want it, enable */
+ tcm_region = offset | (tcm_region & 0x00000ffeU) | 1;
+
+ if (!type)
+ asm("mcr p15, 0, %0, c9, c1, 0"
+ : /* No output operands */
+ : "r" (tcm_region));
+ else
+ asm("mcr p15, 0, %0, c9, c1, 1"
+ : /* No output operands */
+ : "r" (tcm_region));
+
+ pr_debug("CPU: moved %sTCM %dk to %08x, enabled\n",
+ type ? "I" : "D",
+ tcm_size,
+ (tcm_region & 0xfffff000U));
+}
+
+/*
+ * This initializes the TCM memory
+ */
+void __init tcm_init(void)
+{
+ u32 tcm_status = read_cpuid_tcmstatus();
+ char *start;
+ char *end;
+ char *ram;
+
+ /* Setup DTCM if present */
+ if (tcm_status & (1 << 16)) {
+ setup_tcm_bank(0, DTCM_OFFSET,
+ (DTCM_END - DTCM_OFFSET + 1) >> 10);
+ request_resource(&iomem_resource, &dtcm_res);
+ iotable_init(dtcm_iomap, 1);
+ /* Copy data from RAM to DTCM */
+ start = &__sdtcm_data;
+ end = &__edtcm_data;
+ ram = &__dtcm_start;
+ memcpy(start, ram, (end-start));
+ pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
+ }
+
+ /* Setup ITCM if present */
+ if (tcm_status & 1) {
+ setup_tcm_bank(1, ITCM_OFFSET,
+ (ITCM_END - ITCM_OFFSET + 1) >> 10);
+ request_resource(&iomem_resource, &itcm_res);
+ iotable_init(itcm_iomap, 1);
+ /* Copy code from RAM to ITCM */
+ start = &__sitcm_text;
+ end = &__eitcm_text;
+ ram = &__itcm_start;
+ memcpy(start, ram, (end-start));
+ pr_debug("CPU ITCM: copied code from %p - %p\n", start, end);
+ }
+}
+
+/*
+ * This creates the TCM memory pool and has to be done later,
+ * during core_initcalls, since the allocator is not yet
+ * up and running when the first initialization runs.
+ */
+static int __init setup_tcm_pool(void)
+{
+ u32 tcm_status = read_cpuid_tcmstatus();
+ u32 dtcm_pool_start = (u32) &__edtcm_data;
+ u32 itcm_pool_start = (u32) &__eitcm_text;
+ int ret;
+
+ /*
+ * Set up malloc pool, 2^2 = 4 bytes granularity since
+ * the TCM is sometimes just 4 KiB. NB: page and cache
+ * line alignment does not matter in TCM!
+ */
+ tcm_pool = gen_pool_create(2, -1);
+
+ pr_debug("Setting up TCM memory pool\n");
+
+ /* Add the rest of DTCM to the TCM pool */
+ if (tcm_status & (1 << 16)) {
+ if (dtcm_pool_start < DTCM_END) {
+ ret = gen_pool_add(tcm_pool, dtcm_pool_start,
+ DTCM_END - dtcm_pool_start + 1, -1);
+ if (ret) {
+ pr_err("CPU DTCM: could not add DTCM " \
+ "remainder to pool!\n");
+ return ret;
+ }
+ pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \
+ "the TCM memory pool\n",
+ DTCM_END - dtcm_pool_start + 1,
+ dtcm_pool_start);
+ }
+ }
+
+ /* Add the rest of ITCM to the TCM pool */
+ if (tcm_status & 1) {
+ if (itcm_pool_start < ITCM_END) {
+ ret = gen_pool_add(tcm_pool, itcm_pool_start,
+ ITCM_END - itcm_pool_start + 1, -1);
+ if (ret) {
+ pr_err("CPU ITCM: could not add ITCM " \
+ "remainder to pool!\n");
+ return ret;
+ }
+ pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \
+ "the TCM memory pool\n",
+ ITCM_END - itcm_pool_start + 1,
+ itcm_pool_start);
+ }
+ }
+ return 0;
+}
+
+core_initcall(setup_tcm_pool);
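
Because setup_tcm_pool() only runs at core_initcall time, tcm_alloc() returns NULL before that point (tcm_pool is still unset). A hypothetical runtime user of the pool; the buffer name, size and error handling are invented for illustration:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/tcm.h>

#define SCRATCH_BYTES 256	/* assumed to fit in the leftover DTCM/ITCM */

static void *scratch;

static int scratch_setup(void)
{
	scratch = tcm_alloc(SCRATCH_BYTES);
	if (!scratch)
		return -ENOMEM;	/* pool not created yet, or TCM exhausted */
	pr_info("TCM scratch buffer at %p\n", scratch);
	return 0;
}

static void scratch_teardown(void)
{
	tcm_free(scratch, SCRATCH_BYTES);
}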
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h
new file mode 100644
index 00000000000..8015ad434a4
--- /dev/null
+++ b/arch/arm/kernel/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 5cc4812c976..aecf87dfbae 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -199,6 +199,63 @@ SECTIONS
}
_edata_loc = __data_loc + SIZEOF(.data);
+#ifdef CONFIG_HAVE_TCM
+ /*
+ * We align everything to a page boundary so we can
+ * free it after init has commenced and TCM contents have
+ * been copied to their destination.
+ */
+ .tcm_start : {
+ . = ALIGN(PAGE_SIZE);
+ __tcm_start = .;
+ __itcm_start = .;
+ }
+
+ /*
+ * Link these to the ITCM RAM
+ * Put VMA to the TCM address and LMA to the common RAM
+ * and we'll upload the contents from RAM to TCM and free
+ * the used RAM after that.
+ */
+ .text_itcm ITCM_OFFSET : AT(__itcm_start)
+ {
+ __sitcm_text = .;
+ *(.tcm.text)
+ *(.tcm.rodata)
+ . = ALIGN(4);
+ __eitcm_text = .;
+ }
+
+ /*
+ * Reset the dot pointer, this is needed to create the
+ * relative __dtcm_start below (to be used as extern in code).
+ */
+ . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
+
+ .dtcm_start : {
+ __dtcm_start = .;
+ }
+
+ /* TODO: add remainder of ITCM as well, that can be used for data! */
+ .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
+ {
+ . = ALIGN(4);
+ __sdtcm_data = .;
+ *(.tcm.data)
+ . = ALIGN(4);
+ __edtcm_data = .;
+ }
+
+ /* Reset the dot pointer or the linker gets confused */
+ . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+ /* End marker for freeing TCM copy in linked object */
+ .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+ . = ALIGN(PAGE_SIZE);
+ __tcm_end = .;
+ }
+#endif
+
.bss : {
__bss_start = .; /* BSS */
*(.bss)
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ae04db1ca4..6ee2f6706f8 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -12,8 +12,9 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/cache.h>
-#define COPY_COUNT (PAGE_SZ/64 PLD( -1 ))
+#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
.text
.align 5
@@ -26,17 +27,16 @@
ENTRY(copy_page)
stmfd sp!, {r4, lr} @ 2
PLD( pld [r1, #0] )
- PLD( pld [r1, #32] )
+ PLD( pld [r1, #L1_CACHE_BYTES] )
mov r2, #COPY_COUNT @ 1
ldmia r1!, {r3, r4, ip, lr} @ 4+1
-1: PLD( pld [r1, #64] )
- PLD( pld [r1, #96] )
-2: stmia r0!, {r3, r4, ip, lr} @ 4
- ldmia r1!, {r3, r4, ip, lr} @ 4+1
- stmia r0!, {r3, r4, ip, lr} @ 4
- ldmia r1!, {r3, r4, ip, lr} @ 4+1
+1: PLD( pld [r1, #2 * L1_CACHE_BYTES])
+ PLD( pld [r1, #3 * L1_CACHE_BYTES])
+2:
+ .rept (2 * L1_CACHE_BYTES / 16 - 1)
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
+ .endr
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmgtia r1!, {r3, r4, ip, lr} @ 4
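
The rewritten loop still moves two L1 cache lines per outer iteration in 16-byte ldm/stm bursts, so both iteration counts now follow CONFIG_ARM_L1_CACHE_SHIFT (typically 5 or 6) instead of a hard-coded 32-byte line. The arithmetic, spelled out as a standalone, purely illustrative check:

#include <stdio.h>

#define PAGE_SZ 4096

int main(void)
{
	for (int shift = 5; shift <= 6; shift++) {
		int line = 1 << shift;			/* L1_CACHE_BYTES */
		int copy_count = PAGE_SZ / (2 * line);	/* outer loop iterations */
		int bursts = 2 * line / 16;		/* 16-byte ldm/stm pairs per iteration */

		/* the .rept emits bursts - 1 pairs; the last pair is unrolled
		 * around the subs/bne so loads and stores stay overlapped */
		printf("%2d-byte lines: %2d iterations x %d bursts\n",
		       line, copy_count, bursts);
	}
	return 0;
}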
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index a24d824c428..e35d54d43e7 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -289,6 +289,13 @@ config MACH_NEOCORE926
help
Select this if you are using the Adeneo Neocore 926 board.
+config MACH_AT91SAM9G20EK_2MMC
+ bool "Atmel AT91SAM9G20-EK Evaluation Kit modified for 2 MMC Slots"
+ depends on ARCH_AT91SAM9G20
+ help
+ Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit
+ Rev A or B modified for 2 MMC Slots.
+
endif
# ----------------------------------------------------------
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index a6ed015d82e..ada440aab0c 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MACH_AT91SAM9RLEK) += board-sam9rlek.o
# AT91SAM9G20 board-specific support
obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
+obj-$(CONFIG_MACH_AT91SAM9G20EK_2MMC) += board-sam9g20ek-2slot-mmc.o
obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o
# AT91SAM9G45 board-specific support
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index 412aa49ad2f..d1f775e8635 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -771,9 +771,9 @@ void __init at91_add_device_pwm(u32 mask) {}
* AC97
* -------------------------------------------------------------------- */
-#if defined(CONFIG_SND_AT91_AC97) || defined(CONFIG_SND_AT91_AC97_MODULE)
+#if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE)
static u64 ac97_dmamask = DMA_BIT_MASK(32);
-static struct atmel_ac97_data ac97_data;
+static struct ac97c_platform_data ac97_data;
static struct resource ac97_resources[] = {
[0] = {
@@ -789,7 +789,7 @@ static struct resource ac97_resources[] = {
};
static struct platform_device at91cap9_ac97_device = {
- .name = "ac97c",
+ .name = "atmel_ac97c",
.id = 1,
.dev = {
.dma_mask = &ac97_dmamask,
@@ -800,7 +800,7 @@ static struct platform_device at91cap9_ac97_device = {
.num_resources = ARRAY_SIZE(ac97_resources),
};
-void __init at91_add_device_ac97(struct atmel_ac97_data *data)
+void __init at91_add_device_ac97(struct ac97c_platform_data *data)
{
if (!data)
return;
@@ -818,7 +818,7 @@ void __init at91_add_device_ac97(struct atmel_ac97_data *data)
platform_device_register(&at91cap9_ac97_device);
}
#else
-void __init at91_add_device_ac97(struct atmel_ac97_data *data) {}
+void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#endif
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index ee4ea0e720c..07eb7b07e44 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -278,6 +278,102 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
#endif
+/* --------------------------------------------------------------------
+ * MMC / SD Slot for Atmel MCI Driver
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static u64 mmc_dmamask = DMA_BIT_MASK(32);
+static struct mci_platform_data mmc_data;
+
+static struct resource mmc_resources[] = {
+ [0] = {
+ .start = AT91SAM9260_BASE_MCI,
+ .end = AT91SAM9260_BASE_MCI + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9260_ID_MCI,
+ .end = AT91SAM9260_ID_MCI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9260_mmc_device = {
+ .name = "atmel_mci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &mmc_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &mmc_data,
+ },
+ .resource = mmc_resources,
+ .num_resources = ARRAY_SIZE(mmc_resources),
+};
+
+void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
+{
+ unsigned int i;
+ unsigned int slot_count = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ if (data->slot[i].bus_width) {
+ /* input/irq */
+ if (data->slot[i].detect_pin) {
+ at91_set_gpio_input(data->slot[i].detect_pin, 1);
+ at91_set_deglitch(data->slot[i].detect_pin, 1);
+ }
+ if (data->slot[i].wp_pin)
+ at91_set_gpio_input(data->slot[i].wp_pin, 1);
+
+ switch (i) {
+ case 0:
+ /* CMD */
+ at91_set_A_periph(AT91_PIN_PA7, 1);
+ /* DAT0, maybe DAT1..DAT3 */
+ at91_set_A_periph(AT91_PIN_PA6, 1);
+ if (data->slot[i].bus_width == 4) {
+ at91_set_A_periph(AT91_PIN_PA9, 1);
+ at91_set_A_periph(AT91_PIN_PA10, 1);
+ at91_set_A_periph(AT91_PIN_PA11, 1);
+ }
+ slot_count++;
+ break;
+ case 1:
+ /* CMD */
+ at91_set_B_periph(AT91_PIN_PA1, 1);
+ /* DAT0, maybe DAT1..DAT3 */
+ at91_set_B_periph(AT91_PIN_PA0, 1);
+ if (data->slot[i].bus_width == 4) {
+ at91_set_B_periph(AT91_PIN_PA5, 1);
+ at91_set_B_periph(AT91_PIN_PA4, 1);
+ at91_set_B_periph(AT91_PIN_PA3, 1);
+ }
+ slot_count++;
+ break;
+ default:
+ printk(KERN_ERR
+ "AT91: SD/MMC slot %d not available\n", i);
+ break;
+ }
+ }
+ }
+
+ if (slot_count) {
+ /* CLK */
+ at91_set_A_periph(AT91_PIN_PA8, 0);
+
+ mmc_data = *data;
+ platform_device_register(&at91sam9260_mmc_device);
+ }
+}
+#else
+void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {}
+#endif
+
/* --------------------------------------------------------------------
* NAND / SmartMedia
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 55719a97427..fb5c23af101 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -757,6 +757,42 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data)
void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#endif
+/* --------------------------------------------------------------------
+ * CAN Controller
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_CAN_AT91) || defined(CONFIG_CAN_AT91_MODULE)
+static struct resource can_resources[] = {
+ [0] = {
+ .start = AT91SAM9263_BASE_CAN,
+ .end = AT91SAM9263_BASE_CAN + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9263_ID_CAN,
+ .end = AT91SAM9263_ID_CAN,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9263_can_device = {
+ .name = "at91_can",
+ .id = -1,
+ .resource = can_resources,
+ .num_resources = ARRAY_SIZE(can_resources),
+};
+
+void __init at91_add_device_can(struct at91_can_data *data)
+{
+ at91_set_A_periph(AT91_PIN_PA13, 0); /* CANTX */
+ at91_set_A_periph(AT91_PIN_PA14, 0); /* CANRX */
+ at91sam9263_can_device.dev.platform_data = data;
+
+ platform_device_register(&at91sam9263_can_device);
+}
+#else
+void __init at91_add_device_can(struct at91_can_data *data) {}
+#endif
/* --------------------------------------------------------------------
* LCD Controller
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index d746e8621bc..d581cff80c4 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -24,11 +24,59 @@
#include <mach/at91sam9g45.h>
#include <mach/at91sam9g45_matrix.h>
#include <mach/at91sam9_smc.h>
+#include <mach/at_hdmac.h>
#include "generic.h"
/* --------------------------------------------------------------------
+ * HDMAC - AHB DMA Controller
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
+static u64 hdmac_dmamask = DMA_BIT_MASK(32);
+
+static struct at_dma_platform_data atdma_pdata = {
+ .nr_channels = 8,
+};
+
+static struct resource hdmac_resources[] = {
+ [0] = {
+ .start = AT91_BASE_SYS + AT91_DMA,
+ .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = AT91SAM9G45_ID_DMA,
+ .end = AT91SAM9G45_ID_DMA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at_hdmac_device = {
+ .name = "at_hdmac",
+ .id = -1,
+ .dev = {
+ .dma_mask = &hdmac_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &atdma_pdata,
+ },
+ .resource = hdmac_resources,
+ .num_resources = ARRAY_SIZE(hdmac_resources),
+};
+
+void __init at91_add_device_hdmac(void)
+{
+ dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
+ dma_cap_set(DMA_SLAVE, atdma_pdata.cap_mask);
+ platform_device_register(&at_hdmac_device);
+}
+#else
+void __init at91_add_device_hdmac(void) {}
+#endif
+
+
+/* --------------------------------------------------------------------
* USB Host (OHCI)
* -------------------------------------------------------------------- */
@@ -550,6 +598,61 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
/* --------------------------------------------------------------------
+ * AC97
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE)
+static u64 ac97_dmamask = DMA_BIT_MASK(32);
+static struct ac97c_platform_data ac97_data;
+
+static struct resource ac97_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_AC97C,
+ .end = AT91SAM9G45_BASE_AC97C + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_ID_AC97C,
+ .end = AT91SAM9G45_ID_AC97C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9g45_ac97_device = {
+ .name = "atmel_ac97c",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ac97_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &ac97_data,
+ },
+ .resource = ac97_resources,
+ .num_resources = ARRAY_SIZE(ac97_resources),
+};
+
+void __init at91_add_device_ac97(struct ac97c_platform_data *data)
+{
+ if (!data)
+ return;
+
+ at91_set_A_periph(AT91_PIN_PD8, 0); /* AC97FS */
+ at91_set_A_periph(AT91_PIN_PD9, 0); /* AC97CK */
+ at91_set_A_periph(AT91_PIN_PD7, 0); /* AC97TX */
+ at91_set_A_periph(AT91_PIN_PD6, 0); /* AC97RX */
+
+ /* reset */
+ if (data->reset_pin)
+ at91_set_gpio_output(data->reset_pin, 0);
+
+ ac97_data = *data;
+ platform_device_register(&at91sam9g45_ac97_device);
+}
+#else
+void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
* LCD Controller
* -------------------------------------------------------------------- */
@@ -1220,6 +1323,7 @@ void __init at91_add_device_serial(void) {}
*/
static int __init at91_add_standard_devices(void)
{
+ at91_add_device_hdmac();
at91_add_device_rtc();
at91_add_device_rtt();
at91_add_device_watchdog();
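
at91_add_device_hdmac() above only registers the controller platform device; consumers then go through the generic dmaengine API. A minimal, hypothetical client sketch (the filter is a placeholder; a real user would match the specific at_hdmac channel it needs):

#include <linux/dmaengine.h>

/* Placeholder filter: accept any channel the capability mask selected */
static bool any_chan_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static void example_memcpy_client(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);	/* capability set by at91_add_device_hdmac() */

	chan = dma_request_channel(mask, any_chan_filter, NULL);
	if (!chan)
		return;			/* no HDMAC channel available */

	/* ... prepare and submit descriptors on chan ... */

	dma_release_channel(chan);
}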
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 728186515cd..d345f5453db 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -21,11 +21,57 @@
#include <mach/at91sam9rl.h>
#include <mach/at91sam9rl_matrix.h>
#include <mach/at91sam9_smc.h>
+#include <mach/at_hdmac.h>
#include "generic.h"
/* --------------------------------------------------------------------
+ * HDMAC - AHB DMA Controller
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
+static u64 hdmac_dmamask = DMA_BIT_MASK(32);
+
+static struct at_dma_platform_data atdma_pdata = {
+ .nr_channels = 2,
+};
+
+static struct resource hdmac_resources[] = {
+ [0] = {
+ .start = AT91_BASE_SYS + AT91_DMA,
+ .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = AT91SAM9RL_ID_DMA,
+ .end = AT91SAM9RL_ID_DMA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at_hdmac_device = {
+ .name = "at_hdmac",
+ .id = -1,
+ .dev = {
+ .dma_mask = &hdmac_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &atdma_pdata,
+ },
+ .resource = hdmac_resources,
+ .num_resources = ARRAY_SIZE(hdmac_resources),
+};
+
+void __init at91_add_device_hdmac(void)
+{
+ dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
+ platform_device_register(&at_hdmac_device);
+}
+#else
+void __init at91_add_device_hdmac(void) {}
+#endif
+
+/* --------------------------------------------------------------------
* USB HS Device (Gadget)
* -------------------------------------------------------------------- */
@@ -398,6 +444,61 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
/* --------------------------------------------------------------------
+ * AC97
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE)
+static u64 ac97_dmamask = DMA_BIT_MASK(32);
+static struct ac97c_platform_data ac97_data;
+
+static struct resource ac97_resources[] = {
+ [0] = {
+ .start = AT91SAM9RL_BASE_AC97C,
+ .end = AT91SAM9RL_BASE_AC97C + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9RL_ID_AC97C,
+ .end = AT91SAM9RL_ID_AC97C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91sam9rl_ac97_device = {
+ .name = "atmel_ac97c",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ac97_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &ac97_data,
+ },
+ .resource = ac97_resources,
+ .num_resources = ARRAY_SIZE(ac97_resources),
+};
+
+void __init at91_add_device_ac97(struct ac97c_platform_data *data)
+{
+ if (!data)
+ return;
+
+ at91_set_A_periph(AT91_PIN_PD1, 0); /* AC97FS */
+ at91_set_A_periph(AT91_PIN_PD2, 0); /* AC97CK */
+ at91_set_A_periph(AT91_PIN_PD3, 0); /* AC97TX */
+ at91_set_A_periph(AT91_PIN_PD4, 0); /* AC97RX */
+
+ /* reset */
+ if (data->reset_pin)
+ at91_set_gpio_output(data->reset_pin, 0);
+
+ ac97_data = *data;
+ platform_device_register(&at91sam9rl_ac97_device);
+}
+#else
+void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
* LCD Controller
* -------------------------------------------------------------------- */
@@ -1103,6 +1204,7 @@ void __init at91_add_device_serial(void) {}
*/
static int __init at91_add_standard_devices(void)
{
+ at91_add_device_hdmac();
at91_add_device_rtc();
at91_add_device_rtt();
at91_add_device_watchdog();
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index 61e52b66bc7..50667bed7cc 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -53,7 +53,7 @@ static void __init afeb9260_map_io(void)
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index d3ba29c5d8c..02138af631e 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -50,7 +50,7 @@ static void __init cam60_map_io(void)
/* Initialize processor: 10 MHz crystal */
at91sam9260_initialize(10000000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* set serial console to ttyS0 (ie, DBGU) */
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index 83a1a0fef47..d6940870e40 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -364,7 +364,7 @@ static struct atmel_lcdfb_info __initdata cap9adk_lcdc_data;
/*
* AC97
*/
-static struct atmel_ac97_data cap9adk_ac97_data = {
+static struct ac97c_platform_data cap9adk_ac97_data = {
// .reset_pin = ... not connected
};
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index 9ba7ba2cc3b..7c1e382330f 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -56,7 +56,7 @@ static void __init neocore926_map_io(void)
/* Initialize processor: 20 MHz crystal */
at91sam9263_initialize(20000000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, RTS, CTS) */
@@ -340,7 +340,7 @@ static void __init neocore926_add_device_buttons(void) {}
/*
* AC97
*/
-static struct atmel_ac97_data neocore926_ac97_data = {
+static struct ac97c_platform_data neocore926_ac97_data = {
.reset_pin = AT91_PIN_PA13,
};
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 4cff9a7e61d..664938e8f66 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -53,7 +53,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 93a0f8b100e..ba9d501b5c5 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -54,7 +54,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index f9b19993a7a..c4c8865d52d 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -61,7 +61,7 @@ static void __init ek_map_io(void)
/* Setup the LEDs */
at91_init_leds(AT91_PIN_PA13, AT91_PIN_PA14);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* set serial console to ttyS0 (ie, DBGU) */
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 1bf7bd4cbe1..2d867fb0630 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -57,7 +57,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 16.367 MHz crystal */
at91sam9263_initialize(16367660);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, RTS, CTS) */
@@ -400,6 +400,23 @@ static struct gpio_led ek_pwm_led[] = {
}
};
+/*
+ * CAN
+ */
+static void sam9263ek_transceiver_switch(int on)
+{
+ if (on) {
+ at91_set_gpio_output(AT91_PIN_PA18, 1); /* CANRXEN */
+ at91_set_gpio_output(AT91_PIN_PA19, 0); /* CANRS */
+ } else {
+ at91_set_gpio_output(AT91_PIN_PA18, 0); /* CANRXEN */
+ at91_set_gpio_output(AT91_PIN_PA19, 1); /* CANRS */
+ }
+}
+
+static struct at91_can_data ek_can_data = {
+ .transceiver_switch = sam9263ek_transceiver_switch,
+};
static void __init ek_board_init(void)
{
@@ -431,6 +448,8 @@ static void __init ek_board_init(void)
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
at91_pwm_leds(ek_pwm_led, ARRAY_SIZE(ek_pwm_led));
+ /* CAN */
+ at91_add_device_can(&ek_can_data);
}
MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK")
diff --git a/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c b/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c
new file mode 100644
index 00000000000..a28e53faf71
--- /dev/null
+++ b/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2005 SAN People
+ * Copyright (C) 2008 Atmel
+ * Copyright (C) 2009 Rob Emanuele
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at73c213.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init ek_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
+ | ATMEL_UART_RI);
+
+ /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */
+ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init ek_init_irq(void)
+{
+ at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata ek_usbh_data = {
+ .ports = 2,
+};
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata ek_udc_data = {
+ .vbus_pin = AT91_PIN_PC5,
+ .pullup_pin = 0, /* pull-up driven by UDC */
+};
+
+
+/*
+ * SPI devices.
+ */
+static struct spi_board_info ek_spi_devices[] = {
+#if !defined(CONFIG_MMC_ATMELMCI)
+ { /* DataFlash chip */
+ .modalias = "mtd_dataflash",
+ .chip_select = 1,
+ .max_speed_hz = 15 * 1000 * 1000,
+ .bus_num = 0,
+ },
+#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
+ { /* DataFlash card */
+ .modalias = "mtd_dataflash",
+ .chip_select = 0,
+ .max_speed_hz = 15 * 1000 * 1000,
+ .bus_num = 0,
+ },
+#endif
+#endif
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata ek_macb_data = {
+ .phy_irq_pin = AT91_PIN_PC12,
+ .is_rmii = 1,
+};
+
+
+/*
+ * NAND flash
+ */
+static struct mtd_partition __initdata ek_nand_partition[] = {
+ {
+ .name = "Bootstrap",
+ .offset = 0,
+ .size = 4 * SZ_1M,
+ },
+ {
+ .name = "Partition 1",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = 60 * SZ_1M,
+ },
+ {
+ .name = "Partition 2",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
+{
+ *num_partitions = ARRAY_SIZE(ek_nand_partition);
+ return ek_nand_partition;
+}
+
+/* det_pin is not connected */
+static struct atmel_nand_data __initdata ek_nand_data = {
+ .ale = 21,
+ .cle = 22,
+ .rdy_pin = AT91_PIN_PC13,
+ .enable_pin = AT91_PIN_PC14,
+ .partition_info = nand_partitions,
+#if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16)
+ .bus_width_16 = 1,
+#else
+ .bus_width_16 = 0,
+#endif
+};
+
+static struct sam9_smc_config __initdata ek_nand_smc_config = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 2,
+ .ncs_write_setup = 0,
+ .nwe_setup = 2,
+
+ .ncs_read_pulse = 4,
+ .nrd_pulse = 4,
+ .ncs_write_pulse = 4,
+ .nwe_pulse = 4,
+
+ .read_cycle = 7,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
+ .tdf_cycles = 3,
+};
+
+static void __init ek_add_device_nand(void)
+{
+ /* setup bus-width (8 or 16) */
+ if (ek_nand_data.bus_width_16)
+ ek_nand_smc_config.mode |= AT91_SMC_DBW_16;
+ else
+ ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
+
+ /* configure chip-select 3 (NAND) */
+ sam9_smc_configure(3, &ek_nand_smc_config);
+
+ at91_add_device_nand(&ek_nand_data);
+}
+
+
+/*
+ * MCI (SD/MMC)
+ * det_pin and wp_pin are not connected
+ */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static struct mci_platform_data __initdata ek_mmc_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ .detect_pin = -ENODEV,
+ .wp_pin = -ENODEV,
+ },
+ .slot[1] = {
+ .bus_width = 4,
+ .detect_pin = -ENODEV,
+ .wp_pin = -ENODEV,
+ },
+
+};
+#else
+static struct mci_platform_data __initdata ek_mmc_data = {
+};
+#endif
+
+/*
+ * LEDs
+ */
+static struct gpio_led ek_leds[] = {
+ { /* "bottom" led, green, userled1 to be defined */
+ .name = "ds5",
+ .gpio = AT91_PIN_PB12,
+ .active_low = 1,
+ .default_trigger = "none",
+ },
+ { /* "power" led, yellow */
+ .name = "ds1",
+ .gpio = AT91_PIN_PB13,
+ .default_trigger = "heartbeat",
+ }
+};
+
+static struct i2c_board_info __initdata ek_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("24c512", 0x50),
+ },
+};
+
+
+static void __init ek_board_init(void)
+{
+ /* Serial */
+ at91_add_device_serial();
+ /* USB Host */
+ at91_add_device_usbh(&ek_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&ek_udc_data);
+ /* SPI */
+ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
+ /* NAND */
+ ek_add_device_nand();
+ /* Ethernet */
+ at91_add_device_eth(&ek_macb_data);
+ /* MMC */
+ at91_add_device_mci(0, &ek_mmc_data);
+ /* I2C */
+ at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices));
+ /* LEDs */
+ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
+ /* PCK0 provides MCLK to the WM8731 */
+ at91_set_B_periph(AT91_PIN_PC1, 0);
+ /* SSC (for WM8731) */
+ at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX);
+}
+
+MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod")
+ /* Maintainer: Rob Emanuele */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = ek_map_io,
+ .init_irq = ek_init_irq,
+ .init_machine = ek_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index ca470d504ea..29cf8317748 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -50,7 +50,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index b8558eae522..64c3843f323 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -311,6 +311,14 @@ static void __init ek_add_device_buttons(void) {}
/*
+ * AC97
+ * reset_pin is not connected: NRST
+ */
+static struct ac97c_platform_data ek_ac97_data = {
+};
+
+
+/*
* LEDs ... these could all be PWM-driven, for variable brightness
*/
static struct gpio_led ek_leds[] = {
@@ -372,6 +380,8 @@ static void __init ek_board_init(void)
at91_add_device_lcdc(&ek_lcdc_data);
/* Push Buttons */
ek_add_device_buttons();
+ /* AC97 */
+ at91_add_device_ac97(&ek_ac97_data);
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
at91_pwm_leds(ek_pwm_led, ARRAY_SIZE(ek_pwm_led));
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 9d07679efce..bd28e989e54 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -43,7 +43,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 12.000 MHz crystal */
at91sam9rl_initialize(12000000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) */
@@ -211,6 +211,14 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data;
/*
+ * AC97
+ * reset_pin is not connected: NRST
+ */
+static struct ac97c_platform_data ek_ac97_data = {
+};
+
+
+/*
* LEDs
*/
static struct gpio_led ek_leds[] = {
@@ -299,6 +307,8 @@ static void __init ek_board_init(void)
at91_add_device_mmc(0, &ek_mmc_data);
/* LCD Controller */
at91_add_device_lcdc(&ek_lcdc_data);
+ /* AC97 */
+ at91_add_device_ac97(&ek_ac97_data);
/* Touch Screen Controller */
at91_add_device_tsadcc();
/* LEDs */
diff --git a/arch/arm/mach-at91/board-usb-a9260.c b/arch/arm/mach-at91/board-usb-a9260.c
index d13304c0bc4..905d6ef7680 100644
--- a/arch/arm/mach-at91/board-usb-a9260.c
+++ b/arch/arm/mach-at91/board-usb-a9260.c
@@ -53,7 +53,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 12.000 MHz crystal */
at91sam9260_initialize(12000000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* set serial console to ttyS0 (ie, DBGU) */
diff --git a/arch/arm/mach-at91/board-usb-a9263.c b/arch/arm/mach-at91/board-usb-a9263.c
index d96405b7d57..b6a3480383e 100644
--- a/arch/arm/mach-at91/board-usb-a9263.c
+++ b/arch/arm/mach-at91/board-usb-a9263.c
@@ -52,7 +52,7 @@ static void __init ek_map_io(void)
/* Initialize processor: 12.00 MHz crystal */
at91sam9263_initialize(12000000);
- /* DGBU on ttyS0. (Rx & Tx only) */
+ /* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* set serial console to ttyS0 (ie, DBGU) */
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 13f27a4b882..2f4fcedc02b 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -37,6 +37,7 @@
#include <linux/leds.h>
#include <linux/spi/spi.h>
#include <linux/usb/atmel_usba_udc.h>
+#include <linux/atmel-mci.h>
#include <sound/atmel-ac97c.h>
/* USB Device */
@@ -64,6 +65,7 @@ struct at91_cf_data {
extern void __init at91_add_device_cf(struct at91_cf_data *data);
/* MMC / SD */
+ /* at91_mci platform config */
struct at91_mmc_data {
u8 det_pin; /* card detect IRQ */
unsigned slot_b:1; /* uses Slot B */
@@ -73,6 +75,9 @@ struct at91_mmc_data {
};
extern void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data);
+ /* atmel-mci platform config */
+extern void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data);
+
/* Ethernet (EMAC & MACB) */
struct at91_eth_data {
u32 phy_mask;
@@ -183,6 +188,12 @@ extern void __init at91_add_device_isi(void);
/* Touchscreen Controller */
extern void __init at91_add_device_tsadcc(void);
+/* CAN */
+struct at91_can_data {
+ void (*transceiver_switch)(int on);
+};
+extern void __init at91_add_device_can(struct at91_can_data *data);
+
/* LEDs */
extern void __init at91_init_leds(u8 cpu_led, u8 timer_led);
extern void __init at91_gpio_leds(struct gpio_led *leds, int nr);
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 3dd0e2a2309..dda19cd7619 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -37,7 +37,7 @@ struct clk {
static unsigned long get_uart_rate(struct clk *clk);
static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
-
+static int set_div_rate(struct clk *clk, unsigned long rate);
static struct clk clk_uart1 = {
.sw_locked = 1,
@@ -76,6 +76,13 @@ static struct clk clk_pwm = {
.rate = EP93XX_EXT_CLK_RATE,
};
+static struct clk clk_video = {
+ .sw_locked = 1,
+ .enable_reg = EP93XX_SYSCON_VIDCLKDIV,
+ .enable_mask = EP93XX_SYSCON_CLKDIV_ENABLE,
+ .set_rate = set_div_rate,
+};
+
/* DMA Clocks */
static struct clk clk_m2p0 = {
.enable_reg = EP93XX_SYSCON_PWRCNT,
@@ -140,6 +147,7 @@ static struct clk_lookup clocks[] = {
INIT_CK(NULL, "pll2", &clk_pll2),
INIT_CK("ep93xx-ohci", NULL, &clk_usb_host),
INIT_CK("ep93xx-keypad", NULL, &clk_keypad),
+ INIT_CK("ep93xx-fb", NULL, &clk_video),
INIT_CK(NULL, "pwm_clk", &clk_pwm),
INIT_CK(NULL, "m2p0", &clk_m2p0),
INIT_CK(NULL, "m2p1", &clk_m2p1),
@@ -236,6 +244,84 @@ static int set_keytchclk_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
+ int *pdiv, int *div)
+{
+ unsigned long max_rate, best_rate = 0,
+ actual_rate = 0, mclk_rate = 0, rate_err = -1;
+ int i, found = 0, __div = 0, __pdiv = 0;
+
+ /* Don't exceed the maximum rate */
+ max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
+ (unsigned long)EP93XX_EXT_CLK_RATE / 4);
+ rate = min(rate, max_rate);
+
+ /*
+ * Try the two pll's and the external clock
+ * Because the valid predividers are 2, 2.5 and 3, we multiply
+ * all the clocks by 2 to avoid floating point math.
+ *
+ * This is based on the algorithm in the ep93xx raster guide:
+ * http://be-a-maverick.com/en/pubs/appNote/AN269REV1.pdf
+ *
+ */
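+ /*
+ * Worked example (illustrative numbers, not from the original patch):
+ * if clk_pll2.rate were 192 MHz, the doubled mclk_rate is 384 MHz; a
+ * requested 9.6 MHz pixel clock tried with __pdiv = 4 (predivider 2)
+ * gives __div = 384 / (9.6 * 4) = 10, so actual_rate = 384 / (4 * 10)
+ * = 9.6 MHz exactly, and the stored values are *pdiv = 1, *div = 10.
+ */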
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ mclk_rate = EP93XX_EXT_CLK_RATE * 2;
+ else if (i == 1)
+ mclk_rate = clk_pll1.rate * 2;
+ else if (i == 2)
+ mclk_rate = clk_pll2.rate * 2;
+
+ /* Try each predivider value */
+ for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
+ __div = mclk_rate / (rate * __pdiv);
+ if (__div < 2 || __div > 127)
+ continue;
+
+ actual_rate = mclk_rate / (__pdiv * __div);
+
+ if (!found || abs(actual_rate - rate) < rate_err) {
+ *pdiv = __pdiv - 3;
+ *div = __div;
+ *psel = (i == 2);
+ *esel = (i != 0);
+ best_rate = actual_rate;
+ rate_err = abs(actual_rate - rate);
+ found = 1;
+ }
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ return best_rate;
+}
+
+static int set_div_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long actual_rate;
+ int psel = 0, esel = 0, pdiv = 0, div = 0;
+ u32 val;
+
+ actual_rate = calc_clk_div(rate, &psel, &esel, &pdiv, &div);
+ if (actual_rate == 0)
+ return -EINVAL;
+ clk->rate = actual_rate;
+
+ /* Clear the esel, psel, pdiv and div bits */
+ val = __raw_readl(clk->enable_reg);
+ val &= ~0x7fff;
+
+ /* Set the new esel, psel, pdiv and div bits for the new clock rate */
+ val |= (esel ? EP93XX_SYSCON_CLKDIV_ESEL : 0) |
+ (psel ? EP93XX_SYSCON_CLKDIV_PSEL : 0) |
+ (pdiv << EP93XX_SYSCON_CLKDIV_PDIV_SHIFT) | div;
+ ep93xx_syscon_swlocked_write(val, clk->enable_reg);
+ return 0;
+}
+
int clk_set_rate(struct clk *clk, unsigned long rate)
{
if (clk->set_rate)
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 16b92c37ec9..f7ebed942f6 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -30,6 +30,7 @@
#include <linux/i2c-gpio.h>
#include <mach/hardware.h>
+#include <mach/fb.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -682,6 +683,37 @@ void ep93xx_pwm_release_gpio(struct platform_device *pdev)
EXPORT_SYMBOL(ep93xx_pwm_release_gpio);
+/*************************************************************************
+ * EP93xx video peripheral handling
+ *************************************************************************/
+static struct ep93xxfb_mach_info ep93xxfb_data;
+
+static struct resource ep93xx_fb_resource[] = {
+ {
+ .start = EP93XX_RASTER_PHYS_BASE,
+ .end = EP93XX_RASTER_PHYS_BASE + 0x800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ep93xx_fb_device = {
+ .name = "ep93xx-fb",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xxfb_data,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask,
+ },
+ .num_resources = ARRAY_SIZE(ep93xx_fb_resource),
+ .resource = ep93xx_fb_resource,
+};
+
+void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
+{
+ ep93xxfb_data = *data;
+ platform_device_register(&ep93xx_fb_device);
+}
+
extern void ep93xx_gpio_init(void);
void __init ep93xx_init_devices(void)
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index ea78e908fc8..0fbf87b1633 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -70,6 +70,7 @@
#define EP93XX_USB_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00020000)
#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000)
+#define EP93XX_RASTER_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00030000)
#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000)
#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000)
@@ -207,6 +208,11 @@
#define EP93XX_SYSCON_DEVCFG_ADCPD (1<<2)
#define EP93XX_SYSCON_DEVCFG_KEYS (1<<1)
#define EP93XX_SYSCON_DEVCFG_SHENA (1<<0)
+#define EP93XX_SYSCON_VIDCLKDIV EP93XX_SYSCON_REG(0x84)
+#define EP93XX_SYSCON_CLKDIV_ENABLE (1<<15)
+#define EP93XX_SYSCON_CLKDIV_ESEL (1<<14)
+#define EP93XX_SYSCON_CLKDIV_PSEL (1<<13)
+#define EP93XX_SYSCON_CLKDIV_PDIV_SHIFT 8
#define EP93XX_SYSCON_KEYTCHCLKDIV EP93XX_SYSCON_REG(0x90)
#define EP93XX_SYSCON_KEYTCHCLKDIV_TSEN (1<<31)
#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV (1<<16)
diff --git a/arch/arm/mach-ep93xx/include/mach/fb.h b/arch/arm/mach-ep93xx/include/mach/fb.h
new file mode 100644
index 00000000000..d5ae11d7c45
--- /dev/null
+++ b/arch/arm/mach-ep93xx/include/mach/fb.h
@@ -0,0 +1,56 @@
+/*
+ * arch/arm/mach-ep93xx/include/mach/fb.h
+ */
+
+#ifndef __ASM_ARCH_EP93XXFB_H
+#define __ASM_ARCH_EP93XXFB_H
+
+struct platform_device;
+struct fb_videomode;
+struct fb_info;
+
+#define EP93XXFB_USE_MODEDB 0
+
+/* VideoAttributes flags */
+#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
+#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
+#define EP93XXFB_VSYNC_ENABLE (1 << 2)
+#define EP93XXFB_PIXEL_DATA_ENABLE (1 << 3)
+#define EP93XXFB_COMPOSITE_SYNC (1 << 4)
+#define EP93XXFB_SYNC_VERT_HIGH (1 << 5)
+#define EP93XXFB_SYNC_HORIZ_HIGH (1 << 6)
+#define EP93XXFB_SYNC_BLANK_HIGH (1 << 7)
+#define EP93XXFB_PCLK_FALLING (1 << 8)
+#define EP93XXFB_ENABLE_AC (1 << 9)
+#define EP93XXFB_ENABLE_LCD (1 << 10)
+#define EP93XXFB_ENABLE_CCIR (1 << 12)
+#define EP93XXFB_USE_PARALLEL_INTERFACE (1 << 13)
+#define EP93XXFB_ENABLE_INTERRUPT (1 << 14)
+#define EP93XXFB_USB_INTERLACE (1 << 16)
+#define EP93XXFB_USE_EQUALIZATION (1 << 17)
+#define EP93XXFB_USE_DOUBLE_HORZ (1 << 18)
+#define EP93XXFB_USE_DOUBLE_VERT (1 << 19)
+#define EP93XXFB_USE_BLANK_PIXEL (1 << 20)
+#define EP93XXFB_USE_SDCSN0 (0 << 21)
+#define EP93XXFB_USE_SDCSN1 (1 << 21)
+#define EP93XXFB_USE_SDCSN2 (2 << 21)
+#define EP93XXFB_USE_SDCSN3 (3 << 21)
+
+#define EP93XXFB_ENABLE (EP93XXFB_STATE_MACHINE_ENABLE | \
+ EP93XXFB_PIXEL_CLOCK_ENABLE | \
+ EP93XXFB_VSYNC_ENABLE | \
+ EP93XXFB_PIXEL_DATA_ENABLE)
+
+struct ep93xxfb_mach_info {
+ unsigned int num_modes;
+ const struct fb_videomode *modes;
+ const struct fb_videomode *default_mode;
+ int bpp;
+ unsigned int flags;
+
+ int (*setup)(struct platform_device *pdev);
+ void (*teardown)(struct platform_device *pdev);
+ void (*blank)(int blank_mode, struct fb_info *info);
+};
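+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a board file
+ * would fill in the structure above and register it during machine init,
+ * for example with a hypothetical mode table:
+ *
+ *	static struct fb_videomode example_modes[] = { ... };
+ *
+ *	static struct ep93xxfb_mach_info example_fb_info = {
+ *		.num_modes	= ARRAY_SIZE(example_modes),
+ *		.modes		= example_modes,
+ *		.default_mode	= &example_modes[0],
+ *		.bpp		= 16,
+ *		.flags		= EP93XXFB_USE_SDCSN0,
+ *	};
+ *
+ *	ep93xx_register_fb(&example_fb_info);
+ */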
+
+#endif /* __ASM_ARCH_EP93XXFB_H */
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 5f5fa6574d3..01a0f0838e5 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -6,6 +6,7 @@
struct i2c_board_info;
struct platform_device;
+struct ep93xxfb_mach_info;
struct ep93xx_eth_data
{
@@ -33,6 +34,7 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
+void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
void ep93xx_register_pwm(int pwm0, int pwm1);
int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
void ep93xx_pwm_release_gpio(struct platform_device *pdev);
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 2a318eba1b0..3f35293d457 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -19,6 +19,7 @@
#include <linux/amba/bus.h>
#include <linux/amba/kmi.h>
#include <linux/amba/clcd.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <asm/clkdev.h>
@@ -35,7 +36,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/irq.h>
-#include <asm/mach/mmc.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -400,7 +400,7 @@ static unsigned int mmc_status(struct device *dev)
return status & 8;
}
-static struct mmc_platform_data mmc_data = {
+static struct mmci_platform_data mmc_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
.gpio_wp = -1,
diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h
index 5722e86f217..6d3782d85a9 100644
--- a/arch/arm/mach-iop13xx/include/mach/adma.h
+++ b/arch/arm/mach-iop13xx/include/mach/adma.h
@@ -150,6 +150,8 @@ static inline int iop_adma_get_max_xor(void)
return 16;
}
+#define iop_adma_get_max_pq iop_adma_get_max_xor
+
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
return __raw_readl(ADMA_ADAR(chan));
@@ -211,7 +213,10 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
#define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
+#define IOP_ADMA_PQ_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT
#define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o)
+#define iop_chan_pq_slot_count iop_chan_xor_slot_count
+#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
@@ -220,6 +225,13 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
return hw_desc->dest_addr;
}
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+ return hw_desc->q_dest_addr;
+}
+
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@@ -319,6 +331,58 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
return 1;
}
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop13xx_adma_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ u_desc_ctrl.field.src_select = src_cnt - 1;
+ u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+ u_desc_ctrl.field.pq_xfer_en = 1;
+ u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+ struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop13xx_adma_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = hw_desc->desc_ctrl;
+ return u_desc_ctrl.field.pq_xfer_en;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop13xx_adma_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ u_desc_ctrl.field.src_select = src_cnt - 1;
+ u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
+ u_desc_ctrl.field.zero_result = 1;
+ u_desc_ctrl.field.status_write_back_en = 1;
+ u_desc_ctrl.field.pq_xfer_en = 1;
+ u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan,
u32 byte_count)
@@ -351,6 +415,7 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
}
}
+#define iop_desc_set_pq_zero_sum_byte_count iop_desc_set_zero_sum_byte_count
static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan,
@@ -361,6 +426,16 @@ static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
hw_desc->upper_dest_addr = 0;
}
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+ struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
+
+ hw_desc->dest_addr = addr[0];
+ hw_desc->q_dest_addr = addr[1];
+ hw_desc->upper_dest_addr = 0;
+}
+
static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
dma_addr_t addr)
{
@@ -389,6 +464,29 @@ static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
}
static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+ dma_addr_t addr, unsigned char coef)
+{
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter;
+ struct iop13xx_adma_src *src;
+ int i = 0;
+
+ do {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ src = &iter->src[src_idx];
+ src->src_addr = addr;
+ src->pq_upper_src_addr = 0;
+ src->pq_dmlt = coef;
+ slot_cnt -= slots_per_op;
+ if (slot_cnt) {
+ i += slots_per_op;
+ addr += IOP_ADMA_PQ_MAX_BYTE_COUNT;
+ }
+ } while (slot_cnt);
+}
+
+static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@@ -399,6 +497,15 @@ iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
}
#define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+ dma_addr_t *src)
+{
+ iop_desc_set_xor_src_addr(desc, pq_idx, src[pq_idx]);
+ iop_desc_set_xor_src_addr(desc, pq_idx+1, src[pq_idx+1]);
+}
static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
u32 next_desc_addr)
@@ -428,18 +535,20 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
hw_desc->block_fill_data = val;
}
-static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field;
+ enum sum_check_flags flags;
BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result));
- if (desc_ctrl.pq_xfer_en)
- return byte_count.zero_result_err_q;
- else
- return byte_count.zero_result_err;
+ flags = byte_count.zero_result_err_q << SUM_CHECK_Q;
+ flags |= byte_count.zero_result_err << SUM_CHECK_P;
+
+ return flags;
}
static inline void iop_chan_append(struct iop_adma_chan *chan)
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index bee42c609df..5c147fb66a0 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -477,10 +477,8 @@ void __init iop13xx_platform_init(void)
plat_data = &iop13xx_adma_0_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
- dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
break;
case IOP13XX_INIT_ADMA_1:
@@ -489,10 +487,8 @@ void __init iop13xx_platform_init(void)
plat_data = &iop13xx_adma_1_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
- dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
break;
case IOP13XX_INIT_ADMA_2:
@@ -501,14 +497,11 @@ void __init iop13xx_platform_init(void)
plat_data = &iop13xx_adma_2_data;
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
dma_cap_set(DMA_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
- dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
- dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask);
- dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask);
- dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
+ dma_cap_set(DMA_PQ, plat_data->cap_mask);
+ dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
break;
}
}
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 5083f03e9b5..cfd52fb341c 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -41,8 +41,8 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-static int __init ixp4xx_clocksource_init(void);
-static int __init ixp4xx_clockevent_init(void);
+static void __init ixp4xx_clocksource_init(void);
+static void __init ixp4xx_clockevent_init(void);
static struct clock_event_device clockevent_ixp4xx;
/*************************************************************************
@@ -267,7 +267,7 @@ void __init ixp4xx_init_irq(void)
static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = &clockevent_ixp4xx;
+ struct clock_event_device *evt = dev_id;
/* Clear Pending Interrupt by writing '1' to it */
*IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;
@@ -281,6 +281,7 @@ static struct irqaction ixp4xx_timer_irq = {
.name = "timer1",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = ixp4xx_timer_interrupt,
+ .dev_id = &clockevent_ixp4xx,
};
void __init ixp4xx_timer_init(void)
@@ -401,7 +402,7 @@ void __init ixp4xx_sys_init(void)
/*
* clocksource
*/
-cycle_t ixp4xx_get_cycles(struct clocksource *cs)
+static cycle_t ixp4xx_get_cycles(struct clocksource *cs)
{
return *IXP4XX_OSTS;
}
@@ -417,14 +418,12 @@ static struct clocksource clocksource_ixp4xx = {
unsigned long ixp4xx_timer_freq = FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);
-static int __init ixp4xx_clocksource_init(void)
+static void __init ixp4xx_clocksource_init(void)
{
clocksource_ixp4xx.mult =
clocksource_hz2mult(ixp4xx_timer_freq,
clocksource_ixp4xx.shift);
clocksource_register(&clocksource_ixp4xx);
-
- return 0;
}
/*
@@ -480,7 +479,7 @@ static struct clock_event_device clockevent_ixp4xx = {
.set_next_event = ixp4xx_set_next_event,
};
-static int __init ixp4xx_clockevent_init(void)
+static void __init ixp4xx_clockevent_init(void)
{
clockevent_ixp4xx.mult = div_sc(FREQ, NSEC_PER_SEC,
clockevent_ixp4xx.shift);
@@ -491,5 +490,4 @@ static int __init ixp4xx_clockevent_init(void)
clockevent_ixp4xx.cpumask = cpumask_of(0);
clockevents_register_device(&clockevent_ixp4xx);
- return 0;
}
diff --git a/arch/arm/mach-ixp4xx/include/mach/system.h b/arch/arm/mach-ixp4xx/include/mach/system.h
index d2aa26f5acd..54c0af7fa2d 100644
--- a/arch/arm/mach-ixp4xx/include/mach/system.h
+++ b/arch/arm/mach-ixp4xx/include/mach/system.h
@@ -13,9 +13,11 @@
static inline void arch_idle(void)
{
+ /* ixp4xx does not implement the XScale PWRMODE register,
+ * so it must not call cpu_do_idle() here.
+ */
#if 0
- if (!hlt_counter)
- cpu_do_idle(0);
+ cpu_do_idle();
#endif
}
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 79bdea943eb..6bfd537d5af 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -16,12 +16,164 @@
#include <linux/amba/bus.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
+#include <asm/mach/flash.h>
#include <mach/setup.h>
+#include <mach/nand.h>
+#include <mach/fsmc.h>
#include "clock.h"
+/* These addresses span 16MB, so use three individual pages */
+static struct resource nhk8815_nand_resources[] = {
+ {
+ .name = "nand_addr",
+ .start = NAND_IO_ADDR,
+ .end = NAND_IO_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "nand_cmd",
+ .start = NAND_IO_CMD,
+ .end = NAND_IO_CMD + 0xfff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "nand_data",
+ .start = NAND_IO_DATA,
+ .end = NAND_IO_DATA + 0xfff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static int nhk8815_nand_init(void)
+{
+ /* FSMC setup for nand chip select (8-bit nand in 8815NHK) */
+ writel(0x0000000E, FSMC_PCR(0));
+ writel(0x000D0A00, FSMC_PMEM(0));
+ writel(0x00100A00, FSMC_PATT(0));
+
+ /* enable access to the chip select area */
+ writel(readl(FSMC_PCR(0)) | 0x04, FSMC_PCR(0));
+
+ return 0;
+}
+
+/*
+ * These partitions are the same as those used in the 2.6.20 release
+ * shipped by the vendor; the first two partitions are mandated
+ * by the boot ROM, and the bootloader area is somehow oversized...
+ */
+static struct mtd_partition nhk8815_partitions[] = {
+ {
+ .name = "X-Loader(NAND)",
+ .offset = 0,
+ .size = SZ_256K,
+ }, {
+ .name = "MemInit(NAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_256K,
+ }, {
+ .name = "BootLoader(NAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_2M,
+ }, {
+ .name = "Kernel zImage(NAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 3 * SZ_1M,
+ }, {
+ .name = "Root Filesystem(NAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 22 * SZ_1M,
+ }, {
+ .name = "User Filesystem(NAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+static struct nomadik_nand_platform_data nhk8815_nand_data = {
+ .parts = nhk8815_partitions,
+ .nparts = ARRAY_SIZE(nhk8815_partitions),
+ .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \
+ | NAND_NO_READRDY | NAND_NO_AUTOINCR,
+ .init = nhk8815_nand_init,
+};
+
+static struct platform_device nhk8815_nand_device = {
+ .name = "nomadik_nand",
+ .dev = {
+ .platform_data = &nhk8815_nand_data,
+ },
+ .resource = nhk8815_nand_resources,
+ .num_resources = ARRAY_SIZE(nhk8815_nand_resources),
+};
+
+/* These are the partitions for the OneNand device, different from above */
+static struct mtd_partition nhk8815_onenand_partitions[] = {
+ {
+ .name = "X-Loader(OneNAND)",
+ .offset = 0,
+ .size = SZ_256K,
+ }, {
+ .name = "MemInit(OneNAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_256K,
+ }, {
+ .name = "BootLoader(OneNAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_2M-SZ_256K,
+ }, {
+ .name = "SysImage(OneNAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 4 * SZ_1M,
+ }, {
+ .name = "Root Filesystem(OneNAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 22 * SZ_1M,
+ }, {
+ .name = "User Filesystem(OneNAND)",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+static struct flash_platform_data nhk8815_onenand_data = {
+ .parts = nhk8815_onenand_partitions,
+ .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions),
+};
+
+static struct resource nhk8815_onenand_resource[] = {
+ {
+ .start = 0x30000000,
+ .end = 0x30000000 + SZ_128K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device nhk8815_onenand_device = {
+ .name = "onenand",
+ .id = -1,
+ .dev = {
+ .platform_data = &nhk8815_onenand_data,
+ },
+ .resource = nhk8815_onenand_resource,
+ .num_resources = ARRAY_SIZE(nhk8815_onenand_resource),
+};
+
+static void __init nhk8815_onenand_init(void)
+{
+#ifdef CONFIG_ONENAND
+ /* Set up SMCS0 for OneNand */
+ writel(0x000030db, FSMC_BCR0);
+ writel(0x02100551, FSMC_BTR0);
+#endif
+}
+
#define __MEM_4K_RESOURCE(x) \
.res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
@@ -81,6 +233,8 @@ static int __init nhk8815_eth_init(void)
device_initcall(nhk8815_eth_init);
static struct platform_device *nhk8815_platform_devices[] __initdata = {
+ &nhk8815_nand_device,
+ &nhk8815_onenand_device,
&nhk8815_eth_device,
/* will add more devices */
};
@@ -90,6 +244,7 @@ static void __init nhk8815_platform_init(void)
int i;
cpu8815_platform_init();
+ nhk8815_onenand_init();
platform_add_devices(nhk8815_platform_devices,
ARRAY_SIZE(nhk8815_platform_devices));
diff --git a/arch/arm/mach-nomadik/include/mach/fsmc.h b/arch/arm/mach-nomadik/include/mach/fsmc.h
new file mode 100644
index 00000000000..8c2c0518368
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/fsmc.h
@@ -0,0 +1,29 @@
+
+/* Definitions for the Nomadik FSMC "Flexible Static Memory controller" */
+
+#ifndef __ASM_ARCH_FSMC_H
+#define __ASM_ARCH_FSMC_H
+
+#include <mach/hardware.h>
+/*
+ * Register list
+ */
+
+/* bus control reg. and bus timing reg. for CS0..CS3 */
+#define FSMC_BCR(x) (NOMADIK_FSMC_VA + (x << 3))
+#define FSMC_BTR(x) (NOMADIK_FSMC_VA + (x << 3) + 0x04)
+
+/* PC-card and NAND:
+ * PCR = control register
+ * PMEM = memory timing
+ * PATT = attribute timing
+ * PIO = I/O timing
+ * PECCR = ECC result
+ */
+#define FSMC_PCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x00)
+#define FSMC_PMEM(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x08)
+#define FSMC_PATT(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x0c)
+#define FSMC_PIO(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x10)
+#define FSMC_PECCR(x) (NOMADIK_FSMC_VA + ((2 + x) << 5) + 0x14)
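+
+/*
+ * For illustration (not part of this patch): with the definitions above,
+ * FSMC_PCR(0) resolves to NOMADIK_FSMC_VA + 0x40 and FSMC_PMEM(0) to
+ * NOMADIK_FSMC_VA + 0x48, i.e. the NAND register banks occupy 0x20-byte
+ * blocks starting 0x40 past the FSMC base, one block per chip select.
+ */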
+
+#endif /* __ASM_ARCH_FSMC_H */
diff --git a/arch/arm/mach-nomadik/include/mach/nand.h b/arch/arm/mach-nomadik/include/mach/nand.h
new file mode 100644
index 00000000000..c3c8254c22a
--- /dev/null
+++ b/arch/arm/mach-nomadik/include/mach/nand.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_ARCH_NAND_H
+#define __ASM_ARCH_NAND_H
+
+struct nomadik_nand_platform_data {
+ struct mtd_partition *parts;
+ int nparts;
+ int options;
+ int (*init) (void);
+ int (*exit) (void);
+};
+
+#define NAND_IO_DATA 0x40000000
+#define NAND_IO_CMD 0x40800000
+#define NAND_IO_ADDR 0x41000000
+
+#endif /* __ASM_ARCH_NAND_H */
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 7a2b54c7291..a1132288c70 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -87,7 +87,7 @@ static struct mtd_partition apollon_partitions[] = {
},
};
-static struct flash_platform_data apollon_flash_data = {
+static struct onenand_platform_data apollon_flash_data = {
.parts = apollon_partitions,
.nr_parts = ARRAY_SIZE(apollon_partitions),
};
@@ -99,7 +99,7 @@ static struct resource apollon_flash_resource[] = {
};
static struct platform_device apollon_onenand_device = {
- .name = "onenand",
+ .name = "onenand-flash",
.id = -1,
.dev = {
.platform_data = &apollon_flash_data,
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index e70baa79901..e6e8290b782 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/regulator/machine.h>
#include <linux/gpio.h>
+#include <linux/mmc/host.h>
#include <mach/mcspi.h>
#include <mach/mux.h>
@@ -102,6 +103,7 @@ static struct twl4030_hsmmc_info mmc[] = {
.cover_only = true,
.gpio_cd = 160,
.gpio_wp = -EINVAL,
+ .power_saving = true,
},
{
.name = "internal",
@@ -109,6 +111,8 @@ static struct twl4030_hsmmc_info mmc[] = {
.wires = 8,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
+ .nonremovable = true,
+ .power_saving = true,
},
{} /* Terminator */
};
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index a2e915639b7..bcfcfc7fdb9 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -257,6 +257,11 @@ static inline void omap_init_sti(void) {}
#define OMAP2_MCSPI3_BASE 0x480b8000
#define OMAP2_MCSPI4_BASE 0x480ba000
+#define OMAP4_MCSPI1_BASE 0x48098100
+#define OMAP4_MCSPI2_BASE 0x4809a100
+#define OMAP4_MCSPI3_BASE 0x480b8100
+#define OMAP4_MCSPI4_BASE 0x480ba100
+
static struct omap2_mcspi_platform_config omap2_mcspi1_config = {
.num_cs = 4,
};
@@ -301,7 +306,8 @@ static struct platform_device omap2_mcspi2 = {
},
};
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
+ defined(CONFIG_ARCH_OMAP4)
static struct omap2_mcspi_platform_config omap2_mcspi3_config = {
.num_cs = 2,
};
@@ -325,7 +331,7 @@ static struct platform_device omap2_mcspi3 = {
};
#endif
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static struct omap2_mcspi_platform_config omap2_mcspi4_config = {
.num_cs = 1,
};
@@ -351,14 +357,25 @@ static struct platform_device omap2_mcspi4 = {
static void omap_init_mcspi(void)
{
+ if (cpu_is_omap44xx()) {
+ omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
+ omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff;
+ omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
+ omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff;
+ omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
+ omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff;
+ omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
+ omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff;
+ }
platform_device_register(&omap2_mcspi1);
platform_device_register(&omap2_mcspi2);
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
- if (cpu_is_omap2430() || cpu_is_omap343x())
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
+ defined(CONFIG_ARCH_OMAP4)
+ if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
platform_device_register(&omap2_mcspi3);
#endif
-#ifdef CONFIG_ARCH_OMAP3
- if (cpu_is_omap343x())
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
+ if (cpu_is_omap343x() || cpu_is_omap44xx())
platform_device_register(&omap2_mcspi4);
#endif
}
@@ -397,7 +414,7 @@ static inline void omap_init_sha1_md5(void) { }
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
#define MMCHS_SYSCONFIG 0x0010
#define MMCHS_SYSCONFIG_SWRESET (1 << 1)
@@ -424,8 +441,8 @@ static struct platform_device dummy_pdev = {
**/
static void __init omap_hsmmc_reset(void)
{
- u32 i, nr_controllers = cpu_is_omap34xx() ? OMAP34XX_NR_MMC :
- OMAP24XX_NR_MMC;
+ u32 i, nr_controllers = cpu_is_omap44xx() ? OMAP44XX_NR_MMC :
+ (cpu_is_omap34xx() ? OMAP34XX_NR_MMC : OMAP24XX_NR_MMC);
for (i = 0; i < nr_controllers; i++) {
u32 v, base = 0;
@@ -442,8 +459,21 @@ static void __init omap_hsmmc_reset(void)
case 2:
base = OMAP3_MMC3_BASE;
break;
+ case 3:
+ if (!cpu_is_omap44xx())
+ return;
+ base = OMAP4_MMC4_BASE;
+ break;
+ case 4:
+ if (!cpu_is_omap44xx())
+ return;
+ base = OMAP4_MMC5_BASE;
+ break;
}
+ if (cpu_is_omap44xx())
+ base += OMAP4_MMC_REG_OFFSET;
+
dummy_pdev.id = i;
dev_set_name(&dummy_pdev.dev, "mmci-omap-hs.%d", i);
iclk = clk_get(dev, "ick");
@@ -581,11 +611,23 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
irq = INT_24XX_MMC2_IRQ;
break;
case 2:
- if (!cpu_is_omap34xx())
+ if (!cpu_is_omap44xx() && !cpu_is_omap34xx())
return;
base = OMAP3_MMC3_BASE;
irq = INT_34XX_MMC3_IRQ;
break;
+ case 3:
+ if (!cpu_is_omap44xx())
+ return;
+ base = OMAP4_MMC4_BASE + OMAP4_MMC_REG_OFFSET;
+ irq = INT_44XX_MMC4_IRQ;
+ break;
+ case 4:
+ if (!cpu_is_omap44xx())
+ return;
+ base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET;
+ irq = INT_44XX_MMC5_IRQ;
+ break;
default:
continue;
}
@@ -593,8 +635,15 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
if (cpu_is_omap2420()) {
size = OMAP2420_MMC_SIZE;
name = "mmci-omap";
+ } else if (cpu_is_omap44xx()) {
+ if (i < 3) {
+ base += OMAP4_MMC_REG_OFFSET;
+ irq += IRQ_GIC_START;
+ }
+ size = OMAP4_HSMMC_SIZE;
+ name = "mmci-omap-hs";
} else {
- size = HSMMC_SIZE;
+ size = OMAP3_HSMMC_SIZE;
name = "mmci-omap-hs";
}
omap_mmc_add(name, i, base, size, irq, mmc_data[i]);
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index f91934b2b09..15876828db2 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -57,6 +57,11 @@
#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
#define GPMC_SECTION_SHIFT 28 /* 128 MB */
+#define PREFETCH_FIFOTHRESHOLD (0x40 << 8)
+#define CS_NUM_SHIFT 24
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE 2
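+
+/*
+ * For illustration (numbers not from this patch): with the values above,
+ * gpmc_prefetch_enable() below composes PREFETCH_CONFIG1 for cs = 0,
+ * dma_mode = 1 and a read as (0 << 24) | (0x40 << 8) | (1 << 7) | (1 << 2),
+ * i.e. FIFO threshold 0x40, prefetch enabled and DMA request mode selected;
+ * the engine itself is then started via the PREFETCH_CONTROL write.
+ */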
+
static struct resource gpmc_mem_root;
static struct resource gpmc_cs_mem[GPMC_CS_NUM];
static DEFINE_SPINLOCK(gpmc_mem_lock);
@@ -386,6 +391,63 @@ void gpmc_cs_free(int cs)
}
EXPORT_SYMBOL(gpmc_cs_free);
+/**
+ * gpmc_prefetch_enable - configures and starts prefetch transfer
+ * @cs: nand cs (chip select) number
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+int gpmc_prefetch_enable(int cs, int dma_mode,
+ unsigned int u32_count, int is_write)
+{
+ uint32_t prefetch_config1;
+
+ if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
+ /* Set the amount of bytes to be prefetched */
+ gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);
+
+ /* Set dma/mpu mode, the prefetch read / post write and
+ * enable the engine. Set which chip select has requested it.
+ */
+ prefetch_config1 = ((cs << CS_NUM_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD |
+ ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE) |
+ (0x1 & is_write));
+ gpmc_write_reg(GPMC_PREFETCH_CONFIG1, prefetch_config1);
+ } else {
+ return -EBUSY;
+ }
+ /* Start the prefetch engine */
+ gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1);
+
+ return 0;
+}
+EXPORT_SYMBOL(gpmc_prefetch_enable);
+
+/**
+ * gpmc_prefetch_reset - disables and stops the prefetch engine
+ */
+void gpmc_prefetch_reset(void)
+{
+ /* Stop the PFPW engine */
+ gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0);
+
+ /* Reset/disable the PFPW engine */
+ gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0);
+}
+EXPORT_SYMBOL(gpmc_prefetch_reset);
+
+/**
+ * gpmc_prefetch_status - reads prefetch status of engine
+ */
+int gpmc_prefetch_status(void)
+{
+ return gpmc_read_reg(GPMC_PREFETCH_STATUS);
+}
+EXPORT_SYMBOL(gpmc_prefetch_status);
+
static void __init gpmc_mem_init(void)
{
int cs;
@@ -452,6 +514,5 @@ void __init gpmc_init(void)
l &= 0x03 << 3;
l |= (0x02 << 3) | (1 << 0);
gpmc_write_reg(GPMC_SYSCONFIG, l);
-
gpmc_mem_init();
}
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 3c04c2f1b23..c9c59a2db4e 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -198,6 +198,18 @@ static int twl_mmc_resume(struct device *dev, int slot)
#define twl_mmc_resume NULL
#endif
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+
+static int twl4030_mmc_get_context_loss(struct device *dev)
+{
+ /* FIXME: PM DPS not implemented yet */
+ return 0;
+}
+
+#else
+#define twl4030_mmc_get_context_loss NULL
+#endif
+
static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
@@ -328,6 +340,61 @@ static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int v
return ret;
}
+static int twl_mmc1_set_sleep(struct device *dev, int slot, int sleep, int vdd,
+ int cardsleep)
+{
+ struct twl_mmc_controller *c = &hsmmc[0];
+ int mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+
+ return regulator_set_mode(c->vcc, mode);
+}
+
+static int twl_mmc23_set_sleep(struct device *dev, int slot, int sleep, int vdd,
+ int cardsleep)
+{
+ struct twl_mmc_controller *c = NULL;
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+ int i, err, mode;
+
+ for (i = 1; i < ARRAY_SIZE(hsmmc); i++) {
+ if (mmc == hsmmc[i].mmc) {
+ c = &hsmmc[i];
+ break;
+ }
+ }
+
+ if (c == NULL)
+ return -ENODEV;
+
+ /*
+ * If we don't see a Vcc regulator, assume it's a fixed
+ * voltage always-on regulator.
+ */
+ if (!c->vcc)
+ return 0;
+
+ mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+
+ if (!c->vcc_aux)
+ return regulator_set_mode(c->vcc, mode);
+
+ if (cardsleep) {
+ /* VCC can be turned off if card is asleep */
+ struct regulator *vcc_aux = c->vcc_aux;
+
+ c->vcc_aux = NULL;
+ if (sleep)
+ err = twl_mmc23_set_power(dev, slot, 0, 0);
+ else
+ err = twl_mmc23_set_power(dev, slot, 1, vdd);
+ c->vcc_aux = vcc_aux;
+ } else
+ err = regulator_set_mode(c->vcc, mode);
+ if (err)
+ return err;
+ return regulator_set_mode(c->vcc_aux, mode);
+}
+
static struct omap_mmc_platform_data *hsmmc_data[OMAP34XX_NR_MMC] __initdata;
void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
@@ -390,6 +457,9 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
} else
mmc->slots[0].switch_pin = -EINVAL;
+ mmc->get_context_loss_count =
+ twl4030_mmc_get_context_loss;
+
/* write protect normally uses an OMAP gpio */
if (gpio_is_valid(c->gpio_wp)) {
gpio_request(c->gpio_wp, "mmc_wp");
@@ -400,6 +470,12 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
} else
mmc->slots[0].gpio_wp = -EINVAL;
+ if (c->nonremovable)
+ mmc->slots[0].nonremovable = 1;
+
+ if (c->power_saving)
+ mmc->slots[0].power_saving = 1;
+
/* NOTE: MMC slots should have a Vcc regulator set up.
* This may be from a TWL4030-family chip, another
* controllable regulator, or a fixed supply.
@@ -412,6 +488,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
case 1:
/* on-chip level shifting via PBIAS0/PBIAS1 */
mmc->slots[0].set_power = twl_mmc1_set_power;
+ mmc->slots[0].set_sleep = twl_mmc1_set_sleep;
break;
case 2:
if (c->ext_clock)
@@ -422,6 +499,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
case 3:
/* off-chip level shifting, or none */
mmc->slots[0].set_power = twl_mmc23_set_power;
+ mmc->slots[0].set_sleep = twl_mmc23_set_sleep;
break;
default:
pr_err("MMC%d configuration not supported!\n", c->mmc);
diff --git a/arch/arm/mach-omap2/mmc-twl4030.h b/arch/arm/mach-omap2/mmc-twl4030.h
index 3807c45c9a6..a47e68563fb 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.h
+++ b/arch/arm/mach-omap2/mmc-twl4030.h
@@ -12,6 +12,8 @@ struct twl4030_hsmmc_info {
bool transceiver; /* MMC-2 option */
bool ext_clock; /* use external pin for input clock */
bool cover_only; /* No card detect - just cover switch */
+ bool nonremovable; /* Nonremovable e.g. eMMC */
+ bool power_saving; /* Try to sleep or power off when possible */
int gpio_cd; /* or -EINVAL */
int gpio_wp; /* or -EINVAL */
char *name; /* or NULL for default */
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 89c992b8f75..a6f8eab14ba 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -21,6 +21,11 @@ config CPU_PXA930
config CPU_PXA935
bool "PXA935 (codename Tavor-P65)"
+ select CPU_PXA930
+
+config CPU_PXA950
+ bool "PXA950 (codename Tavor-PV2)"
+ select CPU_PXA930
endmenu
@@ -79,6 +84,12 @@ config MACH_MP900C
bool "Nec Mobilepro 900/c"
select PXA25x
+config MACH_BALLOON3
+ bool "Balloon 3 board"
+ select PXA27x
+ select IWMMXT
+ select PXA_HAVE_BOARD_IRQS
+
config ARCH_PXA_IDP
bool "Accelent Xscale IDP"
select PXA25x
@@ -371,6 +382,15 @@ config MACH_PALMTE2
Say Y here if you intend to run this kernel on a Palm Tungsten|E2
handheld computer.
+config MACH_PALMTC
+ bool "Palm Tungsten|C"
+ default y
+ depends on ARCH_PXA_PALM
+ select PXA25x
+ help
+ Say Y here if you intend to run this kernel on a Palm Tungsten|C
+ handheld computer.
+
config MACH_PALMT5
bool "Palm Tungsten|T5"
default y
@@ -458,6 +478,7 @@ config PXA_EZX
select PXA27x
select IWMMXT
select HAVE_PWM
+ select PXA_HAVE_BOARD_IRQS
config MACH_EZX_A780
bool "Motorola EZX A780"
@@ -489,6 +510,21 @@ config MACH_EZX_E2
default y
depends on PXA_EZX
+config MACH_XCEP
+ bool "Iskratel Electronics XCEP"
+ select PXA25x
+ select MTD
+ select MTD_PARTITIONS
+ select MTD_PHYSMAP
+ select MTD_CFI_INTELEXT
+ select MTD_CFI
+ select MTD_CHAR
+ select SMC91X
+ select PXA_SSP
+ help
+ PXA255 based Single Board Computer with SMC 91C111 ethernet chip and 64 MB of flash.
+ Tuned for usage in Libera instruments for particle accelerators.
+
endmenu
config PXA25x
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index d4c6122a342..f10e152bfc2 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_GUMSTIX_AM300EPD) += am300epd.o
obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
obj-$(CONFIG_MACH_LOGICPD_PXA270) += lpd270.o
obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o
+obj-$(CONFIG_MACH_BALLOON3) += balloon3.o
obj-$(CONFIG_MACH_MP900C) += mp900.o
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_MACH_TRIZEPS4) += trizeps4.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_MACH_E750) += e750.o
obj-$(CONFIG_MACH_E400) += e400.o
obj-$(CONFIG_MACH_E800) += e800.o
obj-$(CONFIG_MACH_PALMTE2) += palmte2.o
+obj-$(CONFIG_MACH_PALMTC) += palmtc.o
obj-$(CONFIG_MACH_PALMT5) += palmt5.o
obj-$(CONFIG_MACH_PALMTX) += palmtx.o
obj-$(CONFIG_MACH_PALMLD) += palmld.o
@@ -78,6 +80,8 @@ obj-$(CONFIG_MACH_ARMCORE) += cm-x2xx.o cm-x255.o cm-x270.o
obj-$(CONFIG_MACH_CM_X300) += cm-x300.o
obj-$(CONFIG_PXA_EZX) += ezx.o
+obj-$(CONFIG_MACH_XCEP) += xcep.o
+
obj-$(CONFIG_MACH_INTELMOTE2) += imote2.o
obj-$(CONFIG_MACH_STARGATE2) += stargate2.o
obj-$(CONFIG_MACH_CSB726) += csb726.o
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
new file mode 100644
index 00000000000..f23138b8fca
--- /dev/null
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -0,0 +1,361 @@
+/*
+ * linux/arch/arm/mach-pxa/balloon3.c
+ *
+ * Support for Balloonboard.org Balloon3 board.
+ *
+ * Author: Nick Bane, Wookey, Jonathan McDowell
+ * Created: June, 2006
+ * Copyright: Toby Churchill Ltd
+ * Derived from mainstone.c, by Nico Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/types.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+#include <asm/sizes.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/flash.h>
+
+#include <mach/pxa27x.h>
+#include <mach/balloon3.h>
+#include <mach/audio.h>
+#include <mach/pxafb.h>
+#include <mach/mmc.h>
+#include <mach/udc.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/irda.h>
+#include <mach/ohci.h>
+
+#include <plat/i2c.h>
+
+#include "generic.h"
+#include "devices.h"
+
+static unsigned long balloon3_irq_enabled;
+
+static unsigned long balloon3_features_present =
+ (1 << BALLOON3_FEATURE_OHCI) | (1 << BALLOON3_FEATURE_CF) |
+ (1 << BALLOON3_FEATURE_AUDIO) |
+ (1 << BALLOON3_FEATURE_TOPPOLY);
+
+int balloon3_has(enum balloon3_features feature)
+{
+ return (balloon3_features_present & (1 << feature)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(balloon3_has);
+
+int __init parse_balloon3_features(char *arg)
+{
+ if (!arg)
+ return 0;
+
+ return strict_strtoul(arg, 0, &balloon3_features_present);
+}
+early_param("balloon3_features", parse_balloon3_features);
+
+static void balloon3_mask_irq(unsigned int irq)
+{
+ int balloon3_irq = (irq - BALLOON3_IRQ(0));
+ balloon3_irq_enabled &= ~(1 << balloon3_irq);
+ __raw_writel(~balloon3_irq_enabled, BALLOON3_INT_CONTROL_REG);
+}
+
+static void balloon3_unmask_irq(unsigned int irq)
+{
+ int balloon3_irq = (irq - BALLOON3_IRQ(0));
+ balloon3_irq_enabled |= (1 << balloon3_irq);
+ __raw_writel(~balloon3_irq_enabled, BALLOON3_INT_CONTROL_REG);
+}
+
+static struct irq_chip balloon3_irq_chip = {
+ .name = "FPGA",
+ .ack = balloon3_mask_irq,
+ .mask = balloon3_mask_irq,
+ .unmask = balloon3_unmask_irq,
+};
+
+static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
+ balloon3_irq_enabled;
+
+ do {
+ /* clear useless edge notification */
+ if (desc->chip->ack)
+ desc->chip->ack(BALLOON3_AUX_NIRQ);
+ while (pending) {
+ irq = BALLOON3_IRQ(0) + __ffs(pending);
+ generic_handle_irq(irq);
+ pending &= pending - 1;
+ }
+ pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
+ balloon3_irq_enabled;
+ } while (pending);
+}
+
+static void __init balloon3_init_irq(void)
+{
+ int irq;
+
+ pxa27x_init_irq();
+ /* setup extra Balloon3 irqs */
+ for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) {
+ set_irq_chip(irq, &balloon3_irq_chip);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ set_irq_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler);
+ set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING);
+
+ pr_debug("%s: chained handler installed - irq %d automatically "
+ "enabled\n", __func__, BALLOON3_AUX_NIRQ);
+}
+
+static void balloon3_backlight_power(int on)
+{
+ pr_debug("%s: power is %s\n", __func__, on ? "on" : "off");
+ gpio_set_value(BALLOON3_GPIO_RUN_BACKLIGHT, on);
+}
+
+static unsigned long balloon3_lcd_pin_config[] = {
+ /* LCD - 16bpp Active TFT */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+ GPIO77_LCD_BIAS,
+
+ GPIO99_GPIO, /* Backlight */
+};
+
+static struct pxafb_mode_info balloon3_lcd_modes[] = {
+ {
+ .pixclock = 38000,
+ .xres = 480,
+ .yres = 640,
+ .bpp = 16,
+ .hsync_len = 8,
+ .left_margin = 8,
+ .right_margin = 8,
+ .vsync_len = 2,
+ .upper_margin = 4,
+ .lower_margin = 5,
+ .sync = 0,
+ },
+};
+
+static struct pxafb_mach_info balloon3_pxafb_info = {
+ .modes = balloon3_lcd_modes,
+ .num_modes = ARRAY_SIZE(balloon3_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+ .pxafb_backlight_power = balloon3_backlight_power,
+};
+
+static unsigned long balloon3_mmc_pin_config[] = {
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+};
+
+static void balloon3_mci_setpower(struct device *dev, unsigned int vdd)
+{
+ struct pxamci_platform_data *p_d = dev->platform_data;
+
+ if ((1 << vdd) & p_d->ocr_mask) {
+ pr_debug("%s: on\n", __func__);
+ /* FIXME something to prod here? */
+ } else {
+ pr_debug("%s: off\n", __func__);
+ /* FIXME something to prod here? */
+ }
+}
+
+static struct pxamci_platform_data balloon3_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .setpower = balloon3_mci_setpower,
+};
+
+static int balloon3_udc_is_connected(void)
+{
+ pr_debug("%s: udc connected\n", __func__);
+ return 1;
+}
+
+static void balloon3_udc_command(int cmd)
+{
+ switch (cmd) {
+ case PXA2XX_UDC_CMD_CONNECT:
+ UP2OCR |= (UP2OCR_DPPUE + UP2OCR_DPPUBE);
+ pr_debug("%s: connect\n", __func__);
+ break;
+ case PXA2XX_UDC_CMD_DISCONNECT:
+ UP2OCR &= ~UP2OCR_DPPUE;
+ pr_debug("%s: disconnect\n", __func__);
+ break;
+ }
+}
+
+static struct pxa2xx_udc_mach_info balloon3_udc_info = {
+ .udc_is_connected = balloon3_udc_is_connected,
+ .udc_command = balloon3_udc_command,
+};
+
+static struct pxaficp_platform_data balloon3_ficp_platform_data = {
+ .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
+};
+
+static unsigned long balloon3_ohci_pin_config[] = {
+ GPIO88_USBH1_PWR,
+ GPIO89_USBH1_PEN,
+};
+
+static struct pxaohci_platform_data balloon3_ohci_platform_data = {
+ .port_mode = PMM_PERPORT_MODE,
+ .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
+};
+
+static unsigned long balloon3_pin_config[] __initdata = {
+ /* Select BTUART 'COM1/ttyS0' as IO option for pins 42/43/44/45 */
+ GPIO42_BTUART_RXD,
+ GPIO43_BTUART_TXD,
+ GPIO44_BTUART_CTS,
+ GPIO45_BTUART_RTS,
+
+ /* Wakeup GPIO */
+ GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
+
+ /* NAND & IDLE LED GPIOs */
+ GPIO9_GPIO,
+ GPIO10_GPIO,
+};
+
+static struct gpio_led balloon3_gpio_leds[] = {
+ {
+ .name = "balloon3:green:idle",
+ .default_trigger = "heartbeat",
+ .gpio = BALLOON3_GPIO_LED_IDLE,
+ .active_low = 1,
+ },
+ {
+ .name = "balloon3:green:nand",
+ .default_trigger = "nand-disk",
+ .gpio = BALLOON3_GPIO_LED_NAND,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data balloon3_gpio_leds_platform_data = {
+ .leds = balloon3_gpio_leds,
+ .num_leds = ARRAY_SIZE(balloon3_gpio_leds),
+};
+
+static struct platform_device balloon3led_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &balloon3_gpio_leds_platform_data,
+ },
+};
+
+static void __init balloon3_init(void)
+{
+ pr_info("Initialising Balloon3\n");
+
+ /* system bus arbiter setting
+ * - Core_Park
+ * - LCD_wt:DMA_wt:CORE_Wt = 2:3:4
+ */
+ ARB_CNTRL = ARB_CORE_PARK | 0x234;
+
+ pxa_set_i2c_info(NULL);
+ if (balloon3_has(BALLOON3_FEATURE_AUDIO))
+ pxa_set_ac97_info(NULL);
+
+ if (balloon3_has(BALLOON3_FEATURE_TOPPOLY)) {
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(balloon3_lcd_pin_config));
+ gpio_request(BALLOON3_GPIO_RUN_BACKLIGHT,
+ "LCD Backlight Power");
+ gpio_direction_output(BALLOON3_GPIO_RUN_BACKLIGHT, 1);
+ set_pxa_fb_info(&balloon3_pxafb_info);
+ }
+
+ if (balloon3_has(BALLOON3_FEATURE_MMC)) {
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(balloon3_mmc_pin_config));
+ pxa_set_mci_info(&balloon3_mci_platform_data);
+ }
+ pxa_set_ficp_info(&balloon3_ficp_platform_data);
+ if (balloon3_has(BALLOON3_FEATURE_OHCI)) {
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(balloon3_ohci_pin_config));
+ pxa_set_ohci_info(&balloon3_ohci_platform_data);
+ }
+ pxa_set_udc_info(&balloon3_udc_info);
+
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(balloon3_pin_config));
+
+ platform_device_register(&balloon3led_device);
+}
+
+static struct map_desc balloon3_io_desc[] __initdata = {
+ { /* CPLD/FPGA */
+ .virtual = BALLOON3_FPGA_VIRT,
+ .pfn = __phys_to_pfn(BALLOON3_FPGA_PHYS),
+ .length = BALLOON3_FPGA_LENGTH,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init balloon3_map_io(void)
+{
+ pxa_map_io();
+ iotable_init(balloon3_io_desc, ARRAY_SIZE(balloon3_io_desc));
+}
+
+MACHINE_START(BALLOON3, "Balloon3")
+ /* Maintainer: Nick Bane. */
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .map_io = balloon3_map_io,
+ .init_irq = balloon3_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = balloon3_init,
+ .boot_params = PHYS_OFFSET + 0x100,
+MACHINE_END
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h
index 5599bceff73..978a3667e90 100644
--- a/arch/arm/mach-pxa/clock.h
+++ b/arch/arm/mach-pxa/clock.h
@@ -12,7 +12,6 @@ struct clk {
unsigned int cken;
unsigned int delay;
unsigned int enabled;
- struct clk *other;
};
#define INIT_CLKREG(_clk,_devname,_conname) \
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index 1d2cec25391..eea78b6c2bc 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -13,13 +13,18 @@
#include <linux/sysdev.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/delay.h>
#include <linux/rtc-v3020.h>
#include <video/mbxfb.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/libertas_spi.h>
+
#include <mach/pxa27x.h>
#include <mach/ohci.h>
#include <mach/mmc.h>
+#include <mach/pxa2xx_spi.h>
#include "generic.h"
@@ -34,6 +39,10 @@
/* MMC power enable */
#define GPIO105_MMC_POWER (105)
+/* WLAN GPIOS */
+#define GPIO19_WLAN_STRAP (19)
+#define GPIO102_WLAN_RST (102)
+
static unsigned long cmx270_pin_config[] = {
/* AC'97 */
GPIO28_AC97_BITCLK,
@@ -94,8 +103,8 @@ static unsigned long cmx270_pin_config[] = {
GPIO26_SSP1_RXD,
/* SSP2 */
- GPIO19_SSP2_SCLK,
- GPIO14_SSP2_SFRM,
+ GPIO19_GPIO, /* SSP2 clock is used as GPIO for Libertas pin-strap */
+ GPIO14_GPIO,
GPIO87_SSP2_TXD,
GPIO88_SSP2_RXD,
@@ -123,6 +132,7 @@ static unsigned long cmx270_pin_config[] = {
GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH,
GPIO105_GPIO | MFP_LPM_DRIVE_HIGH, /* MMC/SD power */
GPIO53_GPIO, /* PC card reset */
+ GPIO102_GPIO, /* WLAN reset */
/* NAND controls */
GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */
@@ -131,6 +141,7 @@ static unsigned long cmx270_pin_config[] = {
/* interrupts */
GPIO10_GPIO, /* DM9000 interrupt */
GPIO83_GPIO, /* MMC card detect */
+ GPIO95_GPIO, /* WLAN interrupt */
};
/* V3020 RTC */
@@ -271,64 +282,114 @@ static inline void cmx270_init_ohci(void) {}
#endif
#if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE)
-static int cmx270_mci_init(struct device *dev,
- irq_handler_t cmx270_detect_int,
- void *data)
+static struct pxamci_platform_data cmx270_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = GPIO83_MMC_IRQ,
+ .gpio_card_ro = -1,
+ .gpio_power = GPIO105_MMC_POWER,
+ .gpio_power_invert = 1,
+};
+
+static void __init cmx270_init_mmc(void)
{
- int err;
+ pxa_set_mci_info(&cmx270_mci_platform_data);
+}
+#else
+static inline void cmx270_init_mmc(void) {}
+#endif
+
+#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
+static struct pxa2xx_spi_master cm_x270_spi_info = {
+ .num_chipselect = 1,
+ .enable_dma = 1,
+};
+
+static struct pxa2xx_spi_chip cm_x270_libertas_chip = {
+ .rx_threshold = 1,
+ .tx_threshold = 1,
+ .timeout = 1000,
+ .gpio_cs = 14,
+};
+
+static unsigned long cm_x270_libertas_pin_config[] = {
+ /* SSP2 */
+ GPIO19_SSP2_SCLK,
+ GPIO14_GPIO,
+ GPIO87_SSP2_TXD,
+ GPIO88_SSP2_RXD,
+
+};
- err = gpio_request(GPIO105_MMC_POWER, "MMC/SD power");
- if (err) {
- dev_warn(dev, "power gpio unavailable\n");
+static int cm_x270_libertas_setup(struct spi_device *spi)
+{
+ int err = gpio_request(GPIO19_WLAN_STRAP, "WLAN STRAP");
+ if (err)
return err;
- }
- gpio_direction_output(GPIO105_MMC_POWER, 0);
+ err = gpio_request(GPIO102_WLAN_RST, "WLAN RST");
+ if (err)
+ goto err_free_strap;
- err = request_irq(CMX270_MMC_IRQ, cmx270_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- gpio_free(GPIO105_MMC_POWER);
- dev_err(dev, "cmx270_mci_init: MMC/SD: can't"
- " request MMC card detect IRQ\n");
- }
+ err = gpio_direction_output(GPIO102_WLAN_RST, 0);
+ if (err)
+ goto err_free_strap;
+ msleep(100);
+
+ err = gpio_direction_output(GPIO19_WLAN_STRAP, 1);
+ if (err)
+ goto err_free_strap;
+ msleep(100);
+
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(cm_x270_libertas_pin_config));
+
+ gpio_set_value(GPIO102_WLAN_RST, 1);
+ msleep(100);
+
+ spi->bits_per_word = 16;
+ spi_setup(spi);
+
+ return 0;
+
+err_free_strap:
+ gpio_free(GPIO19_WLAN_STRAP);
return err;
}
-static void cmx270_mci_setpower(struct device *dev, unsigned int vdd)
+static int cm_x270_libertas_teardown(struct spi_device *spi)
{
- struct pxamci_platform_data *p_d = dev->platform_data;
-
- if ((1 << vdd) & p_d->ocr_mask) {
- dev_dbg(dev, "power on\n");
- gpio_set_value(GPIO105_MMC_POWER, 0);
- } else {
- gpio_set_value(GPIO105_MMC_POWER, 1);
- dev_dbg(dev, "power off\n");
- }
-}
+ gpio_set_value(GPIO102_WLAN_RST, 0);
+ gpio_free(GPIO102_WLAN_RST);
+ gpio_free(GPIO19_WLAN_STRAP);
-static void cmx270_mci_exit(struct device *dev, void *data)
-{
- free_irq(CMX270_MMC_IRQ, data);
- gpio_free(GPIO105_MMC_POWER);
+ return 0;
}
-static struct pxamci_platform_data cmx270_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = cmx270_mci_init,
- .setpower = cmx270_mci_setpower,
- .exit = cmx270_mci_exit,
+struct libertas_spi_platform_data cm_x270_libertas_pdata = {
+ .use_dummy_writes = 1,
+ .setup = cm_x270_libertas_setup,
+ .teardown = cm_x270_libertas_teardown,
};
-static void __init cmx270_init_mmc(void)
+static struct spi_board_info cm_x270_spi_devices[] __initdata = {
+ {
+ .modalias = "libertas_spi",
+ .max_speed_hz = 13000000,
+ .bus_num = 2,
+ .irq = gpio_to_irq(95),
+ .chip_select = 0,
+ .controller_data = &cm_x270_libertas_chip,
+ .platform_data = &cm_x270_libertas_pdata,
+ },
+};
+
+static void __init cmx270_init_spi(void)
{
- pxa_set_mci_info(&cmx270_mci_platform_data);
+ pxa2xx_set_spi_info(2, &cm_x270_spi_info);
+ spi_register_board_info(ARRAY_AND_SIZE(cm_x270_spi_devices));
}
#else
-static inline void cmx270_init_mmc(void) {}
+static inline void cmx270_init_spi(void) {}
#endif
void __init cmx270_init(void)
@@ -343,4 +404,5 @@ void __init cmx270_init(void)
cmx270_init_mmc();
cmx270_init_ohci();
cmx270_init_2700G();
+ cmx270_init_spi();
}
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 465da26591b..aac2cda60e0 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -306,68 +306,21 @@ static void cm_x300_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data cm_x300_mci_platform_data = {
- .detect_delay = 20,
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = cm_x300_mci_init,
- .exit = cm_x300_mci_exit,
+ .detect_delay = 20,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .init = cm_x300_mci_init,
+ .exit = cm_x300_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
-static int cm_x300_mci2_ro(struct device *dev)
-{
- return gpio_get_value(GPIO85_MMC2_WP);
-}
-
-static int cm_x300_mci2_init(struct device *dev,
- irq_handler_t cm_x300_detect_int,
- void *data)
-{
- int err;
-
- /*
- * setup GPIO for CM-X300 MMC controller
- */
- err = gpio_request(GPIO82_MMC2_IRQ, "mmc card detect");
- if (err)
- goto err_request_cd;
- gpio_direction_input(GPIO82_MMC2_IRQ);
-
- err = gpio_request(GPIO85_MMC2_WP, "mmc write protect");
- if (err)
- goto err_request_wp;
- gpio_direction_input(GPIO85_MMC2_WP);
-
- err = request_irq(CM_X300_MMC2_IRQ, cm_x300_detect_int,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: MMC/SD/SDIO: "
- "can't request card detect IRQ\n", __func__);
- goto err_request_irq;
- }
-
- return 0;
-
-err_request_irq:
- gpio_free(GPIO85_MMC2_WP);
-err_request_wp:
- gpio_free(GPIO82_MMC2_IRQ);
-err_request_cd:
- return err;
-}
-
-static void cm_x300_mci2_exit(struct device *dev, void *data)
-{
- free_irq(CM_X300_MMC2_IRQ, data);
- gpio_free(GPIO82_MMC2_IRQ);
- gpio_free(GPIO85_MMC2_WP);
-}
-
static struct pxamci_platform_data cm_x300_mci2_platform_data = {
- .detect_delay = 20,
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = cm_x300_mci2_init,
- .exit = cm_x300_mci2_exit,
- .get_ro = cm_x300_mci2_ro,
+ .detect_delay = 20,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = GPIO82_MMC2_IRQ,
+ .gpio_card_ro = GPIO85_MMC2_WP,
+ .gpio_power = -1,
};
static void __init cm_x300_init_mmc(void)
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 7c9c34c19ae..37c239c5656 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -172,6 +172,7 @@ void __init colibri_pxa300_init(void)
{
colibri_pxa300_init_eth();
colibri_pxa300_init_ohci();
+ colibri_pxa3xx_init_nand();
colibri_pxa300_init_lcd();
colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO39_GPIO));
colibri_pxa310_init_ac97();
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index a18d37b3c5e..494572825c7 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -164,15 +164,48 @@ static inline void __init colibri_pxa320_init_ac97(void)
static inline void colibri_pxa320_init_ac97(void) {}
#endif
+/*
+ * The following configuration is verified to work with the Toradex Orchid
+ * carrier board
+ */
+static mfp_cfg_t colibri_pxa320_uart_pin_config[] __initdata = {
+ /* UART 1 configuration (may be set by bootloader) */
+ GPIO99_UART1_CTS,
+ GPIO104_UART1_RTS,
+ GPIO97_UART1_RXD,
+ GPIO98_UART1_TXD,
+ GPIO101_UART1_DTR,
+ GPIO103_UART1_DSR,
+ GPIO100_UART1_DCD,
+ GPIO102_UART1_RI,
+
+ /* UART 2 configuration */
+ GPIO109_UART2_CTS,
+ GPIO112_UART2_RTS,
+ GPIO110_UART2_RXD,
+ GPIO111_UART2_TXD,
+
+ /* UART 3 configuration */
+ GPIO30_UART3_RXD,
+ GPIO31_UART3_TXD,
+};
+
+static void __init colibri_pxa320_init_uart(void)
+{
+ pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_uart_pin_config));
+}
+
void __init colibri_pxa320_init(void)
{
colibri_pxa320_init_eth();
colibri_pxa320_init_ohci();
+ colibri_pxa3xx_init_nand();
colibri_pxa320_init_lcd();
colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO49_GPIO));
colibri_pxa320_init_ac97();
colibri_pxa3xx_init_mmc(ARRAY_AND_SIZE(colibri_pxa320_mmc_pin_config),
mfp_to_gpio(MFP_PIN_GPIO28));
+ colibri_pxa320_init_uart();
}
MACHINE_START(COLIBRI320, "Toradex Colibri PXA320")
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index ea34e34f8cd..efebaf4d734 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -25,6 +25,7 @@
#include <mach/colibri.h>
#include <mach/mmc.h>
#include <mach/pxafb.h>
+#include <mach/pxa3xx_nand.h>
#include "generic.h"
#include "devices.h"
@@ -95,10 +96,13 @@ static void colibri_pxa3xx_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data colibri_pxa3xx_mci_platform_data = {
- .detect_delay = 20,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .init = colibri_pxa3xx_mci_init,
- .exit = colibri_pxa3xx_mci_exit,
+ .detect_delay = 20,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .init = colibri_pxa3xx_mci_init,
+ .exit = colibri_pxa3xx_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
void __init colibri_pxa3xx_init_mmc(mfp_cfg_t *pins, int len, int detect_pin)
@@ -154,3 +158,43 @@ void __init colibri_pxa3xx_init_lcd(int bl_pin)
}
#endif
+#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
+static struct mtd_partition colibri_nand_partitions[] = {
+ {
+ .name = "bootloader",
+ .offset = 0,
+ .size = SZ_512K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_4M,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "reserved",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_1M,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "fs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct pxa3xx_nand_platform_data colibri_nand_info = {
+ .enable_arbiter = 1,
+ .keep_config = 1,
+ .parts = colibri_nand_partitions,
+ .nr_parts = ARRAY_SIZE(colibri_nand_partitions),
+};
+
+void __init colibri_pxa3xx_init_nand(void)
+{
+ pxa3xx_set_nand_info(&colibri_nand_info);
+}
+#endif
+
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 5363e1aea3f..b536b5a5a10 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -29,6 +29,7 @@
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
#include <linux/mtd/sharpsl.h>
+#include <linux/input/matrix_keypad.h>
#include <video/w100fb.h>
#include <asm/setup.h>
@@ -104,6 +105,28 @@ static unsigned long corgi_pin_config[] __initdata = {
GPIO6_MMC_CLK,
GPIO8_MMC_CS0,
+ /* GPIO Matrix Keypad */
+ GPIO66_GPIO, /* column 0 */
+ GPIO67_GPIO, /* column 1 */
+ GPIO68_GPIO, /* column 2 */
+ GPIO69_GPIO, /* column 3 */
+ GPIO70_GPIO, /* column 4 */
+ GPIO71_GPIO, /* column 5 */
+ GPIO72_GPIO, /* column 6 */
+ GPIO73_GPIO, /* column 7 */
+ GPIO74_GPIO, /* column 8 */
+ GPIO75_GPIO, /* column 9 */
+ GPIO76_GPIO, /* column 10 */
+ GPIO77_GPIO, /* column 11 */
+ GPIO58_GPIO, /* row 0 */
+ GPIO59_GPIO, /* row 1 */
+ GPIO60_GPIO, /* row 2 */
+ GPIO61_GPIO, /* row 3 */
+ GPIO62_GPIO, /* row 4 */
+ GPIO63_GPIO, /* row 5 */
+ GPIO64_GPIO, /* row 6 */
+ GPIO65_GPIO, /* row 7 */
+
/* GPIO */
GPIO9_GPIO, /* CORGI_GPIO_nSD_DETECT */
GPIO7_GPIO, /* CORGI_GPIO_nSD_WP */
@@ -267,9 +290,115 @@ static struct platform_device corgifb_device = {
/*
* Corgi Keyboard Device
*/
+#define CORGI_KEY_CALENDER KEY_F1
+#define CORGI_KEY_ADDRESS KEY_F2
+#define CORGI_KEY_FN KEY_F3
+#define CORGI_KEY_CANCEL KEY_F4
+#define CORGI_KEY_OFF KEY_SUSPEND
+#define CORGI_KEY_EXOK KEY_F5
+#define CORGI_KEY_EXCANCEL KEY_F6
+#define CORGI_KEY_EXJOGDOWN KEY_F7
+#define CORGI_KEY_EXJOGUP KEY_F8
+#define CORGI_KEY_JAP1 KEY_LEFTCTRL
+#define CORGI_KEY_JAP2 KEY_LEFTALT
+#define CORGI_KEY_MAIL KEY_F10
+#define CORGI_KEY_OK KEY_F11
+#define CORGI_KEY_MENU KEY_F12
+
+static const uint32_t corgikbd_keymap[] = {
+ KEY(0, 1, KEY_1),
+ KEY(0, 2, KEY_3),
+ KEY(0, 3, KEY_5),
+ KEY(0, 4, KEY_6),
+ KEY(0, 5, KEY_7),
+ KEY(0, 6, KEY_9),
+ KEY(0, 7, KEY_0),
+ KEY(0, 8, KEY_BACKSPACE),
+ KEY(1, 1, KEY_2),
+ KEY(1, 2, KEY_4),
+ KEY(1, 3, KEY_R),
+ KEY(1, 4, KEY_Y),
+ KEY(1, 5, KEY_8),
+ KEY(1, 6, KEY_I),
+ KEY(1, 7, KEY_O),
+ KEY(1, 8, KEY_P),
+ KEY(2, 0, KEY_TAB),
+ KEY(2, 1, KEY_Q),
+ KEY(2, 2, KEY_E),
+ KEY(2, 3, KEY_T),
+ KEY(2, 4, KEY_G),
+ KEY(2, 5, KEY_U),
+ KEY(2, 6, KEY_J),
+ KEY(2, 7, KEY_K),
+ KEY(3, 0, CORGI_KEY_CALENDER),
+ KEY(3, 1, KEY_W),
+ KEY(3, 2, KEY_S),
+ KEY(3, 3, KEY_F),
+ KEY(3, 4, KEY_V),
+ KEY(3, 5, KEY_H),
+ KEY(3, 6, KEY_M),
+ KEY(3, 7, KEY_L),
+ KEY(3, 9, KEY_RIGHTSHIFT),
+ KEY(4, 0, CORGI_KEY_ADDRESS),
+ KEY(4, 1, KEY_A),
+ KEY(4, 2, KEY_D),
+ KEY(4, 3, KEY_C),
+ KEY(4, 4, KEY_B),
+ KEY(4, 5, KEY_N),
+ KEY(4, 6, KEY_DOT),
+ KEY(4, 8, KEY_ENTER),
+ KEY(4, 10, KEY_LEFTSHIFT),
+ KEY(5, 0, CORGI_KEY_MAIL),
+ KEY(5, 1, KEY_Z),
+ KEY(5, 2, KEY_X),
+ KEY(5, 3, KEY_MINUS),
+ KEY(5, 4, KEY_SPACE),
+ KEY(5, 5, KEY_COMMA),
+ KEY(5, 7, KEY_UP),
+ KEY(5, 11, CORGI_KEY_FN),
+ KEY(6, 0, KEY_SYSRQ),
+ KEY(6, 1, CORGI_KEY_JAP1),
+ KEY(6, 2, CORGI_KEY_JAP2),
+ KEY(6, 3, CORGI_KEY_CANCEL),
+ KEY(6, 4, CORGI_KEY_OK),
+ KEY(6, 5, CORGI_KEY_MENU),
+ KEY(6, 6, KEY_LEFT),
+ KEY(6, 7, KEY_DOWN),
+ KEY(6, 8, KEY_RIGHT),
+ KEY(7, 0, CORGI_KEY_OFF),
+ KEY(7, 1, CORGI_KEY_EXOK),
+ KEY(7, 2, CORGI_KEY_EXCANCEL),
+ KEY(7, 3, CORGI_KEY_EXJOGDOWN),
+ KEY(7, 4, CORGI_KEY_EXJOGUP),
+};
+
+static struct matrix_keymap_data corgikbd_keymap_data = {
+ .keymap = corgikbd_keymap,
+ .keymap_size = ARRAY_SIZE(corgikbd_keymap),
+};
+
+static const int corgikbd_row_gpios[] =
+ { 58, 59, 60, 61, 62, 63, 64, 65 };
+static const int corgikbd_col_gpios[] =
+ { 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77 };
+
+static struct matrix_keypad_platform_data corgikbd_pdata = {
+ .keymap_data = &corgikbd_keymap_data,
+ .row_gpios = corgikbd_row_gpios,
+ .col_gpios = corgikbd_col_gpios,
+ .num_row_gpios = ARRAY_SIZE(corgikbd_row_gpios),
+ .num_col_gpios = ARRAY_SIZE(corgikbd_col_gpios),
+ .col_scan_delay_us = 10,
+ .debounce_ms = 10,
+ .wakeup = 1,
+};
+
static struct platform_device corgikbd_device = {
- .name = "corgi-keyboard",
+ .name = "matrix-keypad",
.id = -1,
+ .dev = {
+ .platform_data = &corgikbd_pdata,
+ },
};
/*
@@ -307,111 +436,20 @@ static struct platform_device corgiled_device = {
* The card detect interrupt isn't debounced so we delay it by 250ms
* to give the card a chance to fully insert/eject.
*/
-static struct pxamci_platform_data corgi_mci_platform_data;
-
-static int corgi_mci_init(struct device *dev, irq_handler_t corgi_detect_int, void *data)
-{
- int err;
-
- err = gpio_request(CORGI_GPIO_nSD_DETECT, "nSD_DETECT");
- if (err)
- goto err_out;
-
- err = gpio_request(CORGI_GPIO_nSD_WP, "nSD_WP");
- if (err)
- goto err_free_1;
-
- err = gpio_request(CORGI_GPIO_SD_PWR, "SD_PWR");
- if (err)
- goto err_free_2;
-
- gpio_direction_input(CORGI_GPIO_nSD_DETECT);
- gpio_direction_input(CORGI_GPIO_nSD_WP);
- gpio_direction_output(CORGI_GPIO_SD_PWR, 0);
-
- corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
- err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- pr_err("%s: MMC/SD: can't request MMC card detect IRQ\n",
- __func__);
- goto err_free_3;
- }
- return 0;
-
-err_free_3:
- gpio_free(CORGI_GPIO_SD_PWR);
-err_free_2:
- gpio_free(CORGI_GPIO_nSD_WP);
-err_free_1:
- gpio_free(CORGI_GPIO_nSD_DETECT);
-err_out:
- return err;
-}
-
-static void corgi_mci_setpower(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data* p_d = dev->platform_data;
-
- gpio_set_value(CORGI_GPIO_SD_PWR, ((1 << vdd) & p_d->ocr_mask));
-}
-
-static int corgi_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(CORGI_GPIO_nSD_WP);
-}
-
-static void corgi_mci_exit(struct device *dev, void *data)
-{
- free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data);
- gpio_free(CORGI_GPIO_SD_PWR);
- gpio_free(CORGI_GPIO_nSD_WP);
- gpio_free(CORGI_GPIO_nSD_DETECT);
-}
-
static struct pxamci_platform_data corgi_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = corgi_mci_init,
- .get_ro = corgi_mci_get_ro,
- .setpower = corgi_mci_setpower,
- .exit = corgi_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = CORGI_GPIO_nSD_WP,
+ .gpio_power = CORGI_GPIO_SD_PWR,
};
/*
* Irda
*/
-static void corgi_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(CORGI_GPIO_IR_ON, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
-static int corgi_irda_startup(struct device *dev)
-{
- int err;
-
- err = gpio_request(CORGI_GPIO_IR_ON, "IR_ON");
- if (err)
- return err;
-
- gpio_direction_output(CORGI_GPIO_IR_ON, 1);
- return 0;
-}
-
-static void corgi_irda_shutdown(struct device *dev)
-{
- gpio_free(CORGI_GPIO_IR_ON);
-}
-
static struct pxaficp_platform_data corgi_ficp_platform_data = {
+ .gpio_pwdown = CORGI_GPIO_IR_ON,
.transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = corgi_irda_transceiver_mode,
- .startup = corgi_irda_startup,
- .shutdown = corgi_irda_shutdown,
};
@@ -636,6 +674,7 @@ static void __init corgi_init(void)
corgi_init_spi();
pxa_set_udc_info(&udc_info);
+ corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&corgi_mci_platform_data);
pxa_set_ficp_info(&corgi_ficp_platform_data);
pxa_set_i2c_info(NULL);
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index 7d3e1b46e55..79141f86272 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -130,61 +130,17 @@ static struct pxamci_platform_data csb726_mci_data;
static int csb726_mci_init(struct device *dev,
irq_handler_t detect, void *data)
{
- int err;
-
csb726_mci_data.detect_delay = msecs_to_jiffies(500);
-
- err = gpio_request(CSB726_GPIO_MMC_DETECT, "MMC detect");
- if (err)
- goto err_det_req;
-
- err = gpio_direction_input(CSB726_GPIO_MMC_DETECT);
- if (err)
- goto err_det_dir;
-
- err = gpio_request(CSB726_GPIO_MMC_RO, "MMC ro");
- if (err)
- goto err_ro_req;
-
- err = gpio_direction_input(CSB726_GPIO_MMC_RO);
- if (err)
- goto err_ro_dir;
-
- err = request_irq(gpio_to_irq(CSB726_GPIO_MMC_DETECT), detect,
- IRQF_DISABLED, "MMC card detect", data);
- if (err)
- goto err_irq;
-
return 0;
-
-err_irq:
-err_ro_dir:
- gpio_free(CSB726_GPIO_MMC_RO);
-err_ro_req:
-err_det_dir:
- gpio_free(CSB726_GPIO_MMC_DETECT);
-err_det_req:
- return err;
-}
-
-static int csb726_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(CSB726_GPIO_MMC_RO);
-}
-
-static void csb726_mci_exit(struct device *dev, void *data)
-{
- free_irq(gpio_to_irq(CSB726_GPIO_MMC_DETECT), data);
- gpio_free(CSB726_GPIO_MMC_RO);
- gpio_free(CSB726_GPIO_MMC_DETECT);
}
static struct pxamci_platform_data csb726_mci = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = csb726_mci_init,
- .get_ro = csb726_mci_get_ro,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .init = csb726_mci_init,
/* FIXME setpower */
- .exit = csb726_mci_exit,
+ .gpio_card_detect = CSB726_GPIO_MMC_DETECT,
+ .gpio_card_ro = CSB726_GPIO_MMC_RO,
+ .gpio_power = -1,
};
static struct pxaohci_platform_data csb726_ohci_platform_data = {
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index ecc08f360b6..46fabe1cca1 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -935,6 +935,33 @@ void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info)
{
pxa_register_device(&pxa3xx_device_nand, info);
}
+
+static struct resource pxa3xx_resources_gcu[] = {
+ {
+ .start = 0x54000000,
+ .end = 0x54000fff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_GCU,
+ .end = IRQ_GCU,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 pxa3xx_gcu_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device pxa3xx_device_gcu = {
+ .name = "pxa3xx-gcu",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pxa3xx_resources_gcu),
+ .resource = pxa3xx_resources_gcu,
+ .dev = {
+ .dma_mask = &pxa3xx_gcu_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
#endif /* CONFIG_PXA3xx */
/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index ecc24a4dca6..93817d99761 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -35,4 +35,6 @@ extern struct platform_device pxa27x_device_pwm1;
extern struct platform_device pxa3xx_device_nand;
extern struct platform_device pxa3xx_device_i2c_power;
+extern struct platform_device pxa3xx_device_gcu;
+
void __init pxa_register_device(struct platform_device *dev, void *data);
diff --git a/arch/arm/mach-pxa/e740.c b/arch/arm/mach-pxa/e740.c
index a36fc17f671..49acdfa6650 100644
--- a/arch/arm/mach-pxa/e740.c
+++ b/arch/arm/mach-pxa/e740.c
@@ -199,7 +199,6 @@ static void __init e740_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
pxa_set_udc_info(&e7xx_udc_mach_info);
pxa_set_ac97_info(NULL);
- e7xx_irda_init();
pxa_set_ficp_info(&e7xx_ficp_platform_data);
}
diff --git a/arch/arm/mach-pxa/e750.c b/arch/arm/mach-pxa/e750.c
index 1d00110590e..4052ece3ef4 100644
--- a/arch/arm/mach-pxa/e750.c
+++ b/arch/arm/mach-pxa/e750.c
@@ -200,7 +200,6 @@ static void __init e750_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
pxa_set_udc_info(&e7xx_udc_mach_info);
pxa_set_ac97_info(NULL);
- e7xx_irda_init();
pxa_set_ficp_info(&e7xx_ficp_platform_data);
}
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 9cd09465a0e..aec7f4214b1 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -646,13 +646,16 @@ static int em_x270_mci_get_ro(struct device *dev)
}
static struct pxamci_platform_data em_x270_mci_platform_data = {
- .ocr_mask = MMC_VDD_20_21|MMC_VDD_21_22|MMC_VDD_22_23|
- MMC_VDD_24_25|MMC_VDD_25_26|MMC_VDD_26_27|
- MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|
- MMC_VDD_30_31|MMC_VDD_31_32,
- .init = em_x270_mci_init,
- .setpower = em_x270_mci_setpower,
- .exit = em_x270_mci_exit,
+ .ocr_mask = MMC_VDD_20_21|MMC_VDD_21_22|MMC_VDD_22_23|
+ MMC_VDD_24_25|MMC_VDD_25_26|MMC_VDD_26_27|
+ MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|
+ MMC_VDD_30_31|MMC_VDD_31_32,
+ .init = em_x270_mci_init,
+ .setpower = em_x270_mci_setpower,
+ .exit = em_x270_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static void __init em_x270_init_mmc(void)
@@ -1022,22 +1025,32 @@ static int em_x270_sensor_power(struct device *dev, int on)
return 0;
}
-static struct soc_camera_link iclink = {
- .bus_id = 0,
- .power = em_x270_sensor_power,
-};
-
static struct i2c_board_info em_x270_i2c_cam_info[] = {
{
I2C_BOARD_INFO("mt9m111", 0x48),
+ },
+};
+
+static struct soc_camera_link iclink = {
+ .bus_id = 0,
+ .power = em_x270_sensor_power,
+ .board_info = &em_x270_i2c_cam_info[0],
+ .i2c_adapter_id = 0,
+ .module_name = "mt9m111",
+};
+
+static struct platform_device em_x270_camera = {
+ .name = "soc-camera-pdrv",
+ .id = -1,
+ .dev = {
.platform_data = &iclink,
},
};
static void __init em_x270_init_camera(void)
{
- i2c_register_board_info(0, ARRAY_AND_SIZE(em_x270_i2c_cam_info));
pxa_set_camera_info(&em_x270_camera_platform_data);
+ platform_device_register(&em_x270_camera);
}
#else
static inline void em_x270_init_camera(void) {}
@@ -1103,6 +1116,7 @@ REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio");
REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs");
+REGULATOR_CONSUMER(buck2, NULL, "vcc_core");
#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
static struct regulator_init_data _ldo##_data = { \
@@ -1125,6 +1139,7 @@ REGULATOR_INIT(ldo10, 2000000, 3200000,
REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_VOLTAGE);
REGULATOR_INIT(ldo12, 3000000, 3000000, REGULATOR_CHANGE_STATUS);
REGULATOR_INIT(ldo19, 3200000, 3200000, REGULATOR_CHANGE_STATUS);
+REGULATOR_INIT(buck2, 1000000, 1650000, REGULATOR_CHANGE_VOLTAGE);
struct led_info em_x270_led_info = {
.name = "em-x270:orange",
@@ -1194,6 +1209,8 @@ struct da903x_subdev_info em_x270_da9030_subdevs[] = {
DA9030_LDO(12),
DA9030_LDO(19),
+ DA9030_SUBDEV(regulator, BUCK2, &buck2_data),
+
DA9030_SUBDEV(led, LED_PC, &em_x270_led_info),
DA9030_SUBDEV(backlight, WLED, &em_x270_led_info),
DA9030_SUBDEV(battery, BAT, &em_x270_batterty_info),
@@ -1245,7 +1262,6 @@ static void __init em_x270_init_i2c(void)
static void __init em_x270_module_init(void)
{
- pr_info("%s\n", __func__);
pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_pin_config));
mmc_cd = GPIO13_MMC_CD;
@@ -1257,7 +1273,6 @@ static void __init em_x270_module_init(void)
static void __init em_x270_exeda_init(void)
{
- pr_info("%s\n", __func__);
pxa2xx_mfp_config(ARRAY_AND_SIZE(exeda_pin_config));
mmc_cd = GPIO114_MMC_CD;
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index c60dadf847a..91417f03506 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -47,44 +47,9 @@ struct pxa2xx_udc_mach_info e7xx_udc_mach_info = {
.gpio_pullup_inverted = 1
};
-static void e7xx_irda_transceiver_mode(struct device *dev, int mode)
-{
- if (mode & IR_OFF) {
- gpio_set_value(GPIO_E7XX_IR_OFF, 1);
- pxa2xx_transceiver_mode(dev, mode);
- } else {
- pxa2xx_transceiver_mode(dev, mode);
- gpio_set_value(GPIO_E7XX_IR_OFF, 0);
- }
-}
-
-int e7xx_irda_init(void)
-{
- int ret;
-
- ret = gpio_request(GPIO_E7XX_IR_OFF, "IrDA power");
- if (ret)
- goto out;
-
- ret = gpio_direction_output(GPIO_E7XX_IR_OFF, 0);
- if (ret)
- goto out;
-
- e7xx_irda_transceiver_mode(NULL, IR_SIRMODE | IR_OFF);
-out:
- return ret;
-}
-
-static void e7xx_irda_shutdown(struct device *dev)
-{
- e7xx_irda_transceiver_mode(dev, IR_SIRMODE | IR_OFF);
- gpio_free(GPIO_E7XX_IR_OFF);
-}
-
struct pxaficp_platform_data e7xx_ficp_platform_data = {
- .transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = e7xx_irda_transceiver_mode,
- .shutdown = e7xx_irda_shutdown,
+ .gpio_pwdown = GPIO_E7XX_IR_OFF,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
int eseries_tmio_enable(struct platform_device *dev)
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index ca9912ea78d..1708c010984 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -88,7 +88,10 @@ static struct platform_device *devices[] __initdata = {
#ifdef CONFIG_MMC_PXA
static struct pxamci_platform_data gumstix_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static void __init gumstix_mmc_init(void)
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 81359d574f8..abff9e13274 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -45,6 +45,7 @@
#include <mach/irda.h>
#include <mach/pxa2xx_spi.h>
+#include <video/platform_lcd.h>
#include <video/w100fb.h>
#include "devices.h"
@@ -174,14 +175,9 @@ static int hx4700_gpio_request(struct gpio_ress *gpios, int size)
* IRDA
*/
-static void irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO105_HX4700_nIR_ON, mode & IR_OFF);
-}
-
static struct pxaficp_platform_data ficp_info = {
- .transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = irda_transceiver_mode,
+ .gpio_pwdown = GPIO105_HX4700_nIR_ON,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/*
@@ -368,8 +364,6 @@ static struct platform_device egpio = {
* LCD - Sony display connected to ATI Imageon w3220
*/
-static int lcd_power;
-
static void sony_lcd_init(void)
{
gpio_set_value(GPIO84_HX4700_LCD_SQN, 1);
@@ -410,35 +404,6 @@ static void sony_lcd_off(void)
gpio_set_value(GPIO110_HX4700_LCD_LVDD_3V3_ON, 0);
}
-static int hx4700_lcd_set_power(struct lcd_device *ldev, int level)
-{
- switch (level) {
- case FB_BLANK_UNBLANK:
- sony_lcd_init();
- break;
- case FB_BLANK_NORMAL:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- sony_lcd_off();
- break;
- }
- lcd_power = level;
- return 0;
-}
-
-static int hx4700_lcd_get_power(struct lcd_device *lm)
-{
- return lcd_power;
-}
-
-static struct lcd_ops hx4700_lcd_ops = {
- .get_power = hx4700_lcd_get_power,
- .set_power = hx4700_lcd_set_power,
-};
-
-static struct lcd_device *hx4700_lcd_device;
-
#ifdef CONFIG_PM
static void w3220_lcd_suspend(struct w100fb_par *wfb)
{
@@ -573,6 +538,27 @@ static struct platform_device w3220 = {
.resource = w3220_resources,
};
+static void hx4700_lcd_set_power(struct plat_lcd_data *pd, unsigned int power)
+{
+ if (power)
+ sony_lcd_init();
+ else
+ sony_lcd_off();
+}
+
+static struct plat_lcd_data hx4700_lcd_data = {
+ .set_power = hx4700_lcd_set_power,
+};
+
+static struct platform_device hx4700_lcd = {
+ .name = "platform-lcd",
+ .id = -1,
+ .dev = {
+ .platform_data = &hx4700_lcd_data,
+ .parent = &w3220.dev,
+ },
+};
+
/*
* Backlight
*/
@@ -872,9 +858,6 @@ static void __init hx4700_init(void)
pxa2xx_set_spi_info(2, &pxa_ssp2_master_info);
spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info));
- hx4700_lcd_device = lcd_device_register("w100fb", NULL,
- (void *)&w3220_info, &hx4700_lcd_ops);
-
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 0);
mdelay(10);
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index b6243b59d9b..b6486ef20b1 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -168,7 +168,10 @@ static struct pxafb_mach_info sharp_lm8v31 = {
};
static struct pxamci_platform_data idp_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static void __init idp_init(void)
diff --git a/arch/arm/mach-pxa/imote2.c b/arch/arm/mach-pxa/imote2.c
index 961807dc646..2a4945db31c 100644
--- a/arch/arm/mach-pxa/imote2.c
+++ b/arch/arm/mach-pxa/imote2.c
@@ -389,6 +389,9 @@ static int imote2_mci_get_ro(struct device *dev)
static struct pxamci_platform_data imote2_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* default anyway */
.get_ro = imote2_mci_get_ro,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static struct mtd_partition imote2flash_partitions[] = {
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
new file mode 100644
index 00000000000..bfec09b1814
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -0,0 +1,134 @@
+/*
+ * linux/include/asm-arm/arch-pxa/balloon3.h
+ *
+ * Authors: Nick Bane and Wookey
+ * Created: Oct, 2005
+ * Copyright: Toby Churchill Ltd
+ * Cribbed from mainstone.c, by Nicholas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARCH_BALLOON3_H
+#define ASM_ARCH_BALLOON3_H
+
+enum balloon3_features {
+ BALLOON3_FEATURE_OHCI,
+ BALLOON3_FEATURE_MMC,
+ BALLOON3_FEATURE_CF,
+ BALLOON3_FEATURE_AUDIO,
+ BALLOON3_FEATURE_TOPPOLY,
+};
+
+#define BALLOON3_FPGA_PHYS PXA_CS4_PHYS
+#define BALLOON3_FPGA_VIRT (0xf1000000) /* as per balloon2 */
+#define BALLOON3_FPGA_LENGTH 0x01000000
+
+/* FPGA/CPLD registers */
+#define BALLOON3_PCMCIA0_REG (BALLOON3_FPGA_VIRT + 0x00e00008)
+/* fixme - same for now */
+#define BALLOON3_PCMCIA1_REG (BALLOON3_FPGA_VIRT + 0x00e00008)
+#define BALLOON3_NANDIO_IO_REG (BALLOON3_FPGA_VIRT + 0x00e00000)
+/* fpga/cpld interrupt control register */
+#define BALLOON3_INT_CONTROL_REG (BALLOON3_FPGA_VIRT + 0x00e0000C)
+#define BALLOON3_NANDIO_CTL2_REG (BALLOON3_FPGA_VIRT + 0x00e00010)
+#define BALLOON3_NANDIO_CTL_REG (BALLOON3_FPGA_VIRT + 0x00e00014)
+#define BALLOON3_VERSION_REG (BALLOON3_FPGA_VIRT + 0x00e0001c)
+
+#define BALLOON3_SAMOSA_ADDR_REG (BALLOON3_FPGA_VIRT + 0x00c00000)
+#define BALLOON3_SAMOSA_DATA_REG (BALLOON3_FPGA_VIRT + 0x00c00004)
+#define BALLOON3_SAMOSA_STATUS_REG (BALLOON3_FPGA_VIRT + 0x00c0001c)
+
+/* GPIOs for irqs */
+#define BALLOON3_GPIO_AUX_NIRQ (94)
+#define BALLOON3_GPIO_CODEC_IRQ (95)
+
+/* NAND and Idle LED GPIOs */
+#define BALLOON3_GPIO_LED_NAND (9)
+#define BALLOON3_GPIO_LED_IDLE (10)
+
+/* backlight control */
+#define BALLOON3_GPIO_RUN_BACKLIGHT (99)
+
+#define BALLOON3_GPIO_S0_CD (105)
+
+/* FPGA Interrupt Mask/Acknowledge Register */
+#define BALLOON3_INT_S0_IRQ (1 << 0) /* PCMCIA 0 IRQ */
+#define BALLOON3_INT_S0_STSCHG (1 << 1) /* PCMCIA 0 status changed */
+
+/* CF Status Register */
+#define BALLOON3_PCMCIA_nIRQ (1 << 0) /* IRQ / ready signal */
+#define BALLOON3_PCMCIA_nSTSCHG_BVD1 (1 << 1)
+ /* VDD sense / card status changed */
+
+/* CF control register (write) */
+#define BALLOON3_PCMCIA_RESET (1 << 0) /* Card reset signal */
+#define BALLOON3_PCMCIA_ENABLE (1 << 1)
+#define BALLOON3_PCMCIA_ADD_ENABLE (1 << 2)
+
+/* CPLD (and FPGA) interface definitions */
+#define CPLD_LCD0_DATA_SET 0x00
+#define CPLD_LCD0_DATA_CLR 0x10
+#define CPLD_LCD0_COMMAND_SET 0x01
+#define CPLD_LCD0_COMMAND_CLR 0x11
+#define CPLD_LCD1_DATA_SET 0x02
+#define CPLD_LCD1_DATA_CLR 0x12
+#define CPLD_LCD1_COMMAND_SET 0x03
+#define CPLD_LCD1_COMMAND_CLR 0x13
+
+#define CPLD_MISC_SET 0x07
+#define CPLD_MISC_CLR 0x17
+#define CPLD_MISC_LOON_NRESET_BIT 0
+#define CPLD_MISC_LOON_UNSUSP_BIT 1
+#define CPLD_MISC_RUN_5V_BIT 2
+#define CPLD_MISC_CHG_D0_BIT 3
+#define CPLD_MISC_CHG_D1_BIT 4
+#define CPLD_MISC_DAC_NCS_BIT 5
+
+#define CPLD_LCD_SET 0x08
+#define CPLD_LCD_CLR 0x18
+#define CPLD_LCD_BACKLIGHT_EN_0_BIT 0
+#define CPLD_LCD_BACKLIGHT_EN_1_BIT 1
+#define CPLD_LCD_LED_RED_BIT 4
+#define CPLD_LCD_LED_GREEN_BIT 5
+#define CPLD_LCD_NRESET_BIT 7
+
+#define CPLD_LCD_RO_SET 0x09
+#define CPLD_LCD_RO_CLR 0x19
+#define CPLD_LCD_RO_LCD0_nWAIT_BIT 0
+#define CPLD_LCD_RO_LCD1_nWAIT_BIT 1
+
+#define CPLD_SERIAL_SET 0x0a
+#define CPLD_SERIAL_CLR 0x1a
+#define CPLD_SERIAL_GSM_RI_BIT 0
+#define CPLD_SERIAL_GSM_CTS_BIT 1
+#define CPLD_SERIAL_GSM_DTR_BIT 2
+#define CPLD_SERIAL_LPR_CTS_BIT 3
+#define CPLD_SERIAL_TC232_CTS_BIT 4
+#define CPLD_SERIAL_TC232_DSR_BIT 5
+
+#define CPLD_SROUTING_SET 0x0b
+#define CPLD_SROUTING_CLR 0x1b
+#define CPLD_SROUTING_MSP430_LPR 0
+#define CPLD_SROUTING_MSP430_TC232 1
+#define CPLD_SROUTING_MSP430_GSM 2
+#define CPLD_SROUTING_LOON_LPR (0 << 4)
+#define CPLD_SROUTING_LOON_TC232 (1 << 4)
+#define CPLD_SROUTING_LOON_GSM (2 << 4)
+
+#define CPLD_AROUTING_SET 0x0c
+#define CPLD_AROUTING_CLR 0x1c
+#define CPLD_AROUTING_MIC2PHONE_BIT 0
+#define CPLD_AROUTING_PHONE2INT_BIT 1
+#define CPLD_AROUTING_PHONE2EXT_BIT 2
+#define CPLD_AROUTING_LOONL2INT_BIT 3
+#define CPLD_AROUTING_LOONL2EXT_BIT 4
+#define CPLD_AROUTING_LOONR2PHONE_BIT 5
+#define CPLD_AROUTING_LOONR2INT_BIT 6
+#define CPLD_AROUTING_LOONR2EXT_BIT 7
+
+extern int balloon3_has(enum balloon3_features feature);
+
+#endif
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
index a88d7caff0d..811743c5614 100644
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -23,6 +23,12 @@ static inline void colibri_pxa3xx_init_lcd(int bl_pin) {}
extern void colibri_pxa3xx_init_eth(struct ax_plat_data *plat_data);
#endif
+#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
+extern void colibri_pxa3xx_init_nand(void);
+#else
+static inline void colibri_pxa3xx_init_nand(void) {}
+#endif
+
/* physical memory regions */
#define COLIBRI_SDRAM_BASE 0xa0000000 /* SDRAM region */
diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S
index f6b4bf3e73d..241880608ac 100644
--- a/arch/arm/mach-pxa/include/mach/entry-macro.S
+++ b/arch/arm/mach-pxa/include/mach/entry-macro.S
@@ -24,34 +24,27 @@
mov \tmp, \tmp, lsr #13
and \tmp, \tmp, #0x7 @ Core G
cmp \tmp, #1
- bhi 1004f
+ bhi 1002f
+ @ Core Generation 1 (PXA25x)
mov \base, #io_p2v(0x40000000) @ IIR Ctl = 0x40d00000
add \base, \base, #0x00d00000
ldr \irqstat, [\base, #0] @ ICIP
ldr \irqnr, [\base, #4] @ ICMR
- b 1002f
-1004:
- mrc p6, 0, \irqstat, c6, c0, 0 @ ICIP2
- mrc p6, 0, \irqnr, c7, c0, 0 @ ICMR2
ands \irqnr, \irqstat, \irqnr
- beq 1003f
+ beq 1001f
rsb \irqstat, \irqnr, #0
and \irqstat, \irqstat, \irqnr
clz \irqnr, \irqstat
- rsb \irqnr, \irqnr, #31
- add \irqnr, \irqnr, #(32 + PXA_IRQ(0))
+ rsb \irqnr, \irqnr, #(31 + PXA_IRQ(0))
b 1001f
-1003:
- mrc p6, 0, \irqstat, c0, c0, 0 @ ICIP
- mrc p6, 0, \irqnr, c1, c0, 0 @ ICMR
1002:
- ands \irqnr, \irqstat, \irqnr
+ @ Core Generation 2 (PXA27x) or Core Generation 3 (PXA3xx)
+ mrc p6, 0, \irqstat, c5, c0, 0 @ ICHP
+ tst \irqstat, #0x80000000
beq 1001f
- rsb \irqstat, \irqnr, #0
- and \irqstat, \irqstat, \irqnr
- clz \irqnr, \irqstat
- rsb \irqnr, \irqnr, #(31 + PXA_IRQ(0))
+ bic \irqstat, \irqstat, #0x80000000
+ mov \irqnr, \irqstat, lsr #16
1001:
.endm
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 16ab79547da..aa3d9f70a08 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -197,6 +197,16 @@
#define __cpu_is_pxa935(id) (0)
#endif
+#ifdef CONFIG_CPU_PXA950
+#define __cpu_is_pxa950(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x697; \
+ })
+#else
+#define __cpu_is_pxa950(id) (0)
+#endif
+
#define cpu_is_pxa210() \
({ \
__cpu_is_pxa210(read_cpuid_id()); \
@@ -249,6 +259,13 @@
__cpu_is_pxa935(id); \
})
+#define cpu_is_pxa950() \
+ ({ \
+ unsigned int id = read_cpuid(CPUID_ID); \
+ __cpu_is_pxa950(id); \
+ })
+
+
/*
* CPUID Core Generation Bit
* <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
diff --git a/arch/arm/mach-pxa/include/mach/irda.h b/arch/arm/mach-pxa/include/mach/irda.h
index 0a50c3c763d..3cd41f77dda 100644
--- a/arch/arm/mach-pxa/include/mach/irda.h
+++ b/arch/arm/mach-pxa/include/mach/irda.h
@@ -12,6 +12,8 @@ struct pxaficp_platform_data {
void (*transceiver_mode)(struct device *dev, int mode);
int (*startup)(struct device *dev);
void (*shutdown)(struct device *dev);
+ int gpio_pwdown; /* powerdown GPIO for the IrDA chip */
+ bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */
};
extern void pxa_set_ficp_info(struct pxaficp_platform_data *info);
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 6a1d9599334..3677a9af9c8 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -68,9 +68,10 @@
#ifdef CONFIG_PXA3xx
#define IRQ_SSP4 PXA_IRQ(13) /* SSP4 service request */
#define IRQ_CIR PXA_IRQ(34) /* Consumer IR */
+#define IRQ_COMM_WDT PXA_IRQ(35) /* Comm WDT interrupt */
#define IRQ_TSI PXA_IRQ(36) /* Touch Screen Interface (PXA320) */
#define IRQ_USIM2 PXA_IRQ(38) /* USIM2 Controller */
-#define IRQ_GRPHICS PXA_IRQ(39) /* Graphics Controller */
+#define IRQ_GCU PXA_IRQ(39) /* Graphics Controller */
#define IRQ_MMC2 PXA_IRQ(41) /* MMC2 Controller */
#define IRQ_1WIRE PXA_IRQ(44) /* 1-Wire Controller */
#define IRQ_NAND PXA_IRQ(45) /* NAND Controller */
@@ -81,8 +82,31 @@
#define IRQ_MMC3 PXA_IRQ(55) /* MMC3 Controller (PXA310) */
#endif
-#define PXA_GPIO_IRQ_BASE PXA_IRQ(64)
-#define PXA_GPIO_IRQ_NUM (128)
+#ifdef CONFIG_CPU_PXA935
+#define IRQ_U2O PXA_IRQ(64) /* USB OTG 2.0 Controller (PXA935) */
+#define IRQ_U2H PXA_IRQ(65) /* USB Host 2.0 Controller (PXA935) */
+
+#define IRQ_MMC3_PXA935 PXA_IRQ(72) /* MMC3 Controller (PXA935) */
+#define IRQ_MMC4_PXA935 PXA_IRQ(73) /* MMC4 Controller (PXA935) */
+#define IRQ_MMC5_PXA935 PXA_IRQ(74) /* MMC5 Controller (PXA935) */
+
+#define IRQ_U2P PXA_IRQ(93) /* USB PHY D+/D- Lines (PXA935) */
+#endif
+
+#ifdef CONFIG_CPU_PXA930
+#define IRQ_ENHROT PXA_IRQ(37) /* Enhanced Rotary (PXA930) */
+#define IRQ_ACIPC0 PXA_IRQ(5)
+#define IRQ_ACIPC1 PXA_IRQ(40)
+#define IRQ_ACIPC2 PXA_IRQ(19)
+#define IRQ_TRKBALL PXA_IRQ(43) /* Track Ball */
+#endif
+
+#ifdef CONFIG_CPU_PXA950
+#define IRQ_GC500 PXA_IRQ(70) /* Graphics Controller (PXA950) */
+#endif
+
+#define PXA_GPIO_IRQ_BASE PXA_IRQ(96)
+#define PXA_GPIO_IRQ_NUM (192)
#define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x))
#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
@@ -105,6 +129,8 @@
#define IRQ_BOARD_END (IRQ_BOARD_START + 70)
#elif defined(CONFIG_MACH_ZYLONITE)
#define IRQ_BOARD_END (IRQ_BOARD_START + 32)
+#elif defined(CONFIG_PXA_EZX)
+#define IRQ_BOARD_END (IRQ_BOARD_START + 23)
#else
#define IRQ_BOARD_END (IRQ_BOARD_START + 16)
#endif
@@ -237,6 +263,16 @@
#define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14)
#define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15)
+/* Balloon3 Interrupts */
+#define BALLOON3_IRQ(x) (IRQ_BOARD_START + (x))
+
+#define BALLOON3_BP_CF_NRDY_IRQ BALLOON3_IRQ(0)
+#define BALLOON3_BP_NSTSCHG_IRQ BALLOON3_IRQ(1)
+
+#define BALLOON3_AUX_NIRQ IRQ_GPIO(BALLOON3_GPIO_AUX_NIRQ)
+#define BALLOON3_CODEC_IRQ IRQ_GPIO(BALLOON3_GPIO_CODEC_IRQ)
+#define BALLOON3_S0_CD_IRQ IRQ_GPIO(BALLOON3_GPIO_S0_CD)
+
/* LoCoMo Interrupts (CONFIG_SHARP_LOCOMO) */
#define IRQ_LOCOMO_KEY_BASE (IRQ_BOARD_START + 0)
#define IRQ_LOCOMO_GPIO_BASE (IRQ_BOARD_START + 1)
diff --git a/arch/arm/mach-pxa/include/mach/mfp.h b/arch/arm/mach-pxa/include/mach/mfp.h
index 482185053a9..271e249ae34 100644
--- a/arch/arm/mach-pxa/include/mach/mfp.h
+++ b/arch/arm/mach-pxa/include/mach/mfp.h
@@ -16,305 +16,6 @@
#ifndef __ASM_ARCH_MFP_H
#define __ASM_ARCH_MFP_H
-#define mfp_to_gpio(m) ((m) % 128)
-
-/* list of all the configurable MFP pins */
-enum {
- MFP_PIN_INVALID = -1,
-
- MFP_PIN_GPIO0 = 0,
- MFP_PIN_GPIO1,
- MFP_PIN_GPIO2,
- MFP_PIN_GPIO3,
- MFP_PIN_GPIO4,
- MFP_PIN_GPIO5,
- MFP_PIN_GPIO6,
- MFP_PIN_GPIO7,
- MFP_PIN_GPIO8,
- MFP_PIN_GPIO9,
- MFP_PIN_GPIO10,
- MFP_PIN_GPIO11,
- MFP_PIN_GPIO12,
- MFP_PIN_GPIO13,
- MFP_PIN_GPIO14,
- MFP_PIN_GPIO15,
- MFP_PIN_GPIO16,
- MFP_PIN_GPIO17,
- MFP_PIN_GPIO18,
- MFP_PIN_GPIO19,
- MFP_PIN_GPIO20,
- MFP_PIN_GPIO21,
- MFP_PIN_GPIO22,
- MFP_PIN_GPIO23,
- MFP_PIN_GPIO24,
- MFP_PIN_GPIO25,
- MFP_PIN_GPIO26,
- MFP_PIN_GPIO27,
- MFP_PIN_GPIO28,
- MFP_PIN_GPIO29,
- MFP_PIN_GPIO30,
- MFP_PIN_GPIO31,
- MFP_PIN_GPIO32,
- MFP_PIN_GPIO33,
- MFP_PIN_GPIO34,
- MFP_PIN_GPIO35,
- MFP_PIN_GPIO36,
- MFP_PIN_GPIO37,
- MFP_PIN_GPIO38,
- MFP_PIN_GPIO39,
- MFP_PIN_GPIO40,
- MFP_PIN_GPIO41,
- MFP_PIN_GPIO42,
- MFP_PIN_GPIO43,
- MFP_PIN_GPIO44,
- MFP_PIN_GPIO45,
- MFP_PIN_GPIO46,
- MFP_PIN_GPIO47,
- MFP_PIN_GPIO48,
- MFP_PIN_GPIO49,
- MFP_PIN_GPIO50,
- MFP_PIN_GPIO51,
- MFP_PIN_GPIO52,
- MFP_PIN_GPIO53,
- MFP_PIN_GPIO54,
- MFP_PIN_GPIO55,
- MFP_PIN_GPIO56,
- MFP_PIN_GPIO57,
- MFP_PIN_GPIO58,
- MFP_PIN_GPIO59,
- MFP_PIN_GPIO60,
- MFP_PIN_GPIO61,
- MFP_PIN_GPIO62,
- MFP_PIN_GPIO63,
- MFP_PIN_GPIO64,
- MFP_PIN_GPIO65,
- MFP_PIN_GPIO66,
- MFP_PIN_GPIO67,
- MFP_PIN_GPIO68,
- MFP_PIN_GPIO69,
- MFP_PIN_GPIO70,
- MFP_PIN_GPIO71,
- MFP_PIN_GPIO72,
- MFP_PIN_GPIO73,
- MFP_PIN_GPIO74,
- MFP_PIN_GPIO75,
- MFP_PIN_GPIO76,
- MFP_PIN_GPIO77,
- MFP_PIN_GPIO78,
- MFP_PIN_GPIO79,
- MFP_PIN_GPIO80,
- MFP_PIN_GPIO81,
- MFP_PIN_GPIO82,
- MFP_PIN_GPIO83,
- MFP_PIN_GPIO84,
- MFP_PIN_GPIO85,
- MFP_PIN_GPIO86,
- MFP_PIN_GPIO87,
- MFP_PIN_GPIO88,
- MFP_PIN_GPIO89,
- MFP_PIN_GPIO90,
- MFP_PIN_GPIO91,
- MFP_PIN_GPIO92,
- MFP_PIN_GPIO93,
- MFP_PIN_GPIO94,
- MFP_PIN_GPIO95,
- MFP_PIN_GPIO96,
- MFP_PIN_GPIO97,
- MFP_PIN_GPIO98,
- MFP_PIN_GPIO99,
- MFP_PIN_GPIO100,
- MFP_PIN_GPIO101,
- MFP_PIN_GPIO102,
- MFP_PIN_GPIO103,
- MFP_PIN_GPIO104,
- MFP_PIN_GPIO105,
- MFP_PIN_GPIO106,
- MFP_PIN_GPIO107,
- MFP_PIN_GPIO108,
- MFP_PIN_GPIO109,
- MFP_PIN_GPIO110,
- MFP_PIN_GPIO111,
- MFP_PIN_GPIO112,
- MFP_PIN_GPIO113,
- MFP_PIN_GPIO114,
- MFP_PIN_GPIO115,
- MFP_PIN_GPIO116,
- MFP_PIN_GPIO117,
- MFP_PIN_GPIO118,
- MFP_PIN_GPIO119,
- MFP_PIN_GPIO120,
- MFP_PIN_GPIO121,
- MFP_PIN_GPIO122,
- MFP_PIN_GPIO123,
- MFP_PIN_GPIO124,
- MFP_PIN_GPIO125,
- MFP_PIN_GPIO126,
- MFP_PIN_GPIO127,
- MFP_PIN_GPIO0_2,
- MFP_PIN_GPIO1_2,
- MFP_PIN_GPIO2_2,
- MFP_PIN_GPIO3_2,
- MFP_PIN_GPIO4_2,
- MFP_PIN_GPIO5_2,
- MFP_PIN_GPIO6_2,
- MFP_PIN_GPIO7_2,
- MFP_PIN_GPIO8_2,
- MFP_PIN_GPIO9_2,
- MFP_PIN_GPIO10_2,
- MFP_PIN_GPIO11_2,
- MFP_PIN_GPIO12_2,
- MFP_PIN_GPIO13_2,
- MFP_PIN_GPIO14_2,
- MFP_PIN_GPIO15_2,
- MFP_PIN_GPIO16_2,
- MFP_PIN_GPIO17_2,
-
- MFP_PIN_ULPI_STP,
- MFP_PIN_ULPI_NXT,
- MFP_PIN_ULPI_DIR,
-
- MFP_PIN_nXCVREN,
- MFP_PIN_DF_CLE_nOE,
- MFP_PIN_DF_nADV1_ALE,
- MFP_PIN_DF_SCLK_E,
- MFP_PIN_DF_SCLK_S,
- MFP_PIN_nBE0,
- MFP_PIN_nBE1,
- MFP_PIN_DF_nADV2_ALE,
- MFP_PIN_DF_INT_RnB,
- MFP_PIN_DF_nCS0,
- MFP_PIN_DF_nCS1,
- MFP_PIN_nLUA,
- MFP_PIN_nLLA,
- MFP_PIN_DF_nWE,
- MFP_PIN_DF_ALE_nWE,
- MFP_PIN_DF_nRE_nOE,
- MFP_PIN_DF_ADDR0,
- MFP_PIN_DF_ADDR1,
- MFP_PIN_DF_ADDR2,
- MFP_PIN_DF_ADDR3,
- MFP_PIN_DF_IO0,
- MFP_PIN_DF_IO1,
- MFP_PIN_DF_IO2,
- MFP_PIN_DF_IO3,
- MFP_PIN_DF_IO4,
- MFP_PIN_DF_IO5,
- MFP_PIN_DF_IO6,
- MFP_PIN_DF_IO7,
- MFP_PIN_DF_IO8,
- MFP_PIN_DF_IO9,
- MFP_PIN_DF_IO10,
- MFP_PIN_DF_IO11,
- MFP_PIN_DF_IO12,
- MFP_PIN_DF_IO13,
- MFP_PIN_DF_IO14,
- MFP_PIN_DF_IO15,
-
- /* additional pins on PXA930 */
- MFP_PIN_GSIM_UIO,
- MFP_PIN_GSIM_UCLK,
- MFP_PIN_GSIM_UDET,
- MFP_PIN_GSIM_nURST,
- MFP_PIN_PMIC_INT,
- MFP_PIN_RDY,
-
- MFP_PIN_MAX,
-};
-
-/*
- * a possible MFP configuration is represented by a 32-bit integer
- *
- * bit 0.. 9 - MFP Pin Number (1024 Pins Maximum)
- * bit 10..12 - Alternate Function Selection
- * bit 13..15 - Drive Strength
- * bit 16..18 - Low Power Mode State
- * bit 19..20 - Low Power Mode Edge Detection
- * bit 21..22 - Run Mode Pull State
- *
- * to facilitate the definition, the following macros are provided
- *
- * MFP_CFG_DEFAULT - default MFP configuration value, with
- * alternate function = 0,
- * drive strength = fast 3mA (MFP_DS03X)
- * low power mode = default
- * edge detection = none
- *
- * MFP_CFG - default MFPR value with alternate function
- * MFP_CFG_DRV - default MFPR value with alternate function and
- * pin drive strength
- * MFP_CFG_LPM - default MFPR value with alternate function and
- * low power mode
- * MFP_CFG_X - default MFPR value with alternate function,
- * pin drive strength and low power mode
- */
-
-typedef unsigned long mfp_cfg_t;
-
-#define MFP_PIN(x) ((x) & 0x3ff)
-
-#define MFP_AF0 (0x0 << 10)
-#define MFP_AF1 (0x1 << 10)
-#define MFP_AF2 (0x2 << 10)
-#define MFP_AF3 (0x3 << 10)
-#define MFP_AF4 (0x4 << 10)
-#define MFP_AF5 (0x5 << 10)
-#define MFP_AF6 (0x6 << 10)
-#define MFP_AF7 (0x7 << 10)
-#define MFP_AF_MASK (0x7 << 10)
-#define MFP_AF(x) (((x) >> 10) & 0x7)
-
-#define MFP_DS01X (0x0 << 13)
-#define MFP_DS02X (0x1 << 13)
-#define MFP_DS03X (0x2 << 13)
-#define MFP_DS04X (0x3 << 13)
-#define MFP_DS06X (0x4 << 13)
-#define MFP_DS08X (0x5 << 13)
-#define MFP_DS10X (0x6 << 13)
-#define MFP_DS13X (0x7 << 13)
-#define MFP_DS_MASK (0x7 << 13)
-#define MFP_DS(x) (((x) >> 13) & 0x7)
-
-#define MFP_LPM_DEFAULT (0x0 << 16)
-#define MFP_LPM_DRIVE_LOW (0x1 << 16)
-#define MFP_LPM_DRIVE_HIGH (0x2 << 16)
-#define MFP_LPM_PULL_LOW (0x3 << 16)
-#define MFP_LPM_PULL_HIGH (0x4 << 16)
-#define MFP_LPM_FLOAT (0x5 << 16)
-#define MFP_LPM_INPUT (0x6 << 16)
-#define MFP_LPM_STATE_MASK (0x7 << 16)
-#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7)
-
-#define MFP_LPM_EDGE_NONE (0x0 << 19)
-#define MFP_LPM_EDGE_RISE (0x1 << 19)
-#define MFP_LPM_EDGE_FALL (0x2 << 19)
-#define MFP_LPM_EDGE_BOTH (0x3 << 19)
-#define MFP_LPM_EDGE_MASK (0x3 << 19)
-#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3)
-
-#define MFP_PULL_NONE (0x0 << 21)
-#define MFP_PULL_LOW (0x1 << 21)
-#define MFP_PULL_HIGH (0x2 << 21)
-#define MFP_PULL_BOTH (0x3 << 21)
-#define MFP_PULL_MASK (0x3 << 21)
-#define MFP_PULL(x) (((x) >> 21) & 0x3)
-
-#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\
- MFP_LPM_EDGE_NONE | MFP_PULL_NONE)
-
-#define MFP_CFG(pin, af) \
- ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\
- (MFP_PIN(MFP_PIN_##pin) | MFP_##af))
-
-#define MFP_CFG_DRV(pin, af, drv) \
- ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\
- (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv))
-
-#define MFP_CFG_LPM(pin, af, lpm) \
- ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\
- (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm))
-
-#define MFP_CFG_X(pin, af, drv, lpm) \
- ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\
- (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm))
+#include <plat/mfp.h>
#endif /* __ASM_ARCH_MFP_H */
diff --git a/arch/arm/mach-pxa/include/mach/mmc.h b/arch/arm/mach-pxa/include/mach/mmc.h
index 6d1304c9270..02a69dc2ee6 100644
--- a/arch/arm/mach-pxa/include/mach/mmc.h
+++ b/arch/arm/mach-pxa/include/mach/mmc.h
@@ -14,6 +14,11 @@ struct pxamci_platform_data {
int (*get_ro)(struct device *);
void (*setpower)(struct device *, unsigned int);
void (*exit)(struct device *, void *);
+ int gpio_card_detect; /* gpio detecting card insertion */
+ int gpio_card_ro; /* gpio detecting read only toggle */
+ bool gpio_card_ro_invert; /* gpio ro is inverted */
+ int gpio_power; /* gpio powering up MMC bus */
+ bool gpio_power_invert; /* gpio power is inverted */
};
extern void pxa_set_mci_info(struct pxamci_platform_data *info);
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h
new file mode 100644
index 00000000000..3dc9b074ab4
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/palmtc.h
@@ -0,0 +1,86 @@
+/*
+ * linux/include/asm-arm/arch-pxa/palmtc-gpio.h
+ *
+ * GPIOs and interrupts for Palm Tungsten|C Handheld Computer
+ *
+ * Authors: Alex Osborne <bobofdoom@gmail.com>
+ * Marek Vasut <marek.vasut@gmail.com>
+ * Holger Bocklet <bitz.email@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _INCLUDE_PALMTC_H_
+#define _INCLUDE_PALMTC_H_
+
+/** HERE ARE GPIOs **/
+
+/* GPIOs */
+#define GPIO_NR_PALMTC_EARPHONE_DETECT 2
+#define GPIO_NR_PALMTC_CRADLE_DETECT 5
+#define GPIO_NR_PALMTC_HOTSYNC_BUTTON 7
+
+/* SD/MMC */
+#define GPIO_NR_PALMTC_SD_DETECT_N 12
+#define GPIO_NR_PALMTC_SD_POWER 32
+#define GPIO_NR_PALMTC_SD_READONLY 54
+
+/* WLAN */
+#define GPIO_NR_PALMTC_PCMCIA_READY 13
+#define GPIO_NR_PALMTC_PCMCIA_PWRREADY 14
+#define GPIO_NR_PALMTC_PCMCIA_POWER1 15
+#define GPIO_NR_PALMTC_PCMCIA_POWER2 33
+#define GPIO_NR_PALMTC_PCMCIA_POWER3 55
+#define GPIO_NR_PALMTC_PCMCIA_RESET 78
+
+/* UDC */
+#define GPIO_NR_PALMTC_USB_DETECT_N 4
+#define GPIO_NR_PALMTC_USB_POWER 36
+
+/* LCD/BACKLIGHT */
+#define GPIO_NR_PALMTC_BL_POWER 16
+#define GPIO_NR_PALMTC_LCD_POWER 44
+#define GPIO_NR_PALMTC_LCD_BLANK 38
+
+/* UART */
+#define GPIO_NR_PALMTC_RS232_POWER 37
+
+/* IRDA */
+#define GPIO_NR_PALMTC_IR_DISABLE 45
+
+/* IRQs */
+#define IRQ_GPIO_PALMTC_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMTC_SD_DETECT_N)
+#define IRQ_GPIO_PALMTC_WLAN_READY IRQ_GPIO(GPIO_NR_PALMTC_WLAN_READY)
+
+/* UCB1400 GPIOs */
+#define GPIO_NR_PALMTC_POWER_DETECT (0x80 | 0x00)
+#define GPIO_NR_PALMTC_HEADPHONE_DETECT (0x80 | 0x01)
+#define GPIO_NR_PALMTC_SPEAKER_ENABLE (0x80 | 0x03)
+#define GPIO_NR_PALMTC_VIBRA_POWER (0x80 | 0x05)
+#define GPIO_NR_PALMTC_LED_POWER (0x80 | 0x07)
+
+/** HERE ARE INIT VALUES **/
+#define PALMTC_UCB1400_GPIO_OFFSET 0x80
+
+/* BATTERY */
+#define PALMTC_BAT_MAX_VOLTAGE 4000 /* 4.00V maximum voltage */
+#define PALMTC_BAT_MIN_VOLTAGE 3550 /* 3.55V critical voltage */
+#define PALMTC_BAT_MAX_CURRENT 0 /* unknown */
+#define PALMTC_BAT_MIN_CURRENT 0 /* unknown */
+#define PALMTC_BAT_MAX_CHARGE 1 /* unknown */
+#define PALMTC_BAT_MIN_CHARGE 1 /* unknown */
+#define PALMTC_MAX_LIFE_MINS 240 /* on-life in minutes */
+
+#define PALMTC_BAT_MEASURE_DELAY (HZ * 1)
+
+/* BACKLIGHT */
+#define PALMTC_MAX_INTENSITY 0xFE
+#define PALMTC_DEFAULT_INTENSITY 0x7E
+#define PALMTC_LIMIT_MASK 0x7F
+#define PALMTC_PRESCALER 0x3F
+#define PALMTC_PERIOD_NS 3500
+
+#endif
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h
index e74082c872e..1be0db6ed55 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/include/mach/palmtx.h
@@ -82,6 +82,11 @@
#define PALMTX_PHYS_FLASH_START PXA_CS0_PHYS /* ChipSelect 0 */
#define PALMTX_PHYS_NAND_START PXA_CS1_PHYS /* ChipSelect 1 */
+#define PALMTX_NAND_ALE_PHYS (PALMTX_PHYS_NAND_START | (1 << 24))
+#define PALMTX_NAND_CLE_PHYS (PALMTX_PHYS_NAND_START | (1 << 25))
+#define PALMTX_NAND_ALE_VIRT 0xff100000
+#define PALMTX_NAND_CLE_VIRT 0xff200000
+
/* TOUCHSCREEN */
#define AC97_LINK_FRAME 21
diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
index 7d1a059b3d4..e91d63cfe81 100644
--- a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
+++ b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
@@ -208,7 +208,7 @@
#define CKEN_MVED 43 /* < MVED clock enable */
/* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
-#define PXA300_CKEN_GRAPHICS 42 /* Graphics controller clock enable */
-#define PXA320_CKEN_GRAPHICS 7 /* Graphics controller clock enable */
+#define CKEN_PXA300_GCU 42 /* Graphics controller clock enable */
+#define CKEN_PXA320_GCU 7 /* Graphics controller clock enable */
#endif /* __ASM_ARCH_PXA3XX_REGS_H */
diff --git a/arch/arm/mach-pxa/include/mach/pxafb.h b/arch/arm/mach-pxa/include/mach/pxafb.h
index 6932720ba04..f73061c90b5 100644
--- a/arch/arm/mach-pxa/include/mach/pxafb.h
+++ b/arch/arm/mach-pxa/include/mach/pxafb.h
@@ -118,7 +118,8 @@ struct pxafb_mach_info {
u_int fixed_modes:1,
cmap_inverse:1,
cmap_static:1,
- unused:29;
+ acceleration_enabled:1,
+ unused:28;
/* The following should be defined in LCCR0
* LCCR0_Act or LCCR0_Pas Active or Passive
diff --git a/arch/arm/mach-pxa/include/mach/regs-intc.h b/arch/arm/mach-pxa/include/mach/regs-intc.h
index ad23e74b762..68464ce1c1e 100644
--- a/arch/arm/mach-pxa/include/mach/regs-intc.h
+++ b/arch/arm/mach-pxa/include/mach/regs-intc.h
@@ -13,6 +13,7 @@
#define ICFP __REG(0x40D0000C) /* Interrupt Controller FIQ Pending Register */
#define ICPR __REG(0x40D00010) /* Interrupt Controller Pending Register */
#define ICCR __REG(0x40D00014) /* Interrupt Controller Control Register */
+#define ICHP __REG(0x40D00018) /* Interrupt Controller Highest Priority Register */
#define ICIP2 __REG(0x40D0009C) /* Interrupt Controller IRQ Pending Register 2 */
#define ICMR2 __REG(0x40D000A0) /* Interrupt Controller Mask Register 2 */
@@ -20,4 +21,14 @@
#define ICFP2 __REG(0x40D000A8) /* Interrupt Controller FIQ Pending Register 2 */
#define ICPR2 __REG(0x40D000AC) /* Interrupt Controller Pending Register 2 */
+#define ICIP3 __REG(0x40D00130) /* Interrupt Controller IRQ Pending Register 3 */
+#define ICMR3 __REG(0x40D00134) /* Interrupt Controller Mask Register 3 */
+#define ICLR3 __REG(0x40D00138) /* Interrupt Controller Level Register 3 */
+#define ICFP3 __REG(0x40D0013C) /* Interrupt Controller FIQ Pending Register 3 */
+#define ICPR3 __REG(0x40D00140) /* Interrupt Controller Pending Register 3 */
+
+#define IPR(x) __REG(0x40D0001C + (x < 32 ? (x << 2) \
+ : (x < 64 ? (0x94 + ((x - 32) << 2)) \
+ : (0x128 + ((x - 64) << 2)))))
+
#endif /* __ASM_MACH_REGS_INTC_H */
diff --git a/arch/arm/mach-pxa/include/mach/uncompress.h b/arch/arm/mach-pxa/include/mach/uncompress.h
index b54749413e9..237734b5b1b 100644
--- a/arch/arm/mach-pxa/include/mach/uncompress.h
+++ b/arch/arm/mach-pxa/include/mach/uncompress.h
@@ -37,7 +37,7 @@ static inline void arch_decomp_setup(void)
{
if (machine_is_littleton() || machine_is_intelmote2()
|| machine_is_csb726() || machine_is_stargate2()
- || machine_is_cm_x300())
+ || machine_is_cm_x300() || machine_is_balloon3())
UART = STUART;
}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index f6e0300e4f6..d694ce28966 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -120,7 +120,7 @@ static void __init pxa_init_low_gpio_irq(set_wake_t fn)
void __init pxa_init_irq(int irq_nr, set_wake_t fn)
{
- int irq;
+ int irq, i;
pxa_internal_irq_nr = irq_nr;
@@ -129,6 +129,12 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn)
_ICLR(irq) = 0; /* all IRQs are IRQ, not FIQ */
}
+ /* initialize interrupt priority */
+ if (cpu_is_pxa27x() || cpu_is_pxa3xx()) {
+ for (i = 0; i < irq_nr; i++)
+ IPR(i) = i | (1 << 31);
+ }
+
/* only unmasked interrupts kick us out of idle */
ICCR = 1;
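
For readers unfamiliar with the PXA27x/PXA3xx interrupt controller, the comment block below (illustrative only, not part of the patch) spells out the address arithmetic behind the IPR() macro added to regs-intc.h above; treating bit 31 as a "this entry is valid" flag is inferred from how pxa_init_irq() sets it.

/*
 * Illustrative sketch, not part of the patch.  IPR(x) selects one of
 * three banks of Interrupt Priority Registers:
 *
 *   irq  0..31 -> 0x40D0001C + 4 * irq          (first bank)
 *   irq 32..63 -> 0x40D000B0 + 4 * (irq - 32)   (0x1C + 0x94)
 *   irq 64..   -> 0x40D00144 + 4 * (irq - 64)   (0x1C + 0x128)
 *
 * pxa_init_irq() then writes "i | (1 << 31)" into each entry, so every
 * interrupt keeps its natural priority and bit 31 flags the entry as
 * in use.
 */
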
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 55b3788fd1a..13848955d13 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -265,45 +265,12 @@ static inline void littleton_init_keypad(void) {}
#endif
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
-static int littleton_mci_init(struct device *dev,
- irq_handler_t littleton_detect_int, void *data)
-{
- int err, gpio_cd = GPIO_MMC1_CARD_DETECT;
-
- err = gpio_request(gpio_cd, "mmc card detect");
- if (err)
- goto err_request_cd;
-
- gpio_direction_input(gpio_cd);
-
- err = request_irq(gpio_to_irq(gpio_cd), littleton_detect_int,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "mmc card detect", data);
- if (err) {
- dev_err(dev, "failed to request card detect IRQ\n");
- goto err_request_irq;
- }
- return 0;
-
-err_request_irq:
- gpio_free(gpio_cd);
-err_request_cd:
- return err;
-}
-
-static void littleton_mci_exit(struct device *dev, void *data)
-{
- int gpio_cd = GPIO_MMC1_CARD_DETECT;
-
- free_irq(gpio_to_irq(gpio_cd), data);
- gpio_free(gpio_cd);
-}
-
static struct pxamci_platform_data littleton_mci_platform_data = {
- .detect_delay = 20,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .init = littleton_mci_init,
- .exit = littleton_mci_exit,
+ .detect_delay = 20,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_MMC1_CARD_DETECT,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static void __init littleton_init_mmc(void)
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index f04c8333dff..c6a94d3fdd6 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -482,11 +482,14 @@ static void lubbock_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data lubbock_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .detect_delay = 1,
- .init = lubbock_mci_init,
- .get_ro = lubbock_mci_get_ro,
- .exit = lubbock_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .detect_delay = 1,
+ .init = lubbock_mci_init,
+ .get_ro = lubbock_mci_get_ro,
+ .exit = lubbock_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static void lubbock_irda_transceiver_mode(struct device *dev, int mode)
@@ -504,8 +507,9 @@ static void lubbock_irda_transceiver_mode(struct device *dev, int mode)
}
static struct pxaficp_platform_data lubbock_ficp_platform_data = {
- .transceiver_cap = IR_SIRMODE | IR_FIRMODE,
- .transceiver_mode = lubbock_irda_transceiver_mode,
+ .gpio_pwdown = -1,
+ .transceiver_cap = IR_SIRMODE | IR_FIRMODE,
+ .transceiver_mode = lubbock_irda_transceiver_mode,
};
static void __init lubbock_init(void)
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index ca39669cffc..5360c07f513 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -140,15 +140,9 @@ static unsigned long magician_pin_config[] __initdata = {
* IRDA
*/
-static void magician_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO83_MAGICIAN_nIR_EN, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
static struct pxaficp_platform_data magician_ficp_info = {
- .transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = magician_irda_transceiver_mode,
+ .gpio_pwdown = GPIO83_MAGICIAN_nIR_EN,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/*
@@ -651,55 +645,24 @@ static struct platform_device bq24022 = {
static int magician_mci_init(struct device *dev,
irq_handler_t detect_irq, void *data)
{
- int err;
-
- err = request_irq(IRQ_MAGICIAN_SD, detect_irq,
+ return request_irq(IRQ_MAGICIAN_SD, detect_irq,
IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
- "MMC card detect", data);
- if (err)
- goto err_request_irq;
- err = gpio_request(EGPIO_MAGICIAN_SD_POWER, "SD_POWER");
- if (err)
- goto err_request_power;
- err = gpio_request(EGPIO_MAGICIAN_nSD_READONLY, "nSD_READONLY");
- if (err)
- goto err_request_readonly;
-
- return 0;
-
-err_request_readonly:
- gpio_free(EGPIO_MAGICIAN_SD_POWER);
-err_request_power:
- free_irq(IRQ_MAGICIAN_SD, data);
-err_request_irq:
- return err;
-}
-
-static void magician_mci_setpower(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *pdata = dev->platform_data;
-
- gpio_set_value(EGPIO_MAGICIAN_SD_POWER, (1 << vdd) & pdata->ocr_mask);
-}
-
-static int magician_mci_get_ro(struct device *dev)
-{
- return (!gpio_get_value(EGPIO_MAGICIAN_nSD_READONLY));
+ "mmc card detect", data);
}
static void magician_mci_exit(struct device *dev, void *data)
{
- gpio_free(EGPIO_MAGICIAN_nSD_READONLY);
- gpio_free(EGPIO_MAGICIAN_SD_POWER);
free_irq(IRQ_MAGICIAN_SD, data);
}
static struct pxamci_platform_data magician_mci_info = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = magician_mci_init,
- .get_ro = magician_mci_get_ro,
- .setpower = magician_mci_setpower,
- .exit = magician_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .init = magician_mci_init,
+ .exit = magician_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = EGPIO_MAGICIAN_nSD_READONLY,
+ .gpio_card_ro_invert = 1,
+ .gpio_power = EGPIO_MAGICIAN_SD_POWER,
};
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index f4dabf0273c..a4eeae345e6 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -450,10 +450,13 @@ static void mainstone_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data mainstone_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = mainstone_mci_init,
- .setpower = mainstone_mci_setpower,
- .exit = mainstone_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .init = mainstone_mci_init,
+ .setpower = mainstone_mci_setpower,
+ .exit = mainstone_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static void mainstone_irda_transceiver_mode(struct device *dev, int mode)
@@ -476,8 +479,9 @@ static void mainstone_irda_transceiver_mode(struct device *dev, int mode)
}
static struct pxaficp_platform_data mainstone_ficp_platform_data = {
- .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
- .transceiver_mode = mainstone_irda_transceiver_mode,
+ .gpio_pwdown = -1,
+ .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
+ .transceiver_mode = mainstone_irda_transceiver_mode,
};
static struct gpio_keys_button gpio_keys_button[] = {
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 2d28132c725..3cab452e556 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -434,72 +434,15 @@ struct gpio_vbus_mach_info gpio_vbus_data = {
/*
* SDIO/MMC Card controller
*/
-static void mci_setpower(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
-
- if ((1 << vdd) & p_d->ocr_mask)
- gpio_set_value(GPIO91_SDIO_EN, 1); /* enable SDIO power */
- else
- gpio_set_value(GPIO91_SDIO_EN, 0); /* disable SDIO power */
-}
-
-static int mci_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO78_SDIO_RO);
-}
-
-struct gpio_ress mci_gpios[] = {
- MIO_GPIO_IN(GPIO78_SDIO_RO, "SDIO readonly detect"),
- MIO_GPIO_IN(GPIO15_SDIO_INSERT, "SDIO insertion detect"),
- MIO_GPIO_OUT(GPIO91_SDIO_EN, 0, "SDIO power enable")
-};
-
-static void mci_exit(struct device *dev, void *data)
-{
- mio_gpio_free(ARRAY_AND_SIZE(mci_gpios));
- free_irq(gpio_to_irq(GPIO15_SDIO_INSERT), data);
-}
-
-static struct pxamci_platform_data mioa701_mci_info;
-
/**
* The card detect interrupt isn't debounced so we delay it by 250ms
* to give the card a chance to fully insert/eject.
*/
-static int mci_init(struct device *dev, irq_handler_t detect_int, void *data)
-{
- int rc;
- int irq = gpio_to_irq(GPIO15_SDIO_INSERT);
-
- rc = mio_gpio_request(ARRAY_AND_SIZE(mci_gpios));
- if (rc)
- goto err_gpio;
- /* enable RE/FE interrupt on card insertion and removal */
- rc = request_irq(irq, detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (rc)
- goto err_irq;
-
- mioa701_mci_info.detect_delay = msecs_to_jiffies(250);
- return 0;
-
-err_irq:
- dev_err(dev, "mioa701_mci_init: MMC/SD:"
- " can't request MMC card detect IRQ\n");
- mio_gpio_free(ARRAY_AND_SIZE(mci_gpios));
-err_gpio:
- return rc;
-}
-
static struct pxamci_platform_data mioa701_mci_info = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .init = mci_init,
- .get_ro = mci_get_ro,
- .setpower = mci_setpower,
- .exit = mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO15_SDIO_INSERT,
+ .gpio_card_ro = GPIO78_SDIO_RO,
+ .gpio_power = GPIO91_SDIO_EN,
};
/* FlashRAM */
@@ -765,19 +708,20 @@ static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
},
};
-static struct soc_camera_link iclink = {
- .bus_id = 0, /* Must match id in pxa27x_device_camera in device.c */
-};
-
/* Board I2C devices. */
static struct i2c_board_info __initdata mioa701_i2c_devices[] = {
{
- /* Must initialize before the camera(s) */
I2C_BOARD_INFO("mt9m111", 0x5d),
- .platform_data = &iclink,
},
};
+static struct soc_camera_link iclink = {
+ .bus_id = 0, /* Match id in pxa27x_device_camera in device.c */
+ .board_info = &mioa701_i2c_devices[0],
+ .i2c_adapter_id = 0,
+ .module_name = "mt9m111",
+};
+
struct i2c_pxa_platform_data i2c_pdata = {
.fast_mode = 1,
};
@@ -811,6 +755,7 @@ MIO_SIMPLE_DEV(pxa2xx_pcm, "pxa2xx-pcm", NULL)
MIO_SIMPLE_DEV(mioa701_sound, "mioa701-wm9713", NULL)
MIO_SIMPLE_DEV(mioa701_board, "mioa701-board", NULL)
MIO_SIMPLE_DEV(gpio_vbus, "gpio-vbus", &gpio_vbus_data);
+MIO_SIMPLE_DEV(mioa701_camera, "soc-camera-pdrv", &iclink);
static struct platform_device *devices[] __initdata = {
&mioa701_gpio_keys,
@@ -821,6 +766,7 @@ static struct platform_device *devices[] __initdata = {
&power_dev,
&strataflash,
&gpio_vbus,
+ &mioa701_camera,
&mioa701_board,
};
@@ -841,7 +787,7 @@ static void mioa701_restart(char c, const char *cmd)
static struct gpio_ress global_gpios[] = {
MIO_GPIO_OUT(GPIO9_CHARGE_EN, 1, "Charger enable"),
MIO_GPIO_OUT(GPIO18_POWEROFF, 0, "Power Off"),
- MIO_GPIO_OUT(GPIO87_LCD_POWER, 0, "LCD Power")
+ MIO_GPIO_OUT(GPIO87_LCD_POWER, 0, "LCD Power"),
};
static void __init mioa701_machine_init(void)
@@ -855,6 +801,7 @@ static void __init mioa701_machine_init(void)
mio_gpio_request(ARRAY_AND_SIZE(global_gpios));
bootstrap_init();
set_pxa_fb_info(&mioa701_pxafb_info);
+ mioa701_mci_info.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&mioa701_mci_info);
pxa_set_keypad_info(&mioa701_keypad_info);
wm97xx_bat_set_pdata(&mioa701_battery_data);
@@ -869,7 +816,6 @@ static void __init mioa701_machine_init(void)
pxa_set_i2c_info(&i2c_pdata);
pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
- i2c_register_board_info(0, ARRAY_AND_SIZE(mioa701_i2c_devices));
}
static void mioa701_machine_exit(void)
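
The camera change above moves the sensor from i2c_register_board_info() to the new soc-camera platform-device glue: the soc_camera_link now carries the I2C board info, adapter number and module name itself and is registered through a "soc-camera-pdrv" platform device. A minimal sketch follows; the sensor name, address and adapter id are taken from the hunks above, the variable names are illustrative.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <media/soc_camera.h>

static struct i2c_board_info example_camera_i2c = {
	I2C_BOARD_INFO("mt9m111", 0x5d),
};

static struct soc_camera_link example_iclink = {
	.bus_id		= 0,	/* must match the PXA camera host id */
	.board_info	= &example_camera_i2c,
	.i2c_adapter_id	= 0,
	.module_name	= "mt9m111",
};

static struct platform_device example_camera = {
	.name	= "soc-camera-pdrv",
	.id	= 0,
	.dev	= {
		.platform_data = &example_iclink,
	},
};
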
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 169fcc18154..1ad029dd443 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -25,6 +25,9 @@
#include <linux/wm97xx_batt.h>
#include <linux/power_supply.h>
#include <linux/sysdev.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -141,85 +144,50 @@ static unsigned long palmld_pin_config[] __initdata = {
};
/******************************************************************************
- * SD/MMC card controller
+ * NOR Flash
******************************************************************************/
-static int palmld_mci_init(struct device *dev, irq_handler_t palmld_detect_int,
- void *data)
-{
- int err = 0;
-
- /* Setup an interrupt for detecting card insert/remove events */
- err = gpio_request(GPIO_NR_PALMLD_SD_DETECT_N, "SD IRQ");
- if (err)
- goto err;
- err = gpio_direction_input(GPIO_NR_PALMLD_SD_DETECT_N);
- if (err)
- goto err2;
- err = request_irq(gpio_to_irq(GPIO_NR_PALMLD_SD_DETECT_N),
- palmld_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "SD/MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
- __func__);
- goto err2;
+static struct mtd_partition palmld_partitions[] = {
+ {
+ .name = "Flash",
+ .offset = 0x00000000,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0
}
+};
- err = gpio_request(GPIO_NR_PALMLD_SD_POWER, "SD_POWER");
- if (err)
- goto err3;
- err = gpio_direction_output(GPIO_NR_PALMLD_SD_POWER, 0);
- if (err)
- goto err4;
-
- err = gpio_request(GPIO_NR_PALMLD_SD_READONLY, "SD_READONLY");
- if (err)
- goto err4;
- err = gpio_direction_input(GPIO_NR_PALMLD_SD_READONLY);
- if (err)
- goto err5;
-
- printk(KERN_DEBUG "%s: irq registered\n", __func__);
-
- return 0;
-
-err5:
- gpio_free(GPIO_NR_PALMLD_SD_READONLY);
-err4:
- gpio_free(GPIO_NR_PALMLD_SD_POWER);
-err3:
- free_irq(gpio_to_irq(GPIO_NR_PALMLD_SD_DETECT_N), data);
-err2:
- gpio_free(GPIO_NR_PALMLD_SD_DETECT_N);
-err:
- return err;
-}
-
-static void palmld_mci_exit(struct device *dev, void *data)
-{
- gpio_free(GPIO_NR_PALMLD_SD_READONLY);
- gpio_free(GPIO_NR_PALMLD_SD_POWER);
- free_irq(gpio_to_irq(GPIO_NR_PALMLD_SD_DETECT_N), data);
- gpio_free(GPIO_NR_PALMLD_SD_DETECT_N);
-}
+static struct physmap_flash_data palmld_flash_data[] = {
+ {
+ .width = 2, /* bankwidth in bytes */
+ .parts = palmld_partitions,
+ .nr_parts = ARRAY_SIZE(palmld_partitions)
+ }
+};
-static void palmld_mci_power(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
- gpio_set_value(GPIO_NR_PALMLD_SD_POWER, p_d->ocr_mask & (1 << vdd));
-}
+static struct resource palmld_flash_resource = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_4M - 1,
+ .flags = IORESOURCE_MEM,
+};
-static int palmld_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_NR_PALMLD_SD_READONLY);
-}
+static struct platform_device palmld_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .resource = &palmld_flash_resource,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = palmld_flash_data,
+ },
+};
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
static struct pxamci_platform_data palmld_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .setpower = palmld_mci_power,
- .get_ro = palmld_mci_get_ro,
- .init = palmld_mci_init,
- .exit = palmld_mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_NR_PALMLD_SD_DETECT_N,
+ .gpio_card_ro = GPIO_NR_PALMLD_SD_READONLY,
+ .gpio_power = GPIO_NR_PALMLD_SD_POWER,
+ .detect_delay = 20,
};
/******************************************************************************
@@ -336,35 +304,9 @@ static struct platform_device palmld_backlight = {
/******************************************************************************
* IrDA
******************************************************************************/
-static int palmld_irda_startup(struct device *dev)
-{
- int err;
- err = gpio_request(GPIO_NR_PALMLD_IR_DISABLE, "IR DISABLE");
- if (err)
- goto err;
- err = gpio_direction_output(GPIO_NR_PALMLD_IR_DISABLE, 1);
- if (err)
- gpio_free(GPIO_NR_PALMLD_IR_DISABLE);
-err:
- return err;
-}
-
-static void palmld_irda_shutdown(struct device *dev)
-{
- gpio_free(GPIO_NR_PALMLD_IR_DISABLE);
-}
-
-static void palmld_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_NR_PALMLD_IR_DISABLE, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
static struct pxaficp_platform_data palmld_ficp_platform_data = {
- .startup = palmld_irda_startup,
- .shutdown = palmld_irda_shutdown,
- .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
- .transceiver_mode = palmld_irda_transceiver_mode,
+ .gpio_pwdown = GPIO_NR_PALMLD_IR_DISABLE,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/******************************************************************************
@@ -560,6 +502,7 @@ static struct platform_device *devices[] __initdata = {
&power_supply,
&palmld_asoc,
&palmld_hdd,
+ &palmld_flash,
};
static struct map_desc palmld_io_desc[] __initdata = {
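
The IrDA conversion above follows the same pattern as the MMC one: the startup()/shutdown()/transceiver_mode() trio, which only ever toggled a single "IR disable" GPIO, is replaced by handing that GPIO to the driver as gpio_pwdown (or -1 when the board has no such line, as lubbock and mainstone do). A minimal sketch with a hypothetical GPIO number:

/* Illustrative sketch only -- not part of the patch. */
#include <mach/irda.h>

#define EXAMPLE_GPIO_IR_DISABLE	45	/* hypothetical */

static struct pxaficp_platform_data example_ficp_platform_data = {
	.gpio_pwdown		= EXAMPLE_GPIO_IR_DISABLE,
	.transceiver_cap	= IR_SIRMODE | IR_OFF,
};

/*	pxa_set_ficp_info(&example_ficp_platform_data);	*/
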
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 33f726ff55e..2dd7ce28556 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -124,83 +124,12 @@ static unsigned long palmt5_pin_config[] __initdata = {
/******************************************************************************
* SD/MMC card controller
******************************************************************************/
-static int palmt5_mci_init(struct device *dev, irq_handler_t palmt5_detect_int,
- void *data)
-{
- int err = 0;
-
- /* Setup an interrupt for detecting card insert/remove events */
- err = gpio_request(GPIO_NR_PALMT5_SD_DETECT_N, "SD IRQ");
- if (err)
- goto err;
- err = gpio_direction_input(GPIO_NR_PALMT5_SD_DETECT_N);
- if (err)
- goto err2;
- err = request_irq(gpio_to_irq(GPIO_NR_PALMT5_SD_DETECT_N),
- palmt5_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "SD/MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
- __func__);
- goto err2;
- }
-
- err = gpio_request(GPIO_NR_PALMT5_SD_POWER, "SD_POWER");
- if (err)
- goto err3;
- err = gpio_direction_output(GPIO_NR_PALMT5_SD_POWER, 0);
- if (err)
- goto err4;
-
- err = gpio_request(GPIO_NR_PALMT5_SD_READONLY, "SD_READONLY");
- if (err)
- goto err4;
- err = gpio_direction_input(GPIO_NR_PALMT5_SD_READONLY);
- if (err)
- goto err5;
-
- printk(KERN_DEBUG "%s: irq registered\n", __func__);
-
- return 0;
-
-err5:
- gpio_free(GPIO_NR_PALMT5_SD_READONLY);
-err4:
- gpio_free(GPIO_NR_PALMT5_SD_POWER);
-err3:
- free_irq(gpio_to_irq(GPIO_NR_PALMT5_SD_DETECT_N), data);
-err2:
- gpio_free(GPIO_NR_PALMT5_SD_DETECT_N);
-err:
- return err;
-}
-
-static void palmt5_mci_exit(struct device *dev, void *data)
-{
- gpio_free(GPIO_NR_PALMT5_SD_READONLY);
- gpio_free(GPIO_NR_PALMT5_SD_POWER);
- free_irq(IRQ_GPIO_PALMT5_SD_DETECT_N, data);
- gpio_free(GPIO_NR_PALMT5_SD_DETECT_N);
-}
-
-static void palmt5_mci_power(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
- gpio_set_value(GPIO_NR_PALMT5_SD_POWER, p_d->ocr_mask & (1 << vdd));
-}
-
-static int palmt5_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_NR_PALMT5_SD_READONLY);
-}
-
static struct pxamci_platform_data palmt5_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .setpower = palmt5_mci_power,
- .get_ro = palmt5_mci_get_ro,
- .init = palmt5_mci_init,
- .exit = palmt5_mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_NR_PALMT5_SD_DETECT_N,
+ .gpio_card_ro = GPIO_NR_PALMT5_SD_READONLY,
+ .gpio_power = GPIO_NR_PALMT5_SD_POWER,
+ .detect_delay = 20,
};
/******************************************************************************
@@ -314,35 +243,9 @@ static struct platform_device palmt5_backlight = {
/******************************************************************************
* IrDA
******************************************************************************/
-static int palmt5_irda_startup(struct device *dev)
-{
- int err;
- err = gpio_request(GPIO_NR_PALMT5_IR_DISABLE, "IR DISABLE");
- if (err)
- goto err;
- err = gpio_direction_output(GPIO_NR_PALMT5_IR_DISABLE, 1);
- if (err)
- gpio_free(GPIO_NR_PALMT5_IR_DISABLE);
-err:
- return err;
-}
-
-static void palmt5_irda_shutdown(struct device *dev)
-{
- gpio_free(GPIO_NR_PALMT5_IR_DISABLE);
-}
-
-static void palmt5_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_NR_PALMT5_IR_DISABLE, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
static struct pxaficp_platform_data palmt5_ficp_platform_data = {
- .startup = palmt5_irda_startup,
- .shutdown = palmt5_irda_shutdown,
- .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
- .transceiver_mode = palmt5_irda_transceiver_mode,
+ .gpio_pwdown = GPIO_NR_PALMT5_IR_DISABLE,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
new file mode 100644
index 00000000000..bb2cc0dd44e
--- /dev/null
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -0,0 +1,436 @@
+/*
+ * linux/arch/arm/mach-pxa/palmtc.c
+ *
+ * Support for the Palm Tungsten|C
+ *
+ * Author: Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on work of:
+ * Petr Blaha <p3t3@centrum.cz>
+ * Chetan S. Kumar <shivakumar.chetan@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/input.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/ucb1400.h>
+#include <linux/power_supply.h>
+#include <linux/gpio_keys.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/audio.h>
+#include <mach/palmtc.h>
+#include <mach/mmc.h>
+#include <mach/pxafb.h>
+#include <mach/mfp-pxa25x.h>
+#include <mach/irda.h>
+#include <mach/udc.h>
+#include <mach/pxa2xx-regs.h>
+
+#include "generic.h"
+#include "devices.h"
+
+/******************************************************************************
+ * Pin configuration
+ ******************************************************************************/
+static unsigned long palmtc_pin_config[] __initdata = {
+ /* MMC */
+ GPIO6_MMC_CLK,
+ GPIO8_MMC_CS0,
+ GPIO12_GPIO, /* detect */
+ GPIO32_GPIO, /* power */
+ GPIO54_GPIO, /* r/o switch */
+
+ /* PCMCIA */
+ GPIO52_nPCE_1,
+ GPIO53_nPCE_2,
+ GPIO50_nPIOR,
+ GPIO51_nPIOW,
+ GPIO49_nPWE,
+ GPIO48_nPOE,
+ GPIO52_nPCE_1,
+ GPIO53_nPCE_2,
+ GPIO57_nIOIS16,
+ GPIO56_nPWAIT,
+
+ /* AC97 */
+ GPIO28_AC97_BITCLK,
+ GPIO29_AC97_SDATA_IN_0,
+ GPIO30_AC97_SDATA_OUT,
+ GPIO31_AC97_SYNC,
+
+ /* IrDA */
+ GPIO45_GPIO, /* ir disable */
+ GPIO46_FICP_RXD,
+ GPIO47_FICP_TXD,
+
+ /* PWM */
+ GPIO17_PWM1_OUT,
+
+ /* USB */
+ GPIO4_GPIO, /* detect */
+ GPIO36_GPIO, /* pullup */
+
+ /* LCD */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+ GPIO77_LCD_BIAS,
+
+ /* MATRIX KEYPAD */
+ GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, /* in 0 */
+ GPIO9_GPIO | WAKEUP_ON_EDGE_BOTH, /* in 1 */
+ GPIO10_GPIO | WAKEUP_ON_EDGE_BOTH, /* in 2 */
+ GPIO11_GPIO | WAKEUP_ON_EDGE_BOTH, /* in 3 */
+ GPIO18_GPIO | MFP_LPM_DRIVE_LOW, /* out 0 */
+ GPIO19_GPIO | MFP_LPM_DRIVE_LOW, /* out 1 */
+ GPIO20_GPIO | MFP_LPM_DRIVE_LOW, /* out 2 */
+ GPIO21_GPIO | MFP_LPM_DRIVE_LOW, /* out 3 */
+ GPIO22_GPIO | MFP_LPM_DRIVE_LOW, /* out 4 */
+ GPIO23_GPIO | MFP_LPM_DRIVE_LOW, /* out 5 */
+ GPIO24_GPIO | MFP_LPM_DRIVE_LOW, /* out 6 */
+ GPIO25_GPIO | MFP_LPM_DRIVE_LOW, /* out 7 */
+ GPIO26_GPIO | MFP_LPM_DRIVE_LOW, /* out 8 */
+ GPIO27_GPIO | MFP_LPM_DRIVE_LOW, /* out 9 */
+ GPIO79_GPIO | MFP_LPM_DRIVE_LOW, /* out 10 */
+ GPIO80_GPIO | MFP_LPM_DRIVE_LOW, /* out 11 */
+
+ /* PXA GPIO KEYS */
+ GPIO7_GPIO | WAKEUP_ON_EDGE_BOTH, /* hotsync button on cradle */
+
+ /* MISC */
+ GPIO1_RST, /* reset */
+ GPIO2_GPIO, /* earphone detect */
+ GPIO16_GPIO, /* backlight switch */
+};
+
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
+static struct pxamci_platform_data palmtc_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_power = GPIO_NR_PALMTC_SD_POWER,
+ .gpio_card_ro = GPIO_NR_PALMTC_SD_READONLY,
+ .gpio_card_detect = GPIO_NR_PALMTC_SD_DETECT_N,
+ .detect_delay = 20,
+};
+
+/******************************************************************************
+ * GPIO keys
+ ******************************************************************************/
+static struct gpio_keys_button palmtc_pxa_buttons[] = {
+ {KEY_F8, GPIO_NR_PALMTC_HOTSYNC_BUTTON, 1, "HotSync Button", EV_KEY, 1},
+};
+
+static struct gpio_keys_platform_data palmtc_pxa_keys_data = {
+ .buttons = palmtc_pxa_buttons,
+ .nbuttons = ARRAY_SIZE(palmtc_pxa_buttons),
+};
+
+static struct platform_device palmtc_pxa_keys = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &palmtc_pxa_keys_data,
+ },
+};
+
+/******************************************************************************
+ * Backlight
+ ******************************************************************************/
+static int palmtc_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_PALMTC_BL_POWER, "BL POWER");
+ if (ret)
+ goto err;
+ ret = gpio_direction_output(GPIO_NR_PALMTC_BL_POWER, 1);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ gpio_free(GPIO_NR_PALMTC_BL_POWER);
+err:
+ return ret;
+}
+
+static int palmtc_backlight_notify(int brightness)
+{
+ /* backlight is on when GPIO16 AF0 is high */
+ gpio_set_value(GPIO_NR_PALMTC_BL_POWER, brightness);
+ return brightness;
+}
+
+static void palmtc_backlight_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_PALMTC_BL_POWER);
+}
+
+static struct platform_pwm_backlight_data palmtc_backlight_data = {
+ .pwm_id = 1,
+ .max_brightness = PALMTC_MAX_INTENSITY,
+ .dft_brightness = PALMTC_MAX_INTENSITY,
+ .pwm_period_ns = PALMTC_PERIOD_NS,
+ .init = palmtc_backlight_init,
+ .notify = palmtc_backlight_notify,
+ .exit = palmtc_backlight_exit,
+};
+
+static struct platform_device palmtc_backlight = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &pxa25x_device_pwm1.dev,
+ .platform_data = &palmtc_backlight_data,
+ },
+};
+
+/******************************************************************************
+ * IrDA
+ ******************************************************************************/
+static struct pxaficp_platform_data palmtc_ficp_platform_data = {
+ .gpio_pwdown = GPIO_NR_PALMTC_IR_DISABLE,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
+};
+
+/******************************************************************************
+ * Keyboard
+ ******************************************************************************/
+static const uint32_t palmtc_matrix_keys[] = {
+ KEY(0, 0, KEY_F1),
+ KEY(0, 1, KEY_X),
+ KEY(0, 2, KEY_POWER),
+ KEY(0, 3, KEY_TAB),
+ KEY(0, 4, KEY_A),
+ KEY(0, 5, KEY_Q),
+ KEY(0, 6, KEY_LEFTSHIFT),
+ KEY(0, 7, KEY_Z),
+ KEY(0, 8, KEY_S),
+ KEY(0, 9, KEY_W),
+ KEY(0, 10, KEY_E),
+ KEY(0, 11, KEY_UP),
+
+ KEY(1, 0, KEY_F2),
+ KEY(1, 1, KEY_DOWN),
+ KEY(1, 3, KEY_D),
+ KEY(1, 4, KEY_C),
+ KEY(1, 5, KEY_F),
+ KEY(1, 6, KEY_R),
+ KEY(1, 7, KEY_SPACE),
+ KEY(1, 8, KEY_V),
+ KEY(1, 9, KEY_G),
+ KEY(1, 10, KEY_T),
+ KEY(1, 11, KEY_LEFT),
+
+ KEY(2, 0, KEY_F3),
+ KEY(2, 1, KEY_LEFTCTRL),
+ KEY(2, 3, KEY_H),
+ KEY(2, 4, KEY_Y),
+ KEY(2, 5, KEY_N),
+ KEY(2, 6, KEY_J),
+ KEY(2, 7, KEY_U),
+ KEY(2, 8, KEY_M),
+ KEY(2, 9, KEY_K),
+ KEY(2, 10, KEY_I),
+ KEY(2, 11, KEY_RIGHT),
+
+ KEY(3, 0, KEY_F4),
+ KEY(3, 1, KEY_ENTER),
+ KEY(3, 3, KEY_DOT),
+ KEY(3, 4, KEY_L),
+ KEY(3, 5, KEY_O),
+ KEY(3, 6, KEY_LEFTALT),
+ KEY(3, 7, KEY_ENTER),
+ KEY(3, 8, KEY_BACKSPACE),
+ KEY(3, 9, KEY_P),
+ KEY(3, 10, KEY_B),
+ KEY(3, 11, KEY_FN),
+};
+
+static const struct matrix_keymap_data palmtc_keymap_data = {
+ .keymap = palmtc_matrix_keys,
+ .keymap_size = ARRAY_SIZE(palmtc_matrix_keys),
+};
+
+static const unsigned int palmtc_keypad_row_gpios[] = {
+ 0, 9, 10, 11
+};
+
+static const unsigned int palmtc_keypad_col_gpios[] = {
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 79, 80
+};
+
+static struct matrix_keypad_platform_data palmtc_keypad_platform_data = {
+ .keymap_data = &palmtc_keymap_data,
+	.row_gpios	= palmtc_keypad_row_gpios,
+	.num_row_gpios	= 4,
+	.col_gpios	= palmtc_keypad_col_gpios,
+	.num_col_gpios	= 12,
+ .active_low = 1,
+
+ .debounce_ms = 20,
+ .col_scan_delay_us = 5,
+};
+
+static struct platform_device palmtc_keyboard = {
+ .name = "matrix-keypad",
+ .id = -1,
+ .dev = {
+ .platform_data = &palmtc_keypad_platform_data,
+ },
+};
+
+/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct pxa2xx_udc_mach_info palmtc_udc_info __initdata = {
+ .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
+ .gpio_vbus_inverted = 1,
+ .gpio_pullup = GPIO_NR_PALMTC_USB_POWER,
+};
+
+/******************************************************************************
+ * Touchscreen / Battery / GPIO-extender
+ ******************************************************************************/
+static struct platform_device palmtc_ucb1400_core = {
+ .name = "ucb1400_core",
+ .id = -1,
+};
+
+/******************************************************************************
+ * NOR Flash
+ ******************************************************************************/
+static struct resource palmtc_flash_resource = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_16M - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct mtd_partition palmtc_flash_parts[] = {
+ {
+ .name = "U-Boot Bootloader",
+ .offset = 0x0,
+ .size = 0x40000,
+ },
+ {
+ .name = "Linux Kernel",
+ .offset = 0x40000,
+ .size = 0x2c0000,
+ },
+ {
+ .name = "Filesystem",
+ .offset = 0x300000,
+ .size = 0xcc0000,
+ },
+ {
+ .name = "U-Boot Environment",
+ .offset = 0xfc0000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct physmap_flash_data palmtc_flash_data = {
+ .width = 4,
+ .parts = palmtc_flash_parts,
+ .nr_parts = ARRAY_SIZE(palmtc_flash_parts),
+};
+
+static struct platform_device palmtc_flash = {
+ .name = "physmap-flash",
+ .id = -1,
+ .resource = &palmtc_flash_resource,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = &palmtc_flash_data,
+ },
+};
+
+/******************************************************************************
+ * Framebuffer
+ ******************************************************************************/
+static struct pxafb_mode_info palmtc_lcd_modes[] = {
+{
+ .pixclock = 115384,
+ .xres = 320,
+ .yres = 320,
+ .bpp = 16,
+
+ .left_margin = 27,
+ .right_margin = 7,
+ .upper_margin = 7,
+ .lower_margin = 8,
+
+ .hsync_len = 6,
+ .vsync_len = 1,
+},
+};
+
+static struct pxafb_mach_info palmtc_lcd_screen = {
+ .modes = palmtc_lcd_modes,
+ .num_modes = ARRAY_SIZE(palmtc_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+};
+
+/******************************************************************************
+ * Machine init
+ ******************************************************************************/
+static struct platform_device *devices[] __initdata = {
+ &palmtc_backlight,
+ &palmtc_ucb1400_core,
+ &palmtc_keyboard,
+ &palmtc_pxa_keys,
+ &palmtc_flash,
+};
+
+static void __init palmtc_init(void)
+{
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(palmtc_pin_config));
+
+ set_pxa_fb_info(&palmtc_lcd_screen);
+ pxa_set_mci_info(&palmtc_mci_platform_data);
+ pxa_set_udc_info(&palmtc_udc_info);
+ pxa_set_ac97_info(NULL);
+ pxa_set_ficp_info(&palmtc_ficp_platform_data);
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+MACHINE_START(PALMTC, "Palm Tungsten|C")
+ .phys_io = 0x40000000,
+ .boot_params = 0xa0000100,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .map_io = pxa_map_io,
+ .init_irq = pxa25x_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = palmtc_init
+MACHINE_END
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index d823b09801d..277c4062e3c 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -117,83 +117,11 @@ static unsigned long palmte2_pin_config[] __initdata = {
/******************************************************************************
* SD/MMC card controller
******************************************************************************/
-static int palmte2_mci_init(struct device *dev,
- irq_handler_t palmte2_detect_int, void *data)
-{
- int err = 0;
-
- /* Setup an interrupt for detecting card insert/remove events */
- err = gpio_request(GPIO_NR_PALMTE2_SD_DETECT_N, "SD IRQ");
- if (err)
- goto err;
- err = gpio_direction_input(GPIO_NR_PALMTE2_SD_DETECT_N);
- if (err)
- goto err2;
- err = request_irq(gpio_to_irq(GPIO_NR_PALMTE2_SD_DETECT_N),
- palmte2_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "SD/MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
- __func__);
- goto err2;
- }
-
- err = gpio_request(GPIO_NR_PALMTE2_SD_POWER, "SD_POWER");
- if (err)
- goto err3;
- err = gpio_direction_output(GPIO_NR_PALMTE2_SD_POWER, 0);
- if (err)
- goto err4;
-
- err = gpio_request(GPIO_NR_PALMTE2_SD_READONLY, "SD_READONLY");
- if (err)
- goto err4;
- err = gpio_direction_input(GPIO_NR_PALMTE2_SD_READONLY);
- if (err)
- goto err5;
-
- printk(KERN_DEBUG "%s: irq registered\n", __func__);
-
- return 0;
-
-err5:
- gpio_free(GPIO_NR_PALMTE2_SD_READONLY);
-err4:
- gpio_free(GPIO_NR_PALMTE2_SD_POWER);
-err3:
- free_irq(gpio_to_irq(GPIO_NR_PALMTE2_SD_DETECT_N), data);
-err2:
- gpio_free(GPIO_NR_PALMTE2_SD_DETECT_N);
-err:
- return err;
-}
-
-static void palmte2_mci_exit(struct device *dev, void *data)
-{
- gpio_free(GPIO_NR_PALMTE2_SD_READONLY);
- gpio_free(GPIO_NR_PALMTE2_SD_POWER);
- free_irq(gpio_to_irq(GPIO_NR_PALMTE2_SD_DETECT_N), data);
- gpio_free(GPIO_NR_PALMTE2_SD_DETECT_N);
-}
-
-static void palmte2_mci_power(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
- gpio_set_value(GPIO_NR_PALMTE2_SD_POWER, p_d->ocr_mask & (1 << vdd));
-}
-
-static int palmte2_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_NR_PALMTE2_SD_READONLY);
-}
-
static struct pxamci_platform_data palmte2_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .setpower = palmte2_mci_power,
- .get_ro = palmte2_mci_get_ro,
- .init = palmte2_mci_init,
- .exit = palmte2_mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_NR_PALMTE2_SD_DETECT_N,
+ .gpio_card_ro = GPIO_NR_PALMTE2_SD_READONLY,
+ .gpio_power = GPIO_NR_PALMTE2_SD_POWER,
};
/******************************************************************************
@@ -287,35 +215,9 @@ static struct platform_device palmte2_backlight = {
/******************************************************************************
* IrDA
******************************************************************************/
-static int palmte2_irda_startup(struct device *dev)
-{
- int err;
- err = gpio_request(GPIO_NR_PALMTE2_IR_DISABLE, "IR DISABLE");
- if (err)
- goto err;
- err = gpio_direction_output(GPIO_NR_PALMTE2_IR_DISABLE, 1);
- if (err)
- gpio_free(GPIO_NR_PALMTE2_IR_DISABLE);
-err:
- return err;
-}
-
-static void palmte2_irda_shutdown(struct device *dev)
-{
- gpio_free(GPIO_NR_PALMTE2_IR_DISABLE);
-}
-
-static void palmte2_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_NR_PALMTE2_IR_DISABLE, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
static struct pxaficp_platform_data palmte2_ficp_platform_data = {
- .startup = palmte2_irda_startup,
- .shutdown = palmte2_irda_shutdown,
- .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
- .transceiver_mode = palmte2_irda_transceiver_mode,
+ .gpio_pwdown = GPIO_NR_PALMTE2_IR_DISABLE,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 83d02087958..76a2b37eaf3 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -28,6 +28,10 @@
#include <linux/wm97xx_batt.h>
#include <linux/power_supply.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/physmap.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -131,6 +135,10 @@ static unsigned long palmtx_pin_config[] __initdata = {
GPIO34_FFUART_RXD,
GPIO39_FFUART_TXD,
+ /* NAND */
+ GPIO15_nCS_1,
+ GPIO18_RDY,
+
/* MISC. */
GPIO10_GPIO, /* hotsync button */
GPIO12_GPIO, /* power detect */
@@ -138,85 +146,50 @@ static unsigned long palmtx_pin_config[] __initdata = {
};
/******************************************************************************
- * SD/MMC card controller
+ * NOR Flash
******************************************************************************/
-static int palmtx_mci_init(struct device *dev, irq_handler_t palmtx_detect_int,
- void *data)
-{
- int err = 0;
-
- /* Setup an interrupt for detecting card insert/remove events */
- err = gpio_request(GPIO_NR_PALMTX_SD_DETECT_N, "SD IRQ");
- if (err)
- goto err;
- err = gpio_direction_input(GPIO_NR_PALMTX_SD_DETECT_N);
- if (err)
- goto err2;
- err = request_irq(gpio_to_irq(GPIO_NR_PALMTX_SD_DETECT_N),
- palmtx_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "SD/MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
- __func__);
- goto err2;
+static struct mtd_partition palmtx_partitions[] = {
+ {
+ .name = "Flash",
+ .offset = 0x00000000,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0
}
+};
- err = gpio_request(GPIO_NR_PALMTX_SD_POWER, "SD_POWER");
- if (err)
- goto err3;
- err = gpio_direction_output(GPIO_NR_PALMTX_SD_POWER, 0);
- if (err)
- goto err4;
-
- err = gpio_request(GPIO_NR_PALMTX_SD_READONLY, "SD_READONLY");
- if (err)
- goto err4;
- err = gpio_direction_input(GPIO_NR_PALMTX_SD_READONLY);
- if (err)
- goto err5;
-
- printk(KERN_DEBUG "%s: irq registered\n", __func__);
-
- return 0;
-
-err5:
- gpio_free(GPIO_NR_PALMTX_SD_READONLY);
-err4:
- gpio_free(GPIO_NR_PALMTX_SD_POWER);
-err3:
- free_irq(gpio_to_irq(GPIO_NR_PALMTX_SD_DETECT_N), data);
-err2:
- gpio_free(GPIO_NR_PALMTX_SD_DETECT_N);
-err:
- return err;
-}
-
-static void palmtx_mci_exit(struct device *dev, void *data)
-{
- gpio_free(GPIO_NR_PALMTX_SD_READONLY);
- gpio_free(GPIO_NR_PALMTX_SD_POWER);
- free_irq(gpio_to_irq(GPIO_NR_PALMTX_SD_DETECT_N), data);
- gpio_free(GPIO_NR_PALMTX_SD_DETECT_N);
-}
+static struct physmap_flash_data palmtx_flash_data[] = {
+ {
+ .width = 2, /* bankwidth in bytes */
+ .parts = palmtx_partitions,
+ .nr_parts = ARRAY_SIZE(palmtx_partitions)
+ }
+};
-static void palmtx_mci_power(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
- gpio_set_value(GPIO_NR_PALMTX_SD_POWER, p_d->ocr_mask & (1 << vdd));
-}
+static struct resource palmtx_flash_resource = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_8M - 1,
+ .flags = IORESOURCE_MEM,
+};
-static int palmtx_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_NR_PALMTX_SD_READONLY);
-}
+static struct platform_device palmtx_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .resource = &palmtx_flash_resource,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = palmtx_flash_data,
+ },
+};
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
static struct pxamci_platform_data palmtx_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .setpower = palmtx_mci_power,
- .get_ro = palmtx_mci_get_ro,
- .init = palmtx_mci_init,
- .exit = palmtx_mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_NR_PALMTX_SD_DETECT_N,
+ .gpio_card_ro = GPIO_NR_PALMTX_SD_READONLY,
+ .gpio_power = GPIO_NR_PALMTX_SD_POWER,
+ .detect_delay = 20,
};
/******************************************************************************
@@ -330,35 +303,9 @@ static struct platform_device palmtx_backlight = {
/******************************************************************************
* IrDA
******************************************************************************/
-static int palmtx_irda_startup(struct device *dev)
-{
- int err;
- err = gpio_request(GPIO_NR_PALMTX_IR_DISABLE, "IR DISABLE");
- if (err)
- goto err;
- err = gpio_direction_output(GPIO_NR_PALMTX_IR_DISABLE, 1);
- if (err)
- gpio_free(GPIO_NR_PALMTX_IR_DISABLE);
-err:
- return err;
-}
-
-static void palmtx_irda_shutdown(struct device *dev)
-{
- gpio_free(GPIO_NR_PALMTX_IR_DISABLE);
-}
-
-static void palmtx_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_NR_PALMTX_IR_DISABLE, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
static struct pxaficp_platform_data palmtx_ficp_platform_data = {
- .startup = palmtx_irda_startup,
- .shutdown = palmtx_irda_shutdown,
- .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
- .transceiver_mode = palmtx_irda_transceiver_mode,
+ .gpio_pwdown = GPIO_NR_PALMTX_IR_DISABLE,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/******************************************************************************
@@ -493,6 +440,68 @@ static struct pxafb_mach_info palmtx_lcd_screen = {
};
/******************************************************************************
+ * NAND Flash
+ ******************************************************************************/
+static void palmtx_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long)this->IO_ADDR_W;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, PALMTX_NAND_CLE_VIRT);
+ else if (ctrl & NAND_ALE)
+ writeb(cmd, PALMTX_NAND_ALE_VIRT);
+ else
+ writeb(cmd, nandaddr);
+}
+
+static struct mtd_partition palmtx_partition_info[] = {
+ [0] = {
+ .name = "palmtx-0",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+
+static const char *palmtx_part_probes[] = { "cmdlinepart", NULL };
+
+struct platform_nand_data palmtx_nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(palmtx_partition_info),
+ .partitions = palmtx_partition_info,
+ .chip_delay = 20,
+ .part_probe_types = palmtx_part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = palmtx_nand_cmd_ctl,
+ },
+};
+
+static struct resource palmtx_nand_resource[] = {
+ [0] = {
+ .start = PXA_CS1_PHYS,
+ .end = PXA_CS1_PHYS + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device palmtx_nand = {
+ .name = "gen_nand",
+ .num_resources = ARRAY_SIZE(palmtx_nand_resource),
+ .resource = palmtx_nand_resource,
+ .id = -1,
+ .dev = {
+ .platform_data = &palmtx_nand_platdata,
+ }
+};
+
+/******************************************************************************
* Power management - standby
******************************************************************************/
static void __init palmtx_pm_init(void)
@@ -518,6 +527,8 @@ static struct platform_device *devices[] __initdata = {
&power_supply,
&palmtx_asoc,
&palmtx_gpio_vbus,
+ &palmtx_flash,
+ &palmtx_nand,
};
static struct map_desc palmtx_io_desc[] __initdata = {
@@ -525,8 +536,18 @@ static struct map_desc palmtx_io_desc[] __initdata = {
.virtual = PALMTX_PCMCIA_VIRT,
.pfn = __phys_to_pfn(PALMTX_PCMCIA_PHYS),
.length = PALMTX_PCMCIA_SIZE,
- .type = MT_DEVICE
-},
+ .type = MT_DEVICE,
+}, {
+ .virtual = PALMTX_NAND_ALE_VIRT,
+ .pfn = __phys_to_pfn(PALMTX_NAND_ALE_PHYS),
+ .length = SZ_1M,
+ .type = MT_DEVICE,
+}, {
+ .virtual = PALMTX_NAND_CLE_VIRT,
+ .pfn = __phys_to_pfn(PALMTX_NAND_CLE_PHYS),
+ .length = SZ_1M,
+ .type = MT_DEVICE,
+}
};
static void __init palmtx_map_io(void)
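
The NAND support added to palmtx.c above relies on the generic gen_nand driver: the controller has no dedicated command or address register, so ALE and CLE are simply wired to address bits 24 and 25 of chip-select 1 and statically mapped at PALMTX_NAND_ALE_VIRT/PALMTX_NAND_CLE_VIRT by the new map_desc entries. The comment block below (illustrative only) restates the resulting decode.

/*
 * Illustrative sketch, not part of the patch.  Address decode used by
 * palmtx_nand_cmd_ctl() above, per the palmtx.h definitions:
 *
 *   data write          -> PXA_CS1_PHYS               (mapped by the gen_nand driver)
 *   ALE (address) write -> PXA_CS1_PHYS | (1 << 24)   -> PALMTX_NAND_ALE_VIRT
 *   CLE (command) write -> PXA_CS1_PHYS | (1 << 25)   -> PALMTX_NAND_CLE_VIRT
 *
 * so issuing a command or address byte is a single writeb() to the right
 * statically mapped window; no GPIO toggling is needed.
 */
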
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index c3645aa3fa3..c2bf493c5f5 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -129,88 +129,14 @@ static unsigned long palmz72_pin_config[] __initdata = {
/******************************************************************************
* SD/MMC card controller
******************************************************************************/
-static int palmz72_mci_init(struct device *dev,
- irq_handler_t palmz72_detect_int, void *data)
-{
- int err = 0;
-
- /* Setup an interrupt for detecting card insert/remove events */
- err = gpio_request(GPIO_NR_PALMZ72_SD_DETECT_N, "SD IRQ");
- if (err)
- goto err;
- err = gpio_direction_input(GPIO_NR_PALMZ72_SD_DETECT_N);
- if (err)
- goto err2;
- err = request_irq(gpio_to_irq(GPIO_NR_PALMZ72_SD_DETECT_N),
- palmz72_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "SD/MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
- __func__);
- goto err2;
- }
-
- /* SD_POWER is not actually power, but it is more like chip
- * select, i.e. it is inverted */
-
- err = gpio_request(GPIO_NR_PALMZ72_SD_POWER_N, "SD_POWER");
- if (err)
- goto err3;
- err = gpio_direction_output(GPIO_NR_PALMZ72_SD_POWER_N, 0);
- if (err)
- goto err4;
- err = gpio_request(GPIO_NR_PALMZ72_SD_RO, "SD_RO");
- if (err)
- goto err4;
- err = gpio_direction_input(GPIO_NR_PALMZ72_SD_RO);
- if (err)
- goto err5;
-
- printk(KERN_DEBUG "%s: irq registered\n", __func__);
-
- return 0;
-
-err5:
- gpio_free(GPIO_NR_PALMZ72_SD_RO);
-err4:
- gpio_free(GPIO_NR_PALMZ72_SD_POWER_N);
-err3:
- free_irq(gpio_to_irq(GPIO_NR_PALMZ72_SD_DETECT_N), data);
-err2:
- gpio_free(GPIO_NR_PALMZ72_SD_DETECT_N);
-err:
- return err;
-}
-
-static void palmz72_mci_exit(struct device *dev, void *data)
-{
- gpio_free(GPIO_NR_PALMZ72_SD_POWER_N);
- free_irq(gpio_to_irq(GPIO_NR_PALMZ72_SD_DETECT_N), data);
- gpio_free(GPIO_NR_PALMZ72_SD_DETECT_N);
- gpio_free(GPIO_NR_PALMZ72_SD_RO);
-}
-
-static void palmz72_mci_power(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
- if (p_d->ocr_mask & (1 << vdd))
- gpio_set_value(GPIO_NR_PALMZ72_SD_POWER_N, 0);
- else
- gpio_set_value(GPIO_NR_PALMZ72_SD_POWER_N, 1);
-}
-
-static int palmz72_mci_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_NR_PALMZ72_SD_RO);
-}
-
+/* SD_POWER is not actually power, but it is more like chip
+ * select, i.e. it is inverted */
static struct pxamci_platform_data palmz72_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .setpower = palmz72_mci_power,
- .get_ro = palmz72_mci_ro,
- .init = palmz72_mci_init,
- .exit = palmz72_mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_NR_PALMZ72_SD_DETECT_N,
+ .gpio_card_ro = GPIO_NR_PALMZ72_SD_RO,
+ .gpio_power = GPIO_NR_PALMZ72_SD_POWER_N,
+ .gpio_power_invert = 1,
};
/******************************************************************************
@@ -304,35 +230,9 @@ static struct platform_device palmz72_backlight = {
/******************************************************************************
* IrDA
******************************************************************************/
-static int palmz72_irda_startup(struct device *dev)
-{
- int err;
- err = gpio_request(GPIO_NR_PALMZ72_IR_DISABLE, "IR DISABLE");
- if (err)
- goto err;
- err = gpio_direction_output(GPIO_NR_PALMZ72_IR_DISABLE, 1);
- if (err)
- gpio_free(GPIO_NR_PALMZ72_IR_DISABLE);
-err:
- return err;
-}
-
-static void palmz72_irda_shutdown(struct device *dev)
-{
- gpio_free(GPIO_NR_PALMZ72_IR_DISABLE);
-}
-
-static void palmz72_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_NR_PALMZ72_IR_DISABLE, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
static struct pxaficp_platform_data palmz72_ficp_platform_data = {
- .startup = palmz72_irda_startup,
- .shutdown = palmz72_irda_shutdown,
+ .gpio_pwdown = GPIO_NR_PALMZ72_IR_DISABLE,
.transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = palmz72_irda_transceiver_mode,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 01791d74e08..bbda57078e0 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -321,11 +321,14 @@ static void pcm990_mci_exit(struct device *dev, void *data)
#define MSECS_PER_JIFFY (1000/HZ)
static struct pxamci_platform_data pcm990_mci_platform_data = {
- .detect_delay = 250 / MSECS_PER_JIFFY,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .init = pcm990_mci_init,
- .setpower = pcm990_mci_setpower,
- .exit = pcm990_mci_exit,
+ .detect_delay = 250 / MSECS_PER_JIFFY,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .init = pcm990_mci_init,
+ .setpower = pcm990_mci_setpower,
+ .exit = pcm990_mci_exit,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static struct pxaohci_platform_data pcm990_ohci_platform_data = {
@@ -427,25 +430,56 @@ static void pcm990_camera_free_bus(struct soc_camera_link *link)
gpio_bus_switch = -EINVAL;
}
-static struct soc_camera_link iclink = {
- .bus_id = 0, /* Must match with the camera ID above */
- .query_bus_param = pcm990_camera_query_bus_param,
- .set_bus_param = pcm990_camera_set_bus_param,
- .free_bus = pcm990_camera_free_bus,
-};
-
/* Board I2C devices. */
static struct i2c_board_info __initdata pcm990_i2c_devices[] = {
{
/* Must initialize before the camera(s) */
I2C_BOARD_INFO("pca9536", 0x41),
.platform_data = &pca9536_data,
- }, {
+ },
+};
+
+static struct i2c_board_info pcm990_camera_i2c[] = {
+ {
I2C_BOARD_INFO("mt9v022", 0x48),
- .platform_data = &iclink, /* With extender */
}, {
I2C_BOARD_INFO("mt9m001", 0x5d),
- .platform_data = &iclink, /* With extender */
+ },
+};
+
+static struct soc_camera_link iclink[] = {
+ {
+ .bus_id = 0, /* Must match with the camera ID */
+ .board_info = &pcm990_camera_i2c[0],
+ .i2c_adapter_id = 0,
+ .query_bus_param = pcm990_camera_query_bus_param,
+ .set_bus_param = pcm990_camera_set_bus_param,
+ .free_bus = pcm990_camera_free_bus,
+ .module_name = "mt9v022",
+ }, {
+ .bus_id = 0, /* Must match with the camera ID */
+ .board_info = &pcm990_camera_i2c[1],
+ .i2c_adapter_id = 0,
+ .query_bus_param = pcm990_camera_query_bus_param,
+ .set_bus_param = pcm990_camera_set_bus_param,
+ .free_bus = pcm990_camera_free_bus,
+ .module_name = "mt9m001",
+ },
+};
+
+static struct platform_device pcm990_camera[] = {
+ {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &iclink[0],
+ },
+ }, {
+ .name = "soc-camera-pdrv",
+ .id = 1,
+ .dev = {
+ .platform_data = &iclink[1],
+ },
},
};
#endif /* CONFIG_VIDEO_PXA27x ||CONFIG_VIDEO_PXA27x_MODULE */
@@ -501,6 +535,9 @@ void __init pcm990_baseboard_init(void)
pxa_set_camera_info(&pcm990_pxacamera_platform_data);
i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices));
+
+ platform_device_register(&pcm990_camera[0]);
+ platform_device_register(&pcm990_camera[1]);
#endif
printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n");
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 9352d4a3483..a186994f77f 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -245,20 +245,10 @@ static inline void poodle_init_spi(void) {}
* The card detect interrupt isn't debounced so we delay it by 250ms
* to give the card a chance to fully insert/eject.
*/
-static struct pxamci_platform_data poodle_mci_platform_data;
-
static int poodle_mci_init(struct device *dev, irq_handler_t poodle_detect_int, void *data)
{
int err;
- err = gpio_request(POODLE_GPIO_nSD_DETECT, "nSD_DETECT");
- if (err)
- goto err_out;
-
- err = gpio_request(POODLE_GPIO_nSD_WP, "nSD_WP");
- if (err)
- goto err_free_1;
-
err = gpio_request(POODLE_GPIO_SD_PWR, "SD_PWR");
if (err)
goto err_free_2;
@@ -267,34 +257,14 @@ static int poodle_mci_init(struct device *dev, irq_handler_t poodle_detect_int,
if (err)
goto err_free_3;
- gpio_direction_input(POODLE_GPIO_nSD_DETECT);
- gpio_direction_input(POODLE_GPIO_nSD_WP);
-
gpio_direction_output(POODLE_GPIO_SD_PWR, 0);
gpio_direction_output(POODLE_GPIO_SD_PWR1, 0);
- poodle_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
- err = request_irq(POODLE_IRQ_GPIO_nSD_DETECT, poodle_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- pr_err("%s: MMC/SD: can't request MMC card detect IRQ\n",
- __func__);
- goto err_free_4;
- }
-
return 0;
-err_free_4:
- gpio_free(POODLE_GPIO_SD_PWR1);
err_free_3:
gpio_free(POODLE_GPIO_SD_PWR);
err_free_2:
- gpio_free(POODLE_GPIO_nSD_WP);
-err_free_1:
- gpio_free(POODLE_GPIO_nSD_DETECT);
-err_out:
return err;
}
@@ -312,62 +282,29 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
}
}
-static int poodle_mci_get_ro(struct device *dev)
-{
- return !!gpio_get_value(POODLE_GPIO_nSD_WP);
- return GPLR(POODLE_GPIO_nSD_WP) & GPIO_bit(POODLE_GPIO_nSD_WP);
-}
-
-
static void poodle_mci_exit(struct device *dev, void *data)
{
- free_irq(POODLE_IRQ_GPIO_nSD_DETECT, data);
gpio_free(POODLE_GPIO_SD_PWR1);
gpio_free(POODLE_GPIO_SD_PWR);
- gpio_free(POODLE_GPIO_nSD_WP);
- gpio_free(POODLE_GPIO_nSD_DETECT);
}
static struct pxamci_platform_data poodle_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = poodle_mci_init,
- .get_ro = poodle_mci_get_ro,
- .setpower = poodle_mci_setpower,
- .exit = poodle_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .init = poodle_mci_init,
+ .setpower = poodle_mci_setpower,
+ .exit = poodle_mci_exit,
+	.gpio_card_detect	= POODLE_GPIO_nSD_DETECT,
+ .gpio_card_ro = POODLE_GPIO_nSD_WP,
+ .gpio_power = -1,
};
/*
* Irda
*/
-static void poodle_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(POODLE_GPIO_IR_ON, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
-static int poodle_irda_startup(struct device *dev)
-{
- int err;
-
- err = gpio_request(POODLE_GPIO_IR_ON, "IR_ON");
- if (err)
- return err;
-
- gpio_direction_output(POODLE_GPIO_IR_ON, 1);
- return 0;
-}
-
-static void poodle_irda_shutdown(struct device *dev)
-{
- gpio_free(POODLE_GPIO_IR_ON);
-}
-
static struct pxaficp_platform_data poodle_ficp_platform_data = {
+ .gpio_pwdown = POODLE_GPIO_IR_ON,
.transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = poodle_irda_transceiver_mode,
- .startup = poodle_irda_startup,
- .shutdown = poodle_irda_shutdown,
};
@@ -521,6 +458,7 @@ static void __init poodle_init(void)
set_pxa_fb_parent(&poodle_locomo_device.dev);
set_pxa_fb_info(&poodle_fb_info);
pxa_set_udc_info(&udc_info);
+ poodle_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&poodle_mci_platform_data);
pxa_set_ficp_info(&poodle_ficp_platform_data);
pxa_set_i2c_info(NULL);
diff --git a/arch/arm/mach-pxa/pxa2xx.c b/arch/arm/mach-pxa/pxa2xx.c
index 2f3394f8591..868270421b8 100644
--- a/arch/arm/mach-pxa/pxa2xx.c
+++ b/arch/arm/mach-pxa/pxa2xx.c
@@ -52,3 +52,4 @@ void pxa2xx_transceiver_mode(struct device *dev, int mode)
} else
BUG();
}
+EXPORT_SYMBOL_GPL(pxa2xx_transceiver_mode);
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index 4ba6d21f851..f4af6e2bef8 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -84,9 +84,11 @@ static struct mfp_addr_map pxa310_mfp_addr_map[] __initdata = {
};
static DEFINE_PXA3_CKEN(common_nand, NAND, 156000000, 0);
+static DEFINE_PXA3_CKEN(gcu, PXA300_GCU, 0, 0);
static struct clk_lookup common_clkregs[] = {
INIT_CLKREG(&clk_common_nand, "pxa3xx-nand", NULL),
+ INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL),
};
static DEFINE_PXA3_CKEN(pxa310_mmc3, MMC3, 19500000, 0);
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index 8b3d97efada..c7373e74a10 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -78,9 +78,11 @@ static struct mfp_addr_map pxa320_mfp_addr_map[] __initdata = {
};
static DEFINE_PXA3_CKEN(pxa320_nand, NAND, 104000000, 0);
+static DEFINE_PXA3_CKEN(gcu, PXA320_GCU, 0, 0);
static struct clk_lookup pxa320_clkregs[] = {
INIT_CLKREG(&clk_pxa320_nand, "pxa3xx-nand", NULL),
+ INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL),
};
static int __init pxa320_init(void)
diff --git a/arch/arm/mach-pxa/pxa930.c b/arch/arm/mach-pxa/pxa930.c
index 71131742fff..06429200828 100644
--- a/arch/arm/mach-pxa/pxa930.c
+++ b/arch/arm/mach-pxa/pxa930.c
@@ -176,13 +176,30 @@ static struct mfp_addr_map pxa930_mfp_addr_map[] __initdata = {
MFP_ADDR_END,
};
+static struct mfp_addr_map pxa935_mfp_addr_map[] __initdata = {
+ MFP_ADDR(GPIO159, 0x0524),
+ MFP_ADDR(GPIO163, 0x0534),
+ MFP_ADDR(GPIO167, 0x0544),
+ MFP_ADDR(GPIO168, 0x0548),
+ MFP_ADDR(GPIO169, 0x054c),
+ MFP_ADDR(GPIO170, 0x0550),
+ MFP_ADDR(GPIO171, 0x0554),
+ MFP_ADDR(GPIO172, 0x0558),
+ MFP_ADDR(GPIO173, 0x055c),
+
+ MFP_ADDR_END,
+};
+
static int __init pxa930_init(void)
{
- if (cpu_is_pxa930()) {
+ if (cpu_is_pxa930() || cpu_is_pxa935()) {
mfp_init_base(io_p2v(MFPR_BASE));
mfp_init_addr(pxa930_mfp_addr_map);
}
+ if (cpu_is_pxa935())
+ mfp_init_addr(pxa935_mfp_addr_map);
+
return 0;
}
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index dda310fe71c..ee8d6038ce8 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -24,6 +24,7 @@
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
#include <linux/mtd/sharpsl.h>
+#include <linux/input/matrix_keypad.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
@@ -111,6 +112,26 @@ static unsigned long spitz_pin_config[] __initdata = {
GPIO105_GPIO, /* SPITZ_GPIO_CF_IRQ */
GPIO106_GPIO, /* SPITZ_GPIO_CF2_IRQ */
+ /* GPIO matrix keypad */
+ GPIO88_GPIO, /* column 0 */
+ GPIO23_GPIO, /* column 1 */
+ GPIO24_GPIO, /* column 2 */
+ GPIO25_GPIO, /* column 3 */
+ GPIO26_GPIO, /* column 4 */
+ GPIO27_GPIO, /* column 5 */
+ GPIO52_GPIO, /* column 6 */
+ GPIO103_GPIO, /* column 7 */
+ GPIO107_GPIO, /* column 8 */
+ GPIO108_GPIO, /* column 9 */
+ GPIO114_GPIO, /* column 10 */
+ GPIO12_GPIO, /* row 0 */
+ GPIO17_GPIO, /* row 1 */
+ GPIO91_GPIO, /* row 2 */
+ GPIO34_GPIO, /* row 3 */
+ GPIO36_GPIO, /* row 4 */
+ GPIO38_GPIO, /* row 5 */
+ GPIO39_GPIO, /* row 6 */
+
/* I2C */
GPIO117_I2C_SCL,
GPIO118_I2C_SDA,
@@ -242,9 +263,115 @@ EXPORT_SYMBOL(spitzscoop2_device);
/*
* Spitz Keyboard Device
*/
+#define SPITZ_KEY_CALENDAR KEY_F1
+#define SPITZ_KEY_ADDRESS KEY_F2
+#define SPITZ_KEY_FN KEY_F3
+#define SPITZ_KEY_CANCEL KEY_F4
+#define SPITZ_KEY_EXOK KEY_F5
+#define SPITZ_KEY_EXCANCEL KEY_F6
+#define SPITZ_KEY_EXJOGDOWN KEY_F7
+#define SPITZ_KEY_EXJOGUP KEY_F8
+#define SPITZ_KEY_JAP1 KEY_LEFTALT
+#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
+#define SPITZ_KEY_SYNC KEY_F9
+#define SPITZ_KEY_MAIL KEY_F10
+#define SPITZ_KEY_OK KEY_F11
+#define SPITZ_KEY_MENU KEY_F12
+
+static const uint32_t spitzkbd_keymap[] = {
+ KEY(0, 0, KEY_LEFTCTRL),
+ KEY(0, 1, KEY_1),
+ KEY(0, 2, KEY_3),
+ KEY(0, 3, KEY_5),
+ KEY(0, 4, KEY_6),
+ KEY(0, 5, KEY_7),
+ KEY(0, 6, KEY_9),
+ KEY(0, 7, KEY_0),
+ KEY(0, 8, KEY_BACKSPACE),
+ KEY(0, 9, SPITZ_KEY_EXOK), /* EXOK */
+ KEY(0, 10, SPITZ_KEY_EXCANCEL), /* EXCANCEL */
+ KEY(1, 1, KEY_2),
+ KEY(1, 2, KEY_4),
+ KEY(1, 3, KEY_R),
+ KEY(1, 4, KEY_Y),
+ KEY(1, 5, KEY_8),
+ KEY(1, 6, KEY_I),
+ KEY(1, 7, KEY_O),
+ KEY(1, 8, KEY_P),
+ KEY(1, 9, SPITZ_KEY_EXJOGDOWN), /* EXJOGDOWN */
+ KEY(1, 10, SPITZ_KEY_EXJOGUP), /* EXJOGUP */
+ KEY(2, 0, KEY_TAB),
+ KEY(2, 1, KEY_Q),
+ KEY(2, 2, KEY_E),
+ KEY(2, 3, KEY_T),
+ KEY(2, 4, KEY_G),
+ KEY(2, 5, KEY_U),
+ KEY(2, 6, KEY_J),
+ KEY(2, 7, KEY_K),
+ KEY(3, 0, SPITZ_KEY_ADDRESS), /* ADDRESS */
+ KEY(3, 1, KEY_W),
+ KEY(3, 2, KEY_S),
+ KEY(3, 3, KEY_F),
+ KEY(3, 4, KEY_V),
+ KEY(3, 5, KEY_H),
+ KEY(3, 6, KEY_M),
+ KEY(3, 7, KEY_L),
+ KEY(3, 9, KEY_RIGHTSHIFT),
+ KEY(4, 0, SPITZ_KEY_CALENDAR), /* CALENDAR */
+ KEY(4, 1, KEY_A),
+ KEY(4, 2, KEY_D),
+ KEY(4, 3, KEY_C),
+ KEY(4, 4, KEY_B),
+ KEY(4, 5, KEY_N),
+ KEY(4, 6, KEY_DOT),
+ KEY(4, 8, KEY_ENTER),
+ KEY(4, 9, KEY_LEFTSHIFT),
+ KEY(5, 0, SPITZ_KEY_MAIL), /* MAIL */
+ KEY(5, 1, KEY_Z),
+ KEY(5, 2, KEY_X),
+ KEY(5, 3, KEY_MINUS),
+ KEY(5, 4, KEY_SPACE),
+ KEY(5, 5, KEY_COMMA),
+ KEY(5, 7, KEY_UP),
+ KEY(5, 10, SPITZ_KEY_FN), /* FN */
+ KEY(6, 0, KEY_SYSRQ),
+ KEY(6, 1, SPITZ_KEY_JAP1), /* JAP1 */
+ KEY(6, 2, SPITZ_KEY_JAP2), /* JAP2 */
+ KEY(6, 3, SPITZ_KEY_CANCEL), /* CANCEL */
+ KEY(6, 4, SPITZ_KEY_OK), /* OK */
+ KEY(6, 5, SPITZ_KEY_MENU), /* MENU */
+ KEY(6, 6, KEY_LEFT),
+ KEY(6, 7, KEY_DOWN),
+ KEY(6, 8, KEY_RIGHT),
+};
+
+static const struct matrix_keymap_data spitzkbd_keymap_data = {
+ .keymap = spitzkbd_keymap,
+ .keymap_size = ARRAY_SIZE(spitzkbd_keymap),
+};
+
+static const uint32_t spitzkbd_row_gpios[] =
+ { 12, 17, 91, 34, 36, 38, 39 };
+static const uint32_t spitzkbd_col_gpios[] =
+ { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 };
+
+static struct matrix_keypad_platform_data spitzkbd_pdata = {
+ .keymap_data = &spitzkbd_keymap_data,
+ .row_gpios = spitzkbd_row_gpios,
+ .col_gpios = spitzkbd_col_gpios,
+ .num_row_gpios = ARRAY_SIZE(spitzkbd_row_gpios),
+ .num_col_gpios = ARRAY_SIZE(spitzkbd_col_gpios),
+ .col_scan_delay_us = 10,
+ .debounce_ms = 10,
+ .wakeup = 1,
+};
+
static struct platform_device spitzkbd_device = {
- .name = "spitz-keyboard",
+ .name = "matrix-keypad",
.id = -1,
+ .dev = {
+ .platform_data = &spitzkbd_pdata,
+ },
};
@@ -296,6 +423,7 @@ static struct ads7846_platform_data spitz_ads7846_info = {
.vref_delay_usecs = 100,
.x_plate_ohms = 419,
.y_plate_ohms = 486,
+ .pressure_max = 1024,
.gpio_pendown = SPITZ_GPIO_TP_INT,
.wait_for_sync = spitz_wait_for_hsync,
};
@@ -378,45 +506,6 @@ static inline void spitz_init_spi(void) {}
* The card detect interrupt isn't debounced so we delay it by 250ms
* to give the card a chance to fully insert/eject.
*/
-
-static struct pxamci_platform_data spitz_mci_platform_data;
-
-static int spitz_mci_init(struct device *dev, irq_handler_t spitz_detect_int, void *data)
-{
- int err;
-
- err = gpio_request(SPITZ_GPIO_nSD_DETECT, "nSD_DETECT");
- if (err)
- goto err_out;
-
- err = gpio_request(SPITZ_GPIO_nSD_WP, "nSD_WP");
- if (err)
- goto err_free_1;
-
- gpio_direction_input(SPITZ_GPIO_nSD_DETECT);
- gpio_direction_input(SPITZ_GPIO_nSD_WP);
-
- spitz_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
- err = request_irq(SPITZ_IRQ_GPIO_nSD_DETECT, spitz_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- pr_err("%s: MMC/SD: can't request MMC card detect IRQ\n",
- __func__);
- goto err_free_2;
- }
- return 0;
-
-err_free_2:
- gpio_free(SPITZ_GPIO_nSD_WP);
-err_free_1:
- gpio_free(SPITZ_GPIO_nSD_DETECT);
-err_out:
- return err;
-}
-
static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -427,24 +516,12 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
spitz_card_pwr_ctrl(SPITZ_PWR_SD, 0x0000);
}
-static int spitz_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(SPITZ_GPIO_nSD_WP);
-}
-
-static void spitz_mci_exit(struct device *dev, void *data)
-{
- free_irq(SPITZ_IRQ_GPIO_nSD_DETECT, data);
- gpio_free(SPITZ_GPIO_nSD_WP);
- gpio_free(SPITZ_GPIO_nSD_DETECT);
-}
-
static struct pxamci_platform_data spitz_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = spitz_mci_init,
- .get_ro = spitz_mci_get_ro,
- .setpower = spitz_mci_setpower,
- .exit = spitz_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .setpower = spitz_mci_setpower,
+ .gpio_card_detect = SPITZ_GPIO_nSD_DETECT,
+ .gpio_card_ro = SPITZ_GPIO_nSD_WP,
+ .gpio_power = -1,
};
@@ -484,50 +561,10 @@ static struct pxaohci_platform_data spitz_ohci_platform_data = {
/*
* Irda
*/
-static int spitz_irda_startup(struct device *dev)
-{
- int rc;
-
- rc = gpio_request(SPITZ_GPIO_IR_ON, "IrDA on");
- if (rc)
- goto err;
-
- rc = gpio_direction_output(SPITZ_GPIO_IR_ON, 1);
- if (rc)
- goto err_dir;
-
- return 0;
-
-err_dir:
- gpio_free(SPITZ_GPIO_IR_ON);
-err:
- return rc;
-}
-
-static void spitz_irda_shutdown(struct device *dev)
-{
- gpio_free(SPITZ_GPIO_IR_ON);
-}
-
-static void spitz_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(SPITZ_GPIO_IR_ON, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
-#ifdef CONFIG_MACH_AKITA
-static void akita_irda_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(AKITA_GPIO_IR_ON, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-#endif
static struct pxaficp_platform_data spitz_ficp_platform_data = {
+/* .gpio_pwdown is set in spitz_init() and akita_init() accordingly */
.transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = spitz_irda_transceiver_mode,
- .startup = spitz_irda_startup,
- .shutdown = spitz_irda_shutdown,
};
@@ -695,6 +732,7 @@ static void __init common_init(void)
spitz_init_spi();
platform_add_devices(devices, ARRAY_SIZE(devices));
+ spitz_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&spitz_mci_platform_data);
pxa_set_ohci_info(&spitz_ohci_platform_data);
pxa_set_ficp_info(&spitz_ficp_platform_data);
@@ -705,6 +743,8 @@ static void __init common_init(void)
#if defined(CONFIG_MACH_SPITZ) || defined(CONFIG_MACH_BORZOI)
static void __init spitz_init(void)
{
+ spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON;
+
platform_scoop_config = &spitz_pcmcia_config;
common_init();
@@ -747,7 +787,7 @@ static struct nand_ecclayout akita_oobinfo = {
static void __init akita_init(void)
{
- spitz_ficp_platform_data.transceiver_mode = akita_irda_transceiver_mode;
+ spitz_ficp_platform_data.gpio_pwdown = AKITA_GPIO_IR_ON;
sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
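
For reference, each KEY() entry in spitzkbd_keymap above packs row, column and keycode into a single 32-bit word (row << 24 | column << 16 | keycode, per include/linux/input/matrix_keypad.h). A small illustrative decoder, assuming that packing; the function name is hypothetical.

    #include <linux/input.h>
    #include <linux/input/matrix_keypad.h>

    /* Return the keycode mapped at (row, col), or KEY_RESERVED if unmapped. */
    static unsigned short example_keymap_lookup(const uint32_t *keymap,
    					    size_t entries,
    					    unsigned int row, unsigned int col)
    {
    	size_t i;

    	for (i = 0; i < entries; i++) {
    		if ((keymap[i] >> 24) == row &&
    		    ((keymap[i] >> 16) & 0xff) == col)
    			return keymap[i] & 0xffff;
    	}
    	return KEY_RESERVED;
    }

    /* e.g. example_keymap_lookup(spitzkbd_keymap,
     *			      ARRAY_SIZE(spitzkbd_keymap), 0, 1) yields KEY_1. */
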
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 117ad5920e5..e81a52673d4 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -247,49 +247,10 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
/*
* MMC/SD Device
*/
-static struct pxamci_platform_data tosa_mci_platform_data;
-
static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void *data)
{
int err;
- tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
- err = gpio_request(TOSA_GPIO_nSD_DETECT, "MMC/SD card detect");
- if (err) {
- printk(KERN_ERR "tosa_mci_init: can't request nSD_DETECT gpio\n");
- goto err_gpio_detect;
- }
- err = gpio_direction_input(TOSA_GPIO_nSD_DETECT);
- if (err)
- goto err_gpio_detect_dir;
-
- err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "MMC/SD card detect", data);
- if (err) {
- printk(KERN_ERR "tosa_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
- goto err_irq;
- }
-
- err = gpio_request(TOSA_GPIO_SD_WP, "SD Write Protect");
- if (err) {
- printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n");
- goto err_gpio_wp;
- }
- err = gpio_direction_input(TOSA_GPIO_SD_WP);
- if (err)
- goto err_gpio_wp_dir;
-
- err = gpio_request(TOSA_GPIO_PWR_ON, "SD Power");
- if (err) {
- printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n");
- goto err_gpio_pwr;
- }
- err = gpio_direction_output(TOSA_GPIO_PWR_ON, 0);
- if (err)
- goto err_gpio_pwr_dir;
-
err = gpio_request(TOSA_GPIO_nSD_INT, "SD Int");
if (err) {
		printk(KERN_ERR "tosa_mci_init: can't request SD_INT gpio\n");
@@ -304,51 +265,21 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void
err_gpio_int_dir:
gpio_free(TOSA_GPIO_nSD_INT);
err_gpio_int:
-err_gpio_pwr_dir:
- gpio_free(TOSA_GPIO_PWR_ON);
-err_gpio_pwr:
-err_gpio_wp_dir:
- gpio_free(TOSA_GPIO_SD_WP);
-err_gpio_wp:
- free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data);
-err_irq:
-err_gpio_detect_dir:
- gpio_free(TOSA_GPIO_nSD_DETECT);
-err_gpio_detect:
return err;
}
-static void tosa_mci_setpower(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data* p_d = dev->platform_data;
-
- if (( 1 << vdd) & p_d->ocr_mask) {
- gpio_set_value(TOSA_GPIO_PWR_ON, 1);
- } else {
- gpio_set_value(TOSA_GPIO_PWR_ON, 0);
- }
-}
-
-static int tosa_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(TOSA_GPIO_SD_WP);
-}
-
static void tosa_mci_exit(struct device *dev, void *data)
{
gpio_free(TOSA_GPIO_nSD_INT);
- gpio_free(TOSA_GPIO_PWR_ON);
- gpio_free(TOSA_GPIO_SD_WP);
- free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data);
- gpio_free(TOSA_GPIO_nSD_DETECT);
}
static struct pxamci_platform_data tosa_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = tosa_mci_init,
- .get_ro = tosa_mci_get_ro,
- .setpower = tosa_mci_setpower,
- .exit = tosa_mci_exit,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .init = tosa_mci_init,
+ .exit = tosa_mci_exit,
+ .gpio_card_detect = TOSA_GPIO_nSD_DETECT,
+ .gpio_card_ro = TOSA_GPIO_SD_WP,
+ .gpio_power = TOSA_GPIO_PWR_ON,
};
/*
@@ -406,10 +337,11 @@ static void tosa_irda_shutdown(struct device *dev)
}
static struct pxaficp_platform_data tosa_ficp_platform_data = {
- .transceiver_cap = IR_SIRMODE | IR_OFF,
- .transceiver_mode = tosa_irda_transceiver_mode,
- .startup = tosa_irda_startup,
- .shutdown = tosa_irda_shutdown,
+ .gpio_pwdown = -1,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
+ .transceiver_mode = tosa_irda_transceiver_mode,
+ .startup = tosa_irda_startup,
+ .shutdown = tosa_irda_shutdown,
};
/*
@@ -910,6 +842,7 @@ static void __init tosa_init(void)
dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12);
dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16);
+ tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&tosa_mci_platform_data);
pxa_set_udc_info(&udc_info);
pxa_set_ficp_info(&tosa_ficp_platform_data);
diff --git a/arch/arm/mach-pxa/treo680.c b/arch/arm/mach-pxa/treo680.c
index 753ec4df17b..fe085076fbf 100644
--- a/arch/arm/mach-pxa/treo680.c
+++ b/arch/arm/mach-pxa/treo680.c
@@ -153,87 +153,11 @@ static unsigned long treo680_pin_config[] __initdata = {
/******************************************************************************
* SD/MMC card controller
******************************************************************************/
-static int treo680_mci_init(struct device *dev,
- irq_handler_t treo680_detect_int, void *data)
-{
- int err = 0;
-
- /* Setup an interrupt for detecting card insert/remove events */
- err = gpio_request(GPIO_NR_TREO680_SD_DETECT_N, "SD IRQ");
-
- if (err)
- goto err;
-
- err = gpio_direction_input(GPIO_NR_TREO680_SD_DETECT_N);
- if (err)
- goto err2;
-
- err = request_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N),
- treo680_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "SD/MMC card detect", data);
-
- if (err) {
- dev_err(dev, "%s: cannot request SD/MMC card detect IRQ\n",
- __func__);
- goto err2;
- }
-
- err = gpio_request(GPIO_NR_TREO680_SD_POWER, "SD_POWER");
- if (err)
- goto err3;
-
- err = gpio_direction_output(GPIO_NR_TREO680_SD_POWER, 1);
- if (err)
- goto err4;
-
- err = gpio_request(GPIO_NR_TREO680_SD_READONLY, "SD_READONLY");
- if (err)
- goto err4;
-
- err = gpio_direction_input(GPIO_NR_TREO680_SD_READONLY);
- if (err)
- goto err5;
-
- return 0;
-
-err5:
- gpio_free(GPIO_NR_TREO680_SD_READONLY);
-err4:
- gpio_free(GPIO_NR_TREO680_SD_POWER);
-err3:
- free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
-err2:
- gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
-err:
- return err;
-}
-
-static void treo680_mci_exit(struct device *dev, void *data)
-{
- gpio_free(GPIO_NR_TREO680_SD_READONLY);
- gpio_free(GPIO_NR_TREO680_SD_POWER);
- free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
- gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
-}
-
-static void treo680_mci_power(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data *p_d = dev->platform_data;
- gpio_set_value(GPIO_NR_TREO680_SD_POWER, p_d->ocr_mask & (1 << vdd));
-}
-
-static int treo680_mci_get_ro(struct device *dev)
-{
- return gpio_get_value(GPIO_NR_TREO680_SD_READONLY);
-}
-
static struct pxamci_platform_data treo680_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .setpower = treo680_mci_power,
- .get_ro = treo680_mci_get_ro,
- .init = treo680_mci_init,
- .exit = treo680_mci_exit,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO_NR_TREO680_SD_DETECT_N,
+ .gpio_card_ro = GPIO_NR_TREO680_SD_READONLY,
+ .gpio_power = GPIO_NR_TREO680_SD_POWER,
};
/******************************************************************************
@@ -330,16 +254,9 @@ static int treo680_backlight_init(struct device *dev)
ret = gpio_direction_output(GPIO_NR_TREO680_BL_POWER, 0);
if (ret)
goto err2;
- ret = gpio_request(GPIO_NR_TREO680_LCD_POWER, "LCD POWER");
- if (ret)
- goto err2;
- ret = gpio_direction_output(GPIO_NR_TREO680_LCD_POWER, 0);
- if (ret)
- goto err3;
return 0;
-err3:
- gpio_free(GPIO_NR_TREO680_LCD_POWER);
+
err2:
gpio_free(GPIO_NR_TREO680_BL_POWER);
err:
@@ -355,7 +272,6 @@ static int treo680_backlight_notify(int brightness)
static void treo680_backlight_exit(struct device *dev)
{
gpio_free(GPIO_NR_TREO680_BL_POWER);
- gpio_free(GPIO_NR_TREO680_LCD_POWER);
}
static struct platform_pwm_backlight_data treo680_backlight_data = {
@@ -379,44 +295,9 @@ static struct platform_device treo680_backlight = {
/******************************************************************************
* IrDA
******************************************************************************/
-static void treo680_transceiver_mode(struct device *dev, int mode)
-{
- gpio_set_value(GPIO_NR_TREO680_IR_EN, mode & IR_OFF);
- pxa2xx_transceiver_mode(dev, mode);
-}
-
-static int treo680_irda_startup(struct device *dev)
-{
- int err;
-
- err = gpio_request(GPIO_NR_TREO680_IR_EN, "Ir port disable");
- if (err)
- goto err1;
-
- err = gpio_direction_output(GPIO_NR_TREO680_IR_EN, 1);
- if (err)
- goto err2;
-
- return 0;
-
-err2:
- dev_err(dev, "treo680_irda: cannot change IR gpio direction\n");
- gpio_free(GPIO_NR_TREO680_IR_EN);
-err1:
- dev_err(dev, "treo680_irda: cannot allocate IR gpio\n");
- return err;
-}
-
-static void treo680_irda_shutdown(struct device *dev)
-{
- gpio_free(GPIO_NR_TREO680_IR_EN);
-}
-
static struct pxaficp_platform_data treo680_ficp_info = {
- .transceiver_cap = IR_FIRMODE | IR_SIRMODE | IR_OFF,
- .startup = treo680_irda_startup,
- .shutdown = treo680_irda_shutdown,
- .transceiver_mode = treo680_transceiver_mode,
+ .gpio_pwdown = GPIO_NR_TREO680_IR_EN,
+ .transceiver_cap = IR_SIRMODE | IR_OFF,
};
/******************************************************************************
@@ -546,6 +427,11 @@ static struct pxafb_mode_info treo680_lcd_modes[] = {
},
};
+static void treo680_lcd_power(int on, struct fb_var_screeninfo *info)
+{
+ gpio_set_value(GPIO_NR_TREO680_BL_POWER, on);
+}
+
static struct pxafb_mach_info treo680_lcd_screen = {
.modes = treo680_lcd_modes,
.num_modes = ARRAY_SIZE(treo680_lcd_modes),
@@ -585,11 +471,32 @@ static void __init treo680_udc_init(void)
}
}
+static void __init treo680_lcd_power_init(void)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_LCD_POWER, "LCD POWER");
+ if (ret) {
+ pr_err("Treo680: LCD power GPIO request failed!\n");
+ return;
+ }
+
+ ret = gpio_direction_output(GPIO_NR_TREO680_LCD_POWER, 0);
+ if (ret) {
+ pr_err("Treo680: setting LCD power GPIO direction failed!\n");
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+ return;
+ }
+
+ treo680_lcd_screen.pxafb_lcd_power = treo680_lcd_power;
+}
+
static void __init treo680_init(void)
{
treo680_pm_init();
pxa2xx_mfp_config(ARRAY_AND_SIZE(treo680_pin_config));
pxa_set_keypad_info(&treo680_keypad_platform_data);
+ treo680_lcd_power_init();
set_pxa_fb_info(&treo680_lcd_screen);
pxa_set_mci_info(&treo680_mci_platform_data);
treo680_udc_init();
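
The same pattern applies to IrDA: once .gpio_pwdown is filled in, the FICP driver can toggle the transceiver shutdown pin itself, which is why the boards above drop their transceiver_mode()/startup()/shutdown() hooks. A hedged sketch of the driver-side equivalent (helper name and header are assumptions; the polarity follows the removed board code, which drove the pin high for IR_OFF).

    #include <linux/gpio.h>
    #include <mach/irda.h>	/* struct pxaficp_platform_data, IR_OFF (assumed header) */

    /* Illustrative only: power the IrDA transceiver up or down via gpio_pwdown. */
    static void example_ficp_set_pwdown(struct pxaficp_platform_data *pdata,
    				    int mode)
    {
    	if (gpio_is_valid(pdata->gpio_pwdown))
    		gpio_set_value(pdata->gpio_pwdown, !!(mode & IR_OFF));
    }
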
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 825f540176d..3981e0356d1 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -367,6 +367,9 @@ static struct pxamci_platform_data trizeps4_mci_platform_data = {
.exit = trizeps4_mci_exit,
.get_ro = NULL, /* write-protection not supported */
.setpower = NULL, /* power-switching not supported */
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
/****************************************************************************
@@ -412,6 +415,7 @@ static void trizeps4_irda_transceiver_mode(struct device *dev, int mode)
}
static struct pxaficp_platform_data trizeps4_ficp_platform_data = {
+ .gpio_pwdown = -1,
.transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF,
.transceiver_mode = trizeps4_irda_transceiver_mode,
.startup = trizeps4_irda_startup,
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
new file mode 100644
index 00000000000..3fd79cbb36c
--- /dev/null
+++ b/arch/arm/mach-pxa/xcep.c
@@ -0,0 +1,187 @@
+/* linux/arch/arm/mach-pxa/xcep.c
+ *
+ * Support for the Iskratel Electronics XCEP platform as used in
+ * the Libera instruments from Instrumentation Technologies.
+ *
+ * Author: Ales Bardorfer <ales@i-tech.si>
+ * Contributions by: Abbott, MG (Michael) <michael.abbott@diamond.ac.uk>
+ * Contributions by: Matej Kenda <matej.kenda@i-tech.si>
+ * Created: June 2006
+ * Copyright: (C) 2006-2009 Instrumentation Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/smc91x.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+
+#include <plat/i2c.h>
+
+#include <mach/hardware.h>
+#include <mach/pxa2xx-regs.h>
+#include <mach/mfp-pxa25x.h>
+
+#include "generic.h"
+
+#define XCEP_ETH_PHYS (PXA_CS3_PHYS + 0x00000300)
+#define XCEP_ETH_PHYS_END (PXA_CS3_PHYS + 0x000fffff)
+#define XCEP_ETH_ATTR (PXA_CS3_PHYS + 0x02000000)
+#define XCEP_ETH_ATTR_END (PXA_CS3_PHYS + 0x020fffff)
+#define XCEP_ETH_IRQ IRQ_GPIO0
+
+/* XCEP CPLD base */
+#define XCEP_CPLD_BASE 0xf0000000
+
+
+/* Flash partitions. */
+
+static struct mtd_partition xcep_partitions[] = {
+ {
+ .name = "Bootloader",
+ .size = 0x00040000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE
+ }, {
+ .name = "Bootloader ENV",
+ .size = 0x00040000,
+ .offset = 0x00040000,
+ .mask_flags = MTD_WRITEABLE
+ }, {
+ .name = "Kernel",
+ .size = 0x00100000,
+ .offset = 0x00080000,
+ }, {
+ .name = "Rescue fs",
+ .size = 0x00280000,
+ .offset = 0x00180000,
+ }, {
+ .name = "Filesystem",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 0x00400000
+ }
+};
+
+static struct physmap_flash_data xcep_flash_data[] = {
+ {
+ .width = 4, /* bankwidth in bytes */
+ .parts = xcep_partitions,
+ .nr_parts = ARRAY_SIZE(xcep_partitions)
+ }
+};
+
+static struct resource flash_resource = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_32M - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device flash_device = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = xcep_flash_data,
+ },
+ .resource = &flash_resource,
+ .num_resources = 1,
+};
+
+
+
+/* SMC LAN91C111 network controller. */
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .name = "smc91x-regs",
+ .start = XCEP_ETH_PHYS,
+ .end = XCEP_ETH_PHYS_END,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = XCEP_ETH_IRQ,
+ .end = XCEP_ETH_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .name = "smc91x-attrib",
+ .start = XCEP_ETH_ATTR,
+ .end = XCEP_ETH_ATTR_END,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct smc91x_platdata xcep_smc91x_info = {
+ .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+ .dev = {
+ .platform_data = &xcep_smc91x_info,
+ },
+};
+
+
+static struct platform_device *devices[] __initdata = {
+ &flash_device,
+ &smc91x_device,
+};
+
+
+/* We have to state that there are HWMON devices on the I2C bus on XCEP.
+ * Drivers for HWMON verify capabilities of the adapter when loading and
+ * refuse to attach if the adapter doesn't support HWMON class of devices.
+ * See also Documentation/i2c/porting-clients. */
+static struct i2c_pxa_platform_data xcep_i2c_platform_data = {
+ .class = I2C_CLASS_HWMON
+};
+
+
+static mfp_cfg_t xcep_pin_config[] __initdata = {
+ GPIO79_nCS_3, /* SMC 91C111 chip select. */
+ GPIO80_nCS_4, /* CPLD chip select. */
+ /* SSP communication to MSP430 */
+ GPIO23_SSP1_SCLK,
+ GPIO24_SSP1_SFRM,
+ GPIO25_SSP1_TXD,
+ GPIO26_SSP1_RXD,
+ GPIO27_SSP1_EXTCLK
+};
+
+static void __init xcep_init(void)
+{
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(xcep_pin_config));
+
+ /* See Intel XScale Developer's Guide for details */
+ /* Set RDF and RDN to appropriate values (chip select 3 (smc91x)) */
+ MSC1 = (MSC1 & 0xffff) | 0xD5540000;
+ /* Set RDF and RDN to appropriate values (chip select 5 (fpga)) */
+ MSC2 = (MSC2 & 0xffff) | 0x72A00000;
+
+ platform_add_devices(ARRAY_AND_SIZE(devices));
+ pxa_set_i2c_info(&xcep_i2c_platform_data);
+}
+
+MACHINE_START(XCEP, "Iskratel XCEP")
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .boot_params = 0xa0000100,
+ .init_machine = xcep_init,
+ .map_io = pxa_map_io,
+ .init_irq = pxa25x_init_irq,
+ .timer = &pxa_timer,
+MACHINE_END
+
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 218d2001f1d..09784d3954e 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -290,6 +290,9 @@ static struct pxamci_platform_data zylonite_mci_platform_data = {
.init = zylonite_mci_init,
.exit = zylonite_mci_exit,
.get_ro = zylonite_mci_ro,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
};
static struct pxamci_platform_data zylonite_mci2_platform_data = {
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index dc3519c50ab..a2083b60e3f 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -30,6 +30,7 @@
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/ata_platform.h>
+#include <linux/amba/mmci.h>
#include <asm/clkdev.h>
#include <asm/system.h>
@@ -44,7 +45,6 @@
#include <asm/mach/flash.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include <asm/hardware/gic.h>
@@ -237,14 +237,14 @@ static unsigned int realview_mmc_status(struct device *dev)
return readl(REALVIEW_SYSMCI) & mask;
}
-struct mmc_platform_data realview_mmc0_plat_data = {
+struct mmci_platform_data realview_mmc0_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = realview_mmc_status,
.gpio_wp = 17,
.gpio_cd = 16,
};
-struct mmc_platform_data realview_mmc1_plat_data = {
+struct mmci_platform_data realview_mmc1_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = realview_mmc_status,
.gpio_wp = 19,
@@ -296,31 +296,31 @@ static struct clk ref24_clk = {
static struct clk_lookup lookups[] = {
{ /* UART0 */
- .dev_id = "dev:f1",
+ .dev_id = "dev:uart0",
.clk = &ref24_clk,
}, { /* UART1 */
- .dev_id = "dev:f2",
+ .dev_id = "dev:uart1",
.clk = &ref24_clk,
}, { /* UART2 */
- .dev_id = "dev:f3",
+ .dev_id = "dev:uart2",
.clk = &ref24_clk,
}, { /* UART3 */
- .dev_id = "fpga:09",
+ .dev_id = "fpga:uart3",
.clk = &ref24_clk,
}, { /* KMI0 */
- .dev_id = "fpga:06",
+ .dev_id = "fpga:kmi0",
.clk = &ref24_clk,
}, { /* KMI1 */
- .dev_id = "fpga:07",
+ .dev_id = "fpga:kmi1",
.clk = &ref24_clk,
}, { /* MMC0 */
- .dev_id = "fpga:05",
+ .dev_id = "fpga:mmc0",
.clk = &ref24_clk,
}, { /* EB:CLCD */
- .dev_id = "dev:20",
+ .dev_id = "dev:clcd",
.clk = &oscvco_clk,
}, { /* PB:CLCD */
- .dev_id = "issp:20",
+ .dev_id = "issp:clcd",
.clk = &oscvco_clk,
}
};
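
The clk_lookup entries above now key on readable AMBA device names ("dev:uart0", "fpga:mmc0", ...) instead of bus-address style IDs; clkdev matches .dev_id against the consuming device's name, so clk_get() keeps working unchanged. A tiny illustration with a hypothetical helper name:

    #include <linux/clk.h>
    #include <linux/amba/bus.h>

    /* clk_get() resolves via the lookups[] table by comparing dev_name(),
     * e.g. "dev:uart0", with each entry's .dev_id string. */
    static struct clk *example_get_amba_clk(struct amba_device *adev)
    {
    	return clk_get(&adev->dev, NULL);
    }
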
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 59a337ba4be..46cd6acb4d4 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -47,8 +47,8 @@ static struct amba_device name##_device = { \
extern struct platform_device realview_flash_device;
extern struct platform_device realview_cf_device;
extern struct platform_device realview_i2c_device;
-extern struct mmc_platform_data realview_mmc0_plat_data;
-extern struct mmc_platform_data realview_mmc1_plat_data;
+extern struct mmci_platform_data realview_mmc0_plat_data;
+extern struct mmci_platform_data realview_mmc1_plat_data;
extern struct clcd_board clcd_plat_data;
extern void __iomem *gic_cpu_base_addr;
extern void __iomem *timer0_va_base;
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index abd13b44867..1d65e64ae57 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -37,7 +38,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include <asm/mach/time.h>
#include <mach/board-eb.h>
@@ -193,27 +193,27 @@ static struct pl061_platform_data gpio2_plat_data = {
#define EB_SSP_DMA { 9, 8 }
/* FPGA Primecells */
-AMBA_DEVICE(aaci, "fpga:04", AACI, NULL);
-AMBA_DEVICE(mmc0, "fpga:05", MMCI0, &realview_mmc0_plat_data);
-AMBA_DEVICE(kmi0, "fpga:06", KMI0, NULL);
-AMBA_DEVICE(kmi1, "fpga:07", KMI1, NULL);
-AMBA_DEVICE(uart3, "fpga:09", EB_UART3, NULL);
+AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
+AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
+AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
+AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
+AMBA_DEVICE(uart3, "fpga:uart3", EB_UART3, NULL);
/* DevChip Primecells */
-AMBA_DEVICE(smc, "dev:00", EB_SMC, NULL);
-AMBA_DEVICE(clcd, "dev:20", EB_CLCD, &clcd_plat_data);
-AMBA_DEVICE(dmac, "dev:30", DMAC, NULL);
-AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
-AMBA_DEVICE(wdog, "dev:e1", EB_WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", EB_GPIO0, &gpio0_plat_data);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
-AMBA_DEVICE(rtc, "dev:e8", EB_RTC, NULL);
-AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
-AMBA_DEVICE(uart0, "dev:f1", EB_UART0, NULL);
-AMBA_DEVICE(uart1, "dev:f2", EB_UART1, NULL);
-AMBA_DEVICE(uart2, "dev:f3", EB_UART2, NULL);
-AMBA_DEVICE(ssp0, "dev:f4", EB_SSP, NULL);
+AMBA_DEVICE(smc, "dev:smc", EB_SMC, NULL);
+AMBA_DEVICE(clcd, "dev:clcd", EB_CLCD, &clcd_plat_data);
+AMBA_DEVICE(dmac, "dev:dmac", DMAC, NULL);
+AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL);
+AMBA_DEVICE(wdog, "dev:wdog", EB_WATCHDOG, NULL);
+AMBA_DEVICE(gpio0, "dev:gpio0", EB_GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
+AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
+AMBA_DEVICE(rtc, "dev:rtc", EB_RTC, NULL);
+AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL);
+AMBA_DEVICE(uart0, "dev:uart0", EB_UART0, NULL);
+AMBA_DEVICE(uart1, "dev:uart1", EB_UART1, NULL);
+AMBA_DEVICE(uart2, "dev:uart2", EB_UART2, NULL);
+AMBA_DEVICE(ssp0, "dev:ssp0", EB_SSP, NULL);
static struct amba_device *amba_devs[] __initdata = {
&dmac_device,
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index 17fbb0e889b..2817fe09931 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -37,7 +38,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include <asm/mach/time.h>
#include <mach/board-pb1176.h>
@@ -170,29 +170,29 @@ static struct pl061_platform_data gpio2_plat_data = {
#define PB1176_SSP_DMA { 9, 8 }
/* FPGA Primecells */
-AMBA_DEVICE(aaci, "fpga:04", AACI, NULL);
-AMBA_DEVICE(mmc0, "fpga:05", MMCI0, &realview_mmc0_plat_data);
-AMBA_DEVICE(kmi0, "fpga:06", KMI0, NULL);
-AMBA_DEVICE(kmi1, "fpga:07", KMI1, NULL);
-AMBA_DEVICE(uart3, "fpga:09", PB1176_UART3, NULL);
+AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
+AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
+AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
+AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
+AMBA_DEVICE(uart3, "fpga:uart3", PB1176_UART3, NULL);
/* DevChip Primecells */
-AMBA_DEVICE(smc, "dev:00", PB1176_SMC, NULL);
-AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
-AMBA_DEVICE(wdog, "dev:e1", PB1176_WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", PB1176_GPIO0, &gpio0_plat_data);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
-AMBA_DEVICE(rtc, "dev:e8", PB1176_RTC, NULL);
-AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
-AMBA_DEVICE(uart0, "dev:f1", PB1176_UART0, NULL);
-AMBA_DEVICE(uart1, "dev:f2", PB1176_UART1, NULL);
-AMBA_DEVICE(uart2, "dev:f3", PB1176_UART2, NULL);
-AMBA_DEVICE(ssp0, "dev:f4", PB1176_SSP, NULL);
+AMBA_DEVICE(smc, "dev:smc", PB1176_SMC, NULL);
+AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL);
+AMBA_DEVICE(wdog, "dev:wdog", PB1176_WATCHDOG, NULL);
+AMBA_DEVICE(gpio0, "dev:gpio0", PB1176_GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
+AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
+AMBA_DEVICE(rtc, "dev:rtc", PB1176_RTC, NULL);
+AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL);
+AMBA_DEVICE(uart0, "dev:uart0", PB1176_UART0, NULL);
+AMBA_DEVICE(uart1, "dev:uart1", PB1176_UART1, NULL);
+AMBA_DEVICE(uart2, "dev:uart2", PB1176_UART2, NULL);
+AMBA_DEVICE(ssp0, "dev:ssp0", PB1176_SSP, NULL);
/* Primecells on the NEC ISSP chip */
-AMBA_DEVICE(clcd, "issp:20", PB1176_CLCD, &clcd_plat_data);
-//AMBA_DEVICE(dmac, "issp:30", PB1176_DMAC, NULL);
+AMBA_DEVICE(clcd, "issp:clcd", PB1176_CLCD, &clcd_plat_data);
+//AMBA_DEVICE(dmac, "issp:dmac", PB1176_DMAC, NULL);
static struct amba_device *amba_devs[] __initdata = {
// &dmac_device,
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index fdd042b85f4..94680fcf726 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -38,7 +39,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include <asm/mach/time.h>
#include <mach/board-pb11mp.h>
@@ -172,29 +172,29 @@ static struct pl061_platform_data gpio2_plat_data = {
#define PB11MP_SSP_DMA { 9, 8 }
/* FPGA Primecells */
-AMBA_DEVICE(aaci, "fpga:04", AACI, NULL);
-AMBA_DEVICE(mmc0, "fpga:05", MMCI0, &realview_mmc0_plat_data);
-AMBA_DEVICE(kmi0, "fpga:06", KMI0, NULL);
-AMBA_DEVICE(kmi1, "fpga:07", KMI1, NULL);
-AMBA_DEVICE(uart3, "fpga:09", PB11MP_UART3, NULL);
+AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
+AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
+AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
+AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
+AMBA_DEVICE(uart3, "fpga:uart3", PB11MP_UART3, NULL);
/* DevChip Primecells */
-AMBA_DEVICE(smc, "dev:00", PB11MP_SMC, NULL);
-AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
-AMBA_DEVICE(wdog, "dev:e1", PB11MP_WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", PB11MP_GPIO0, &gpio0_plat_data);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
-AMBA_DEVICE(rtc, "dev:e8", PB11MP_RTC, NULL);
-AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
-AMBA_DEVICE(uart0, "dev:f1", PB11MP_UART0, NULL);
-AMBA_DEVICE(uart1, "dev:f2", PB11MP_UART1, NULL);
-AMBA_DEVICE(uart2, "dev:f3", PB11MP_UART2, NULL);
-AMBA_DEVICE(ssp0, "dev:f4", PB11MP_SSP, NULL);
+AMBA_DEVICE(smc, "dev:smc", PB11MP_SMC, NULL);
+AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL);
+AMBA_DEVICE(wdog, "dev:wdog", PB11MP_WATCHDOG, NULL);
+AMBA_DEVICE(gpio0, "dev:gpio0", PB11MP_GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
+AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
+AMBA_DEVICE(rtc, "dev:rtc", PB11MP_RTC, NULL);
+AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL);
+AMBA_DEVICE(uart0, "dev:uart0", PB11MP_UART0, NULL);
+AMBA_DEVICE(uart1, "dev:uart1", PB11MP_UART1, NULL);
+AMBA_DEVICE(uart2, "dev:uart2", PB11MP_UART2, NULL);
+AMBA_DEVICE(ssp0, "dev:ssp0", PB11MP_SSP, NULL);
/* Primecells on the NEC ISSP chip */
-AMBA_DEVICE(clcd, "issp:20", PB11MP_CLCD, &clcd_plat_data);
-AMBA_DEVICE(dmac, "issp:30", DMAC, NULL);
+AMBA_DEVICE(clcd, "issp:clcd", PB11MP_CLCD, &clcd_plat_data);
+AMBA_DEVICE(dmac, "issp:dmac", DMAC, NULL);
static struct amba_device *amba_devs[] __initdata = {
&dmac_device,
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index 70bba9900d9..941beb2b970 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -34,7 +35,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include <asm/mach/time.h>
#include <mach/hardware.h>
@@ -162,29 +162,29 @@ static struct pl061_platform_data gpio2_plat_data = {
#define PBA8_SSP_DMA { 9, 8 }
/* FPGA Primecells */
-AMBA_DEVICE(aaci, "fpga:04", AACI, NULL);
-AMBA_DEVICE(mmc0, "fpga:05", MMCI0, &realview_mmc0_plat_data);
-AMBA_DEVICE(kmi0, "fpga:06", KMI0, NULL);
-AMBA_DEVICE(kmi1, "fpga:07", KMI1, NULL);
-AMBA_DEVICE(uart3, "fpga:09", PBA8_UART3, NULL);
+AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
+AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
+AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
+AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
+AMBA_DEVICE(uart3, "fpga:uart3", PBA8_UART3, NULL);
/* DevChip Primecells */
-AMBA_DEVICE(smc, "dev:00", PBA8_SMC, NULL);
-AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
-AMBA_DEVICE(wdog, "dev:e1", PBA8_WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", PBA8_GPIO0, &gpio0_plat_data);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
-AMBA_DEVICE(rtc, "dev:e8", PBA8_RTC, NULL);
-AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
-AMBA_DEVICE(uart0, "dev:f1", PBA8_UART0, NULL);
-AMBA_DEVICE(uart1, "dev:f2", PBA8_UART1, NULL);
-AMBA_DEVICE(uart2, "dev:f3", PBA8_UART2, NULL);
-AMBA_DEVICE(ssp0, "dev:f4", PBA8_SSP, NULL);
+AMBA_DEVICE(smc, "dev:smc", PBA8_SMC, NULL);
+AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL);
+AMBA_DEVICE(wdog, "dev:wdog", PBA8_WATCHDOG, NULL);
+AMBA_DEVICE(gpio0, "dev:gpio0", PBA8_GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
+AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
+AMBA_DEVICE(rtc, "dev:rtc", PBA8_RTC, NULL);
+AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL);
+AMBA_DEVICE(uart0, "dev:uart0", PBA8_UART0, NULL);
+AMBA_DEVICE(uart1, "dev:uart1", PBA8_UART1, NULL);
+AMBA_DEVICE(uart2, "dev:uart2", PBA8_UART2, NULL);
+AMBA_DEVICE(ssp0, "dev:ssp0", PBA8_SSP, NULL);
/* Primecells on the NEC ISSP chip */
-AMBA_DEVICE(clcd, "issp:20", PBA8_CLCD, &clcd_plat_data);
-AMBA_DEVICE(dmac, "issp:30", DMAC, NULL);
+AMBA_DEVICE(clcd, "issp:clcd", PBA8_CLCD, &clcd_plat_data);
+AMBA_DEVICE(dmac, "issp:dmac", DMAC, NULL);
static struct amba_device *amba_devs[] __initdata = {
&dmac_device,
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index ce6c5d25fbe..7e4bc6cdca5 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -23,6 +23,7 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -34,7 +35,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include <asm/mach/time.h>
#include <mach/hardware.h>
@@ -182,29 +182,29 @@ static struct pl061_platform_data gpio2_plat_data = {
#define PBX_SSP_DMA { 9, 8 }
/* FPGA Primecells */
-AMBA_DEVICE(aaci, "fpga:04", AACI, NULL);
-AMBA_DEVICE(mmc0, "fpga:05", MMCI0, &realview_mmc0_plat_data);
-AMBA_DEVICE(kmi0, "fpga:06", KMI0, NULL);
-AMBA_DEVICE(kmi1, "fpga:07", KMI1, NULL);
-AMBA_DEVICE(uart3, "fpga:09", PBX_UART3, NULL);
+AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL);
+AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data);
+AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL);
+AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL);
+AMBA_DEVICE(uart3, "fpga:uart3", PBX_UART3, NULL);
/* DevChip Primecells */
-AMBA_DEVICE(smc, "dev:00", PBX_SMC, NULL);
-AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL);
-AMBA_DEVICE(wdog, "dev:e1", PBX_WATCHDOG, NULL);
-AMBA_DEVICE(gpio0, "dev:e4", PBX_GPIO0, &gpio0_plat_data);
-AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data);
-AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
-AMBA_DEVICE(rtc, "dev:e8", PBX_RTC, NULL);
-AMBA_DEVICE(sci0, "dev:f0", SCI, NULL);
-AMBA_DEVICE(uart0, "dev:f1", PBX_UART0, NULL);
-AMBA_DEVICE(uart1, "dev:f2", PBX_UART1, NULL);
-AMBA_DEVICE(uart2, "dev:f3", PBX_UART2, NULL);
-AMBA_DEVICE(ssp0, "dev:f4", PBX_SSP, NULL);
+AMBA_DEVICE(smc, "dev:smc", PBX_SMC, NULL);
+AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL);
+AMBA_DEVICE(wdog, "dev:wdog", PBX_WATCHDOG, NULL);
+AMBA_DEVICE(gpio0, "dev:gpio0", PBX_GPIO0, &gpio0_plat_data);
+AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data);
+AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data);
+AMBA_DEVICE(rtc, "dev:rtc", PBX_RTC, NULL);
+AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL);
+AMBA_DEVICE(uart0, "dev:uart0", PBX_UART0, NULL);
+AMBA_DEVICE(uart1, "dev:uart1", PBX_UART1, NULL);
+AMBA_DEVICE(uart2, "dev:uart2", PBX_UART2, NULL);
+AMBA_DEVICE(ssp0, "dev:ssp0", PBX_SSP, NULL);
/* Primecells on the NEC ISSP chip */
-AMBA_DEVICE(clcd, "issp:20", PBX_CLCD, &clcd_plat_data);
-AMBA_DEVICE(dmac, "issp:30", DMAC, NULL);
+AMBA_DEVICE(clcd, "issp:clcd", PBX_CLCD, &clcd_plat_data);
+AMBA_DEVICE(dmac, "issp:dmac", DMAC, NULL);
static struct amba_device *amba_devs[] __initdata = {
&dmac_device,
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index d8c023d4df3..3d4e9da3fa5 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -77,6 +77,7 @@ config ARCH_H1940
select CPU_S3C2410
select PM_H1940 if PM
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the HP IPAQ H1940
@@ -89,6 +90,7 @@ config MACH_N30
bool "Acer N30 family"
select CPU_S3C2410
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
	  Say Y here if you want support for the Acer N30, Acer N35,
Navman PiN570, Yakumo AlphaX or Airis NC05 PDAs.
@@ -103,6 +105,7 @@ config ARCH_BAST
select S3C24XX_DCLK
select ISA
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the Simtec Electronics EB2410ITX
development board (also known as BAST)
@@ -111,6 +114,7 @@ config MACH_OTOM
bool "NexVision OTOM Board"
select CPU_S3C2410
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the Nex Vision OTOM board
@@ -154,6 +158,7 @@ config MACH_QT2410
bool "QT2410"
select CPU_S3C2410
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the Armzone QT2410
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index 35c1bde89cf..c2bdc4635d1 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -48,6 +48,7 @@ config MACH_JIVE
bool "Logitech Jive"
select CPU_S3C2412
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the Logitech Jive.
@@ -61,6 +62,7 @@ config MACH_SMDK2413
select MACH_S3C2413
select MACH_SMDK
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using an SMDK2413
@@ -84,6 +86,7 @@ config MACH_VSTMS
bool "VMSTMS"
select CPU_S3C2412
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
	  Say Y here if you are using a VSTMS board
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index 8ae1b288f7f..d7bba919a77 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -48,6 +48,7 @@ config MACH_OSIRIS
select S3C2440_XTAL_12000000
select S3C2410_IOTIMING if S3C2440_CPUFREQ
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the Simtec IM2440D20 module, also
known as the Osiris.
@@ -57,6 +58,7 @@ config MACH_RX3715
select CPU_S3C2440
select S3C2440_XTAL_16934400
select PM_H1940 if PM
+ select S3C_DEV_NAND
help
Say Y here if you are using the HP iPAQ rx3715.
@@ -66,6 +68,7 @@ config ARCH_S3C2440
select S3C2440_XTAL_16934400
select MACH_SMDK
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the SMDK2440.
@@ -74,6 +77,7 @@ config MACH_NEXCODER_2440
select CPU_S3C2440
select S3C2440_XTAL_12000000
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the Nex Vision NEXCODER 2440 Light Board
@@ -88,6 +92,7 @@ config MACH_AT2440EVB
bool "Avantech AT2440EVB development board"
select CPU_S3C2440
select S3C_DEV_USB_HOST
+ select S3C_DEV_NAND
help
Say Y here if you are using the AT2440EVB development board
@@ -97,6 +102,7 @@ config MACH_MINI2440
select EEPROM_AT24
select LEDS_TRIGGER_BACKLIGHT
select SND_S3C24XX_SOC_S3C24XX_UDA134X
+ select S3C_DEV_NAND
help
	  Say Y here to select support for the MINI2440. It is a 10cm x 10cm board
	  available from various sources. It can come with a 3.5" or 7" touch LCD.
diff --git a/arch/arm/mach-s3c6400/Kconfig b/arch/arm/mach-s3c6400/Kconfig
index f5af212066c..770b72067e3 100644
--- a/arch/arm/mach-s3c6400/Kconfig
+++ b/arch/arm/mach-s3c6400/Kconfig
@@ -26,6 +26,7 @@ config MACH_SMDK6400
bool "SMDK6400"
select CPU_S3C6400
select S3C_DEV_HSMMC
+ select S3C_DEV_NAND
select S3C6400_SETUP_SDHCI
help
Machine support for the Samsung SMDK6400
diff --git a/arch/arm/mach-s3c6410/Kconfig b/arch/arm/mach-s3c6410/Kconfig
index f9d0f09f976..53fc3ff657f 100644
--- a/arch/arm/mach-s3c6410/Kconfig
+++ b/arch/arm/mach-s3c6410/Kconfig
@@ -102,6 +102,7 @@ config MACH_HMT
bool "Airgoo HMT"
select CPU_S3C6410
select S3C_DEV_FB
+ select S3C_DEV_NAND
select S3C_DEV_USB_HOST
select S3C64XX_SETUP_FB_24BPP
select HAVE_PWM
diff --git a/arch/arm/mach-sa1100/dma.c b/arch/arm/mach-sa1100/dma.c
index 95f9c5a6d6d..cb4521a6f42 100644
--- a/arch/arm/mach-sa1100/dma.c
+++ b/arch/arm/mach-sa1100/dma.c
@@ -39,7 +39,7 @@ typedef struct {
static sa1100_dma_t dma_chan[SA1100_DMA_CHANNELS];
-static spinlock_t dma_list_lock;
+static DEFINE_SPINLOCK(dma_list_lock);
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
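
The change just above replaces an uninitialized static spinlock_t with DEFINE_SPINLOCK(), which initializes the lock statically. For comparison, the runtime equivalent would be (illustrative code only):

    #include <linux/init.h>
    #include <linux/spinlock.h>

    static spinlock_t dma_list_lock;	/* would otherwise rely on implicit zero init */

    static int __init example_dma_lock_init(void)
    {
    	/* explicit runtime init; DEFINE_SPINLOCK() achieves this at build time */
    	spin_lock_init(&dma_list_lock);
    	return 0;
    }
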
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 337b9aabce4..801b21e7f67 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -81,6 +81,18 @@ config MACH_U300_SEMI_IS_SHARED
	  Memory Interface) from both the access and application
	  side.
+config MACH_U300_SPIDUMMY
+ bool "SSP/SPI dummy chip"
+ select SPI
+ select SPI_MASTER
+ select SPI_PL022
+ help
+	  This creates a small kernel module that registers a dummy
+	  SPI device to be used for loopback tests. It is regularly
+	  used to test reference designs. If you're not testing SPI,
+	  you don't need it. Selecting this will activate the SPI
+	  framework and ARM PL022 support.
+
comment "All the settings below must match the bootloader's settings"
config MACH_U300_ACCESS_MEM_SIZE
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile
index 24950e0df4b..885b5c027c1 100644
--- a/arch/arm/mach-u300/Makefile
+++ b/arch/arm/mach-u300/Makefile
@@ -9,3 +9,6 @@ obj- :=
obj-$(CONFIG_ARCH_U300) += u300.o
obj-$(CONFIG_MMC) += mmc.o
+obj-$(CONFIG_SPI_PL022) += spi.o
+obj-$(CONFIG_MACH_U300_SPIDUMMY) += dummyspichip.o
+obj-$(CONFIG_I2C_STU300) += i2c.o
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 2e9b8ccd8ec..be60d6deee8 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -32,6 +32,8 @@
#include "clock.h"
#include "mmc.h"
+#include "spi.h"
+#include "i2c.h"
/*
* Static I/O mappings that are needed for booting the U300 platforms. The
@@ -378,14 +380,14 @@ static struct platform_device wdog_device = {
};
static struct platform_device i2c0_device = {
- .name = "stddci2c",
+ .name = "stu300",
.id = 0,
.num_resources = ARRAY_SIZE(i2c0_resources),
.resource = i2c0_resources,
};
static struct platform_device i2c1_device = {
- .name = "stddci2c",
+ .name = "stu300",
.id = 1,
.num_resources = ARRAY_SIZE(i2c1_resources),
.resource = i2c1_resources,
@@ -611,6 +613,8 @@ void __init u300_init_devices(void)
/* Wait for the PLL208 to lock if not locked in yet */
while (!(readw(U300_SYSCON_VBASE + U300_SYSCON_CSR) &
U300_SYSCON_CSR_PLL208_LOCK_IND));
+ /* Initialize SPI device with some board specifics */
+ u300_spi_init(&pl022_device);
/* Register the AMBA devices in the AMBA bus abstraction layer */
u300_clock_primecells();
@@ -622,6 +626,12 @@ void __init u300_init_devices(void)
u300_assign_physmem();
+ /* Register subdevices on the I2C buses */
+ u300_i2c_register_board_devices();
+
+ /* Register subdevices on the SPI bus */
+ u300_spi_register_board_devices();
+
/* Register the platform devices */
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
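
u300_spi_register_board_devices() and u300_i2c_register_board_devices() live in the new spi.c and i2c.c files referenced by the Makefile change earlier; their bodies are not part of this hunk. As a hedged sketch only, a board-level SPI registration helper of that kind typically boils down to something like the following (all values invented for illustration; "spi-dummy" matches the dummy driver added below).

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/spi/spi.h>

    static struct spi_board_info example_spi_board_info[] __initdata = {
    	{
    		.modalias	= "spi-dummy",	/* binds to pl022_dummy_driver */
    		.max_speed_hz	= 1000000,
    		.bus_num	= 0,
    		.chip_select	= 0,
    	},
    };

    static void __init example_spi_register_board_devices(void)
    {
    	spi_register_board_info(example_spi_board_info,
    				ARRAY_SIZE(example_spi_board_info));
    }
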
diff --git a/arch/arm/mach-u300/dummyspichip.c b/arch/arm/mach-u300/dummyspichip.c
new file mode 100644
index 00000000000..962f9de454d
--- /dev/null
+++ b/arch/arm/mach-u300/dummyspichip.c
@@ -0,0 +1,290 @@
+/*
+ * arch/arm/mach-u300/dummyspichip.c
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * This is a dummy loopback SPI "chip" used for testing SPI.
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/dma-mapping.h>
+/*
+ * WARNING! Do not include this pl022-specific controller header
+ * for any generic driver. It is only done in this dummy chip
+ * because we alter the chip configuration in order to test some
+ * different settings on the loopback device. Normal chip configs
+ * shall be STATIC and not altered by the driver!
+ */
+#include <linux/amba/pl022.h>
+
+struct dummy {
+ struct device *dev;
+ struct mutex lock;
+};
+
+#define DMA_TEST_SIZE 2048
+
+/* When we cat /sys/bus/spi/devices/spi0.0/looptest this will be triggered */
+static ssize_t dummy_looptest(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct dummy *p_dummy = dev_get_drvdata(&spi->dev);
+
+ /*
+ * WARNING! Do not dereference the chip-specific data in any normal
+ * driver for a chip. It is usually STATIC and shall not be read
+ * or written to. Your chip driver should NOT depend on fields in this
+ * struct, this is just used here to alter the behaviour of the chip
+ * in order to perform tests.
+ */
+ struct pl022_config_chip *chip_info = spi->controller_data;
+ int status;
+ u8 txbuf[14] = {0xDE, 0xAD, 0xBE, 0xEF, 0x2B, 0xAD,
+ 0xCA, 0xFE, 0xBA, 0xBE, 0xB1, 0x05,
+ 0xF0, 0x0D};
+ u8 rxbuf[14];
+ u8 *bigtxbuf_virtual;
+ u8 *bigrxbuf_virtual;
+
+ if (mutex_lock_interruptible(&p_dummy->lock))
+ return -ERESTARTSYS;
+
+ bigtxbuf_virtual = kmalloc(DMA_TEST_SIZE, GFP_KERNEL);
+ if (bigtxbuf_virtual == NULL) {
+ status = -ENOMEM;
+ goto out;
+ }
+	bigrxbuf_virtual = kmalloc(DMA_TEST_SIZE, GFP_KERNEL);
+	if (bigrxbuf_virtual == NULL) {
+		status = -ENOMEM;
+		kfree(bigtxbuf_virtual);
+		goto out;
+	}
+
+ /* Fill TXBUF with some happy pattern */
+ memset(bigtxbuf_virtual, 0xAA, DMA_TEST_SIZE);
+
+ /*
+ * Force chip to 8 bit mode
+ * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC!
+ */
+ chip_info->data_size = SSP_DATA_BITS_8;
+ /* You should NOT DO THIS EITHER */
+ spi->master->setup(spi);
+
+ /* Now run the tests for 8bit mode */
+ pr_info("Simple test 1: write 0xAA byte, read back garbage byte "
+ "in 8bit mode\n");
+ status = spi_w8r8(spi, 0xAA);
+ if (status < 0)
+		pr_warning("Simple test 1: FAILURE: spi_write_then_read "
+ "failed with status %d\n", status);
+ else
+ pr_info("Simple test 1: SUCCESS!\n");
+
+ pr_info("Simple test 2: write 8 bytes, read back 8 bytes garbage "
+ "in 8bit mode (full FIFO)\n");
+ status = spi_write_then_read(spi, &txbuf[0], 8, &rxbuf[0], 8);
+ if (status < 0)
+ pr_warning("Simple test 2: FAILURE: spi_write_then_read() "
+ "failed with status %d\n", status);
+ else
+ pr_info("Simple test 2: SUCCESS!\n");
+
+ pr_info("Simple test 3: write 14 bytes, read back 14 bytes garbage "
+ "in 8bit mode (see if we overflow FIFO)\n");
+ status = spi_write_then_read(spi, &txbuf[0], 14, &rxbuf[0], 14);
+ if (status < 0)
+ pr_warning("Simple test 3: FAILURE: failed with status %d "
+ "(probably FIFO overrun)\n", status);
+ else
+ pr_info("Simple test 3: SUCCESS!\n");
+
+ pr_info("Simple test 4: write 8 bytes with spi_write(), read 8 "
+ "bytes garbage with spi_read() in 8bit mode\n");
+ status = spi_write(spi, &txbuf[0], 8);
+ if (status < 0)
+ pr_warning("Simple test 4 step 1: FAILURE: spi_write() "
+ "failed with status %d\n", status);
+ else
+ pr_info("Simple test 4 step 1: SUCCESS!\n");
+ status = spi_read(spi, &rxbuf[0], 8);
+ if (status < 0)
+ pr_warning("Simple test 4 step 2: FAILURE: spi_read() "
+ "failed with status %d\n", status);
+ else
+ pr_info("Simple test 4 step 2: SUCCESS!\n");
+
+ pr_info("Simple test 5: write 14 bytes with spi_write(), read "
+ "14 bytes garbage with spi_read() in 8bit mode\n");
+ status = spi_write(spi, &txbuf[0], 14);
+ if (status < 0)
+ pr_warning("Simple test 5 step 1: FAILURE: spi_write() "
+ "failed with status %d (probably FIFO overrun)\n",
+ status);
+ else
+ pr_info("Simple test 5 step 1: SUCCESS!\n");
+ status = spi_read(spi, &rxbuf[0], 14);
+ if (status < 0)
+ pr_warning("Simple test 5 step 2: FAILURE: spi_read() "
+ "failed with status %d (probably FIFO overrun)\n",
+ status);
+ else
+ pr_info("Simple test 5: SUCCESS!\n");
+
+ pr_info("Simple test 6: write %d bytes with spi_write(), "
+ "read %d bytes garbage with spi_read() in 8bit mode\n",
+ DMA_TEST_SIZE, DMA_TEST_SIZE);
+ status = spi_write(spi, &bigtxbuf_virtual[0], DMA_TEST_SIZE);
+ if (status < 0)
+ pr_warning("Simple test 6 step 1: FAILURE: spi_write() "
+ "failed with status %d (probably FIFO overrun)\n",
+ status);
+ else
+ pr_info("Simple test 6 step 1: SUCCESS!\n");
+ status = spi_read(spi, &bigrxbuf_virtual[0], DMA_TEST_SIZE);
+ if (status < 0)
+ pr_warning("Simple test 6 step 2: FAILURE: spi_read() "
+ "failed with status %d (probably FIFO overrun)\n",
+ status);
+ else
+ pr_info("Simple test 6: SUCCESS!\n");
+
+
+ /*
+ * Force chip to 16 bit mode
+ * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC!
+ */
+ chip_info->data_size = SSP_DATA_BITS_16;
+ /* You should NOT DO THIS EITHER */
+ spi->master->setup(spi);
+
+ pr_info("Simple test 7: write 0xAA byte, read back garbage byte "
+ "in 16bit bus mode\n");
+ status = spi_w8r8(spi, 0xAA);
+ if (status == -EIO)
+ pr_info("Simple test 7: SUCCESS! (expected failure with "
+ "status EIO)\n");
+ else if (status < 0)
+		pr_warning("Simple test 7: FAILURE: spi_write_then_read "
+ "failed with status %d\n", status);
+ else
+		pr_warning("Simple test 7: FAILURE: spi_write_then_read "
+ "succeeded but it was expected to fail!\n");
+
+ pr_info("Simple test 8: write 8 bytes, read back 8 bytes garbage "
+ "in 16bit mode (full FIFO)\n");
+ status = spi_write_then_read(spi, &txbuf[0], 8, &rxbuf[0], 8);
+ if (status < 0)
+ pr_warning("Simple test 8: FAILURE: spi_write_then_read() "
+ "failed with status %d\n", status);
+ else
+ pr_info("Simple test 8: SUCCESS!\n");
+
+ pr_info("Simple test 9: write 14 bytes, read back 14 bytes garbage "
+ "in 16bit mode (see if we overflow FIFO)\n");
+ status = spi_write_then_read(spi, &txbuf[0], 14, &rxbuf[0], 14);
+ if (status < 0)
+ pr_warning("Simple test 9: FAILURE: failed with status %d "
+ "(probably FIFO overrun)\n", status);
+ else
+ pr_info("Simple test 9: SUCCESS!\n");
+
+ pr_info("Simple test 10: write %d bytes with spi_write(), "
+ "read %d bytes garbage with spi_read() in 16bit mode\n",
+ DMA_TEST_SIZE, DMA_TEST_SIZE);
+ status = spi_write(spi, &bigtxbuf_virtual[0], DMA_TEST_SIZE);
+ if (status < 0)
+ pr_warning("Simple test 10 step 1: FAILURE: spi_write() "
+ "failed with status %d (probably FIFO overrun)\n",
+ status);
+ else
+ pr_info("Simple test 10 step 1: SUCCESS!\n");
+
+ status = spi_read(spi, &bigrxbuf_virtual[0], DMA_TEST_SIZE);
+ if (status < 0)
+ pr_warning("Simple test 10 step 2: FAILURE: spi_read() "
+ "failed with status %d (probably FIFO overrun)\n",
+ status);
+ else
+ pr_info("Simple test 10: SUCCESS!\n");
+
+ status = sprintf(buf, "loop test complete\n");
+ kfree(bigrxbuf_virtual);
+ kfree(bigtxbuf_virtual);
+ out:
+ mutex_unlock(&p_dummy->lock);
+ return status;
+}
+
+static DEVICE_ATTR(looptest, S_IRUGO, dummy_looptest, NULL);
+
+static int __devinit pl022_dummy_probe(struct spi_device *spi)
+{
+ struct dummy *p_dummy;
+ int status;
+
+ dev_info(&spi->dev, "probing dummy SPI device\n");
+
+ p_dummy = kzalloc(sizeof *p_dummy, GFP_KERNEL);
+ if (!p_dummy)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, p_dummy);
+ mutex_init(&p_dummy->lock);
+
+ /* sysfs hook */
+ status = device_create_file(&spi->dev, &dev_attr_looptest);
+ if (status) {
+ dev_dbg(&spi->dev, "device_create_file looptest failure.\n");
+ goto out_dev_create_looptest_failed;
+ }
+
+ return 0;
+
+out_dev_create_looptest_failed:
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(p_dummy);
+ return status;
+}
+
+static int __devexit pl022_dummy_remove(struct spi_device *spi)
+{
+ struct dummy *p_dummy = dev_get_drvdata(&spi->dev);
+
+ dev_info(&spi->dev, "removing dummy SPI device\n");
+ device_remove_file(&spi->dev, &dev_attr_looptest);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(p_dummy);
+
+ return 0;
+}
+
+static struct spi_driver pl022_dummy_driver = {
+ .driver = {
+ .name = "spi-dummy",
+ .owner = THIS_MODULE,
+ },
+ .probe = pl022_dummy_probe,
+ .remove = __devexit_p(pl022_dummy_remove),
+};
+
+static int __init pl022_init_dummy(void)
+{
+ return spi_register_driver(&pl022_dummy_driver);
+}
+
+static void __exit pl022_exit_dummy(void)
+{
+ spi_unregister_driver(&pl022_dummy_driver);
+}
+
+module_init(pl022_init_dummy);
+module_exit(pl022_exit_dummy);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("PL022 SSP/SPI DUMMY Linux driver");
+MODULE_LICENSE("GPL");
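The loopback tests above only run when user space reads the looptest attribute created by DEVICE_ATTR(); the per-test results go to the kernel log, while the read itself returns "loop test complete". A minimal user-space sketch follows, assuming the dummy chip binds as spi0.0 (bus 0, chip select 0) — the exact sysfs path is an assumption and depends on the system:

/* Sketch: trigger dummy_looptest() by reading its sysfs attribute */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed location; adjust to wherever the spi-dummy device binds */
	const char *path = "/sys/bus/spi/devices/spi0.0/looptest";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("open looptest");
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf);	/* "loop test complete" on success */
	fclose(f);
	return EXIT_SUCCESS;
}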
diff --git a/arch/arm/mach-u300/gpio.c b/arch/arm/mach-u300/gpio.c
index 308cdb197a9..63c8f27fb15 100644
--- a/arch/arm/mach-u300/gpio.c
+++ b/arch/arm/mach-u300/gpio.c
@@ -25,11 +25,6 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
-/* Need access to SYSCON registers for PADmuxing */
-#include <mach/syscon.h>
-
-#include "padmux.h"
-
/* Reference to GPIO block clock */
static struct clk *clk;
@@ -606,14 +601,6 @@ static int __init gpio_probe(struct platform_device *pdev)
writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR);
#endif
- /* Set up some padmuxing here */
-#ifdef CONFIG_MMC
- pmx_set_mission_mode_mmc();
-#endif
-#ifdef CONFIG_SPI_PL022
- pmx_set_mission_mode_spi();
-#endif
-
gpio_set_initial_values();
for (num_irqs = 0 ; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) {
diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c
new file mode 100644
index 00000000000..10be1f888b2
--- /dev/null
+++ b/arch/arm/mach-u300/i2c.c
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/mach-u300/i2c.c
+ *
+ * Copyright (C) 2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Register board i2c devices
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <mach/irqs.h>
+
+static struct i2c_board_info __initdata bus0_i2c_board_info[] = {
+ {
+ .type = "ab3100",
+ .addr = 0x48,
+ .irq = IRQ_U300_IRQ0_EXT,
+ },
+};
+
+static struct i2c_board_info __initdata bus1_i2c_board_info[] = {
+#ifdef CONFIG_MACH_U300_BS335
+ {
+ .type = "fwcam",
+ .addr = 0x10,
+ },
+ {
+ .type = "fwcam",
+ .addr = 0x5d,
+ },
+#else
+ { },
+#endif
+};
+
+void __init u300_i2c_register_board_devices(void)
+{
+ i2c_register_board_info(0, bus0_i2c_board_info,
+ ARRAY_SIZE(bus0_i2c_board_info));
+ i2c_register_board_info(1, bus1_i2c_board_info,
+ ARRAY_SIZE(bus1_i2c_board_info));
+}
diff --git a/arch/arm/mach-u300/i2c.h b/arch/arm/mach-u300/i2c.h
new file mode 100644
index 00000000000..485c02e5c06
--- /dev/null
+++ b/arch/arm/mach-u300/i2c.h
@@ -0,0 +1,23 @@
+/*
+ * arch/arm/mach-u300/i2c.h
+ *
+ * Copyright (C) 2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Register board i2c devices
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+
+#ifndef MACH_U300_I2C_H
+#define MACH_U300_I2C_H
+
+#ifdef CONFIG_I2C_STU300
+void __init u300_i2c_register_board_devices(void);
+#else
+/* Compile out this stuff if no I2C adapter is available */
+static inline void __init u300_i2c_register_board_devices(void)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-u300/include/mach/memory.h b/arch/arm/mach-u300/include/mach/memory.h
index bf134bcc129..ab000df7fc0 100644
--- a/arch/arm/mach-u300/include/mach/memory.h
+++ b/arch/arm/mach-u300/include/mach/memory.h
@@ -35,6 +35,14 @@
#endif
/*
+ * TCM memory whereabouts
+ */
+#define ITCM_OFFSET 0xffff2000
+#define ITCM_END 0xffff3fff
+#define DTCM_OFFSET 0xffff4000
+#define DTCM_END 0xffff5fff
+
+/*
* We enable a real big DMA buffer if need be.
*/
#define CONSISTENT_DMA_SIZE SZ_4M
diff --git a/arch/arm/mach-u300/include/mach/syscon.h b/arch/arm/mach-u300/include/mach/syscon.h
index 1c90d1b1ccb..7444f5c7da9 100644
--- a/arch/arm/mach-u300/include/mach/syscon.h
+++ b/arch/arm/mach-u300/include/mach/syscon.h
@@ -240,8 +240,13 @@
#define U300_SYSCON_PMC1LR_CDI_MASK (0xC000)
#define U300_SYSCON_PMC1LR_CDI_CDI (0x0000)
#define U300_SYSCON_PMC1LR_CDI_EMIF (0x4000)
+#ifdef CONFIG_MACH_U300_BS335
+#define U300_SYSCON_PMC1LR_CDI_CDI2 (0x8000)
+#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO (0xC000)
+#elif defined(CONFIG_MACH_U300_BS365)
#define U300_SYSCON_PMC1LR_CDI_GPIO (0x8000)
#define U300_SYSCON_PMC1LR_CDI_WCDMA (0xC000)
+#endif
#define U300_SYSCON_PMC1LR_PDI_MASK (0x3000)
#define U300_SYSCON_PMC1LR_PDI_PDI (0x0000)
#define U300_SYSCON_PMC1LR_PDI_EGG (0x1000)
@@ -345,19 +350,69 @@
#define U300_SYSCON_MMCR_MASK (0x0003)
#define U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE (0x0002)
#define U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE (0x0001)
-
+/* Pull up/down control (R/W) */
+#define U300_SYSCON_PUCR (0x104)
+#define U300_SYSCON_PUCR_EMIF_1_WAIT_N_PU_ENABLE (0x0200)
+#define U300_SYSCON_PUCR_EMIF_1_NFIF_READY_PU_ENABLE (0x0100)
+#define U300_SYSCON_PUCR_EMIF_1_16BIT_PU_ENABLE (0x0080)
+#define U300_SYSCON_PUCR_EMIF_1_8BIT_PU_ENABLE (0x0040)
+#define U300_SYSCON_PUCR_KEY_IN_PU_EN_MASK (0x003F)
+/* Padmux 2 control */
+#define U300_SYSCON_PMC2R (0x100)
+#define U300_SYSCON_PMC2R_APP_MISC_0_MASK (0x00C0)
+#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO (0x0000)
+#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM (0x0040)
+#define U300_SYSCON_PMC2R_APP_MISC_0_MMC (0x0080)
+#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2 (0x00C0)
+#define U300_SYSCON_PMC2R_APP_MISC_1_MASK (0x0300)
+#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO (0x0000)
+#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM (0x0100)
+#define U300_SYSCON_PMC2R_APP_MISC_1_MMC (0x0200)
+#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2 (0x0300)
+#define U300_SYSCON_PMC2R_APP_MISC_2_MASK (0x0C00)
+#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO (0x0000)
+#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM (0x0400)
+#define U300_SYSCON_PMC2R_APP_MISC_2_MMC (0x0800)
+#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2 (0x0C00)
+#define U300_SYSCON_PMC2R_APP_MISC_3_MASK (0x3000)
+#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO (0x0000)
+#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM (0x1000)
+#define U300_SYSCON_PMC2R_APP_MISC_3_MMC (0x2000)
+#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2 (0x3000)
+#define U300_SYSCON_PMC2R_APP_MISC_4_MASK (0xC000)
+#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO (0x0000)
+#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM (0x4000)
+#define U300_SYSCON_PMC2R_APP_MISC_4_MMC (0x8000)
+#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO (0xC000)
/* TODO: More SYSCON registers missing */
#define U300_SYSCON_PMC3R (0x10c)
#define U300_SYSCON_PMC3R_APP_MISC_11_MASK (0xc000)
#define U300_SYSCON_PMC3R_APP_MISC_11_SPI (0x4000)
#define U300_SYSCON_PMC3R_APP_MISC_10_MASK (0x3000)
#define U300_SYSCON_PMC3R_APP_MISC_10_SPI (0x1000)
-/* TODO: Missing other configs, I just added the SPI stuff */
-
+/* TODO: Missing other configs */
+#define U300_SYSCON_PMC4R (0x168)
+#define U300_SYSCON_PMC4R_APP_MISC_12_MASK (0x0003)
+#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO (0x0000)
+#define U300_SYSCON_PMC4R_APP_MISC_13_MASK (0x000C)
+#define U300_SYSCON_PMC4R_APP_MISC_13_CDI (0x0000)
+#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA (0x0004)
+#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2 (0x0008)
+#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO (0x000C)
+#define U300_SYSCON_PMC4R_APP_MISC_14_MASK (0x0030)
+#define U300_SYSCON_PMC4R_APP_MISC_14_CDI (0x0000)
+#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA (0x0010)
+#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2 (0x0020)
+#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO (0x0030)
+#define U300_SYSCON_PMC4R_APP_MISC_16_MASK (0x0300)
+#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13 (0x0000)
+#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS (0x0100)
+#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N (0x0200)
/* SYS_0_CLK_CONTROL first clock control 16bit (R/W) */
#define U300_SYSCON_S0CCR (0x120)
#define U300_SYSCON_S0CCR_FIELD_MASK (0x43FF)
#define U300_SYSCON_S0CCR_CLOCK_REQ (0x4000)
+#define U300_SYSCON_S0CCR_CLOCK_REQ_MONITOR (0x2000)
#define U300_SYSCON_S0CCR_CLOCK_INV (0x0200)
#define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK (0x001E)
@@ -375,6 +430,7 @@
#define U300_SYSCON_S1CCR (0x124)
#define U300_SYSCON_S1CCR_FIELD_MASK (0x43FF)
#define U300_SYSCON_S1CCR_CLOCK_REQ (0x4000)
+#define U300_SYSCON_S1CCR_CLOCK_REQ_MONITOR (0x2000)
#define U300_SYSCON_S1CCR_CLOCK_INV (0x0200)
#define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK (0x001E)
@@ -393,6 +449,7 @@
#define U300_SYSCON_S2CCR_FIELD_MASK (0xC3FF)
#define U300_SYSCON_S2CCR_CLK_STEAL (0x8000)
#define U300_SYSCON_S2CCR_CLOCK_REQ (0x4000)
+#define U300_SYSCON_S2CCR_CLOCK_REQ_MONITOR (0x2000)
#define U300_SYSCON_S2CCR_CLOCK_INV (0x0200)
#define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK (0x001E)
@@ -425,6 +482,44 @@
#define U300_SYSCON_MCR_PMGEN_CR_0_EMIF_0_SDRAM (0x000C)
#define U300_SYSCON_MCR_PM1G_MODE_ENABLE (0x0002)
#define U300_SYSCON_MCR_PMTG5_MODE_ENABLE (0x0001)
+/* SC_PLL_IRQ_CONTROL 16bit (R/W) */
+#define U300_SYSCON_PICR (0x0130)
+#define U300_SYSCON_PICR_MASK (0x00FF)
+#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_LOW_ENABLE (0x0080)
+#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_HIGH_ENABLE (0x0040)
+#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_LOW_ENABLE (0x0020)
+#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_HIGH_ENABLE (0x0010)
+#define U300_SYSCON_PICR_IRQMASK_PLL13_UNLOCK_ENABLE (0x0008)
+#define U300_SYSCON_PICR_IRQMASK_PLL13_LOCK_ENABLE (0x0004)
+#define U300_SYSCON_PICR_IRQMASK_PLL208_UNLOCK_ENABLE (0x0002)
+#define U300_SYSCON_PICR_IRQMASK_PLL208_LOCK_ENABLE (0x0001)
+/* SC_PLL_IRQ_STATUS 16 bit (R/-) */
+#define U300_SYSCON_PISR (0x0134)
+#define U300_SYSCON_PISR_MASK (0x000F)
+#define U300_SYSCON_PISR_PLL13_UNLOCK_IND (0x0008)
+#define U300_SYSCON_PISR_PLL13_LOCK_IND (0x0004)
+#define U300_SYSCON_PISR_PLL208_UNLOCK_IND (0x0002)
+#define U300_SYSCON_PISR_PLL208_LOCK_IND (0x0001)
+/* SC_PLL_IRQ_CLEAR 16 bit (-/W) */
+#define U300_SYSCON_PICLR (0x0138)
+#define U300_SYSCON_PICLR_MASK (0x000F)
+#define U300_SYSCON_PICLR_RWMASK (0x0000)
+#define U300_SYSCON_PICLR_PLL13_UNLOCK_SC (0x0008)
+#define U300_SYSCON_PICLR_PLL13_LOCK_SC (0x0004)
+#define U300_SYSCON_PICLR_PLL208_UNLOCK_SC (0x0002)
+#define U300_SYSCON_PICLR_PLL208_LOCK_SC (0x0001)
+/* CAMIF_CONTROL 16 bit (-/W) */
+#define U300_SYSCON_CICR (0x013C)
+#define U300_SYSCON_CICR_MASK (0x0FFF)
+#define U300_SYSCON_CICR_APP_SUBLVDS_TESTMODE_MASK (0x0F00)
+#define U300_SYSCON_CICR_APP_SUBLVDS_TESTMODE_PORT1 (0x0C00)
+#define U300_SYSCON_CICR_APP_SUBLVDS_TESTMODE_PORT0 (0x0300)
+#define U300_SYSCON_CICR_APP_SUBLVDS_RESCON_MASK (0x00F0)
+#define U300_SYSCON_CICR_APP_SUBLVDS_RESCON_PORT1 (0x00C0)
+#define U300_SYSCON_CICR_APP_SUBLVDS_RESCON_PORT0 (0x0030)
+#define U300_SYSCON_CICR_APP_SUBLVDS_PWR_DWN_N_MASK (0x000F)
+#define U300_SYSCON_CICR_APP_SUBLVDS_PWR_DWN_N_PORT1 (0x000C)
+#define U300_SYSCON_CICR_APP_SUBLVDS_PWR_DWN_N_PORT0 (0x0003)
/* Clock activity observability register 0 */
#define U300_SYSCON_C0OAR (0x140)
#define U300_SYSCON_C0OAR_MASK (0xFFFF)
@@ -513,7 +608,7 @@
/**
* CPU medium frequency in MHz
*/
-#define SYSCON_CPU_CLOCK_MEDIUM 104
+#define SYSCON_CPU_CLOCK_MEDIUM 52
/**
* CPU low frequency in MHz
*/
@@ -527,7 +622,7 @@
/**
* EMIF medium frequency in MHz
*/
-#define SYSCON_EMIF_CLOCK_MEDIUM 104
+#define SYSCON_EMIF_CLOCK_MEDIUM 52
/**
* EMIF low frequency in MHz
*/
@@ -541,7 +636,7 @@
/**
* AHB medium frequency in MHz
*/
-#define SYSCON_AHB_CLOCK_MEDIUM 52
+#define SYSCON_AHB_CLOCK_MEDIUM 26
/**
* AHB low frequency in MHz
*/
@@ -553,6 +648,15 @@ enum syscon_busmaster {
SYSCON_BM_VIDEO_ENC
};
+/* Select a resistor or a set of resistors */
+enum syscon_pull_up_down {
+ SYSCON_PU_KEY_IN_EN,
+ SYSCON_PU_EMIF_1_8_BIT_EN,
+ SYSCON_PU_EMIF_1_16_BIT_EN,
+ SYSCON_PU_EMIF_1_NFIF_READY_EN,
+ SYSCON_PU_EMIF_1_NFIF_WAIT_N_EN,
+};
+
/*
* Note that this array must match the order of the array "clk_reg"
* in syscon.c
@@ -575,6 +679,7 @@ enum syscon_clk {
SYSCON_CLKCONTROL_SPI,
SYSCON_CLKCONTROL_I2S0_CORE,
SYSCON_CLKCONTROL_I2S1_CORE,
+ SYSCON_CLKCONTROL_UART1,
SYSCON_CLKCONTROL_AAIF,
SYSCON_CLKCONTROL_AHB,
SYSCON_CLKCONTROL_APEX,
@@ -604,7 +709,8 @@ enum syscon_sysclk_mode {
enum syscon_sysclk_req {
SYSCON_SYSCLKREQ_DISABLED,
- SYSCON_SYSCLKREQ_ACTIVE_LOW
+ SYSCON_SYSCLKREQ_ACTIVE_LOW,
+ SYSCON_SYSCLKREQ_MONITOR
};
enum syscon_clk_mode {
diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c
index 585cc013639..7b6b016786b 100644
--- a/arch/arm/mach-u300/mmc.c
+++ b/arch/arm/mach-u300/mmc.c
@@ -19,15 +19,16 @@
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>
#include <linux/gpio.h>
+#include <linux/amba/mmci.h>
-#include <asm/mach/mmc.h>
#include "mmc.h"
+#include "padmux.h"
struct mmci_card_event {
struct input_dev *mmc_input;
int mmc_inserted;
struct work_struct workq;
- struct mmc_platform_data mmc0_plat_data;
+ struct mmci_platform_data mmc0_plat_data;
};
static unsigned int mmc_status(struct device *dev)
@@ -146,6 +147,7 @@ int __devinit mmc_init(struct amba_device *adev)
{
struct mmci_card_event *mmci_card;
struct device *mmcsd_device = &adev->dev;
+ struct pmx *pmx;
int ret = 0;
mmci_card = kzalloc(sizeof(struct mmci_card_event), GFP_KERNEL);
@@ -158,6 +160,8 @@ int __devinit mmc_init(struct amba_device *adev)
mmci_card->mmc0_plat_data.status = mmc_status;
mmci_card->mmc0_plat_data.gpio_wp = -1;
mmci_card->mmc0_plat_data.gpio_cd = -1;
+ mmci_card->mmc0_plat_data.capabilities = MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA;
mmcsd_device->platform_data = (void *) &mmci_card->mmc0_plat_data;
@@ -207,6 +211,20 @@ int __devinit mmc_init(struct amba_device *adev)
input_set_drvdata(mmci_card->mmc_input, mmci_card);
+ /*
+ * Setup padmuxing for MMC. Since this must always be
+ * compiled into the kernel, pmx is never released.
+ */
+ pmx = pmx_get(mmcsd_device, U300_APP_PMX_MMC_SETTING);
+
+ if (IS_ERR(pmx))
+ pr_warning("Could not get padmux handle\n");
+ else {
+ ret = pmx_activate(mmcsd_device, pmx);
+ if (IS_ERR_VALUE(ret))
+ pr_warning("Could not activate padmuxing\n");
+ }
+
ret = gpio_register_callback(U300_GPIO_PIN_MMC_CD, mmci_callback,
mmci_card);
diff --git a/arch/arm/mach-u300/padmux.c b/arch/arm/mach-u300/padmux.c
index f3664564f08..4c93c6cefd3 100644
--- a/arch/arm/mach-u300/padmux.c
+++ b/arch/arm/mach-u300/padmux.c
@@ -6,53 +6,362 @@
* Copyright (C) 2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* U300 PADMUX functions
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
+ * Author: Martin Persson <martin.persson@stericsson.com>
*/
-#include <linux/io.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <mach/u300-regs.h>
#include <mach/syscon.h>
-
#include "padmux.h"
-/* Set the PAD MUX to route the MMC reader correctly to GPIO0. */
-void pmx_set_mission_mode_mmc(void)
-{
- u16 val;
-
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC1LR);
- val &= ~U300_SYSCON_PMC1LR_MMCSD_MASK;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC1LR);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC1HR);
- val &= ~U300_SYSCON_PMC1HR_APP_GPIO_1_MASK;
- val |= U300_SYSCON_PMC1HR_APP_GPIO_1_MMC;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC1HR);
-}
-
-void pmx_set_mission_mode_spi(void)
-{
- u16 val;
-
- /* Set up padmuxing so the SPI port and its chipselects are active */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC1HR);
- /*
- * Activate the SPI port (disable the use of these pins for generic
- * GPIO, DSP, AAIF
- */
- val &= ~U300_SYSCON_PMC1HR_APP_SPI_2_MASK;
- val |= U300_SYSCON_PMC1HR_APP_SPI_2_SPI;
- /*
- * Use GPIO pin SPI CS1 for CS1 actually (it can be used for other
- * things also)
- */
- val &= ~U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK;
- val |= U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI;
- /*
- * Use GPIO pin SPI CS2 for CS2 actually (it can be used for other
- * things also)
- */
- val &= ~U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK;
- val |= U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC1HR);
+static DEFINE_MUTEX(pmx_mutex);
+
+const u32 pmx_registers[] = {
+ (U300_SYSCON_VBASE + U300_SYSCON_PMC1LR),
+ (U300_SYSCON_VBASE + U300_SYSCON_PMC1HR),
+ (U300_SYSCON_VBASE + U300_SYSCON_PMC2R),
+ (U300_SYSCON_VBASE + U300_SYSCON_PMC3R),
+ (U300_SYSCON_VBASE + U300_SYSCON_PMC4R)
+};
+
+/* High level functionality */
+
+/* Lazy dog:
+ * onmask = {
+ * {"PMC1LR" mask, "PMC1LR" value},
+ * {"PMC1HR" mask, "PMC1HR" value},
+ * {"PMC2R" mask, "PMC2R" value},
+ * {"PMC3R" mask, "PMC3R" value},
+ * {"PMC4R" mask, "PMC4R" value}
+ * }
+ */
+static struct pmx mmc_setting = {
+ .setting = U300_APP_PMX_MMC_SETTING,
+ .default_on = false,
+ .activated = false,
+ .name = "MMC",
+ .onmask = {
+ {U300_SYSCON_PMC1LR_MMCSD_MASK,
+ U300_SYSCON_PMC1LR_MMCSD_MMCSD},
+ {0, 0},
+ {0, 0},
+ {0, 0},
+ {U300_SYSCON_PMC4R_APP_MISC_12_MASK,
+ U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO}
+ },
+};
+
+static struct pmx spi_setting = {
+ .setting = U300_APP_PMX_SPI_SETTING,
+ .default_on = false,
+ .activated = false,
+ .name = "SPI",
+ .onmask = {{0, 0},
+ {U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
+ U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
+ U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ },
+};
+
+/* Available padmux settings */
+static struct pmx *pmx_settings[] = {
+ &mmc_setting,
+ &spi_setting,
+};
+
+static void update_registers(struct pmx *pmx, bool activate)
+{
+ u16 regval, val, mask;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmx_registers); i++) {
+ if (activate)
+ val = pmx->onmask[i].val;
+ else
+ val = 0;
+
+ mask = pmx->onmask[i].mask;
+ if (mask != 0) {
+ regval = readw(pmx_registers[i]);
+ regval &= ~mask;
+ regval |= val;
+ writew(regval, pmx_registers[i]);
+ }
+ }
+}
+
+struct pmx *pmx_get(struct device *dev, enum pmx_settings setting)
+{
+ int i;
+ struct pmx *pmx = ERR_PTR(-ENOENT);
+
+ if (dev == NULL)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pmx_mutex);
+ for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
+
+ if (setting == pmx_settings[i]->setting) {
+
+ if (pmx_settings[i]->dev != NULL) {
+ WARN(1, "padmux: required setting "
+ "in use by another consumer\n");
+ } else {
+ pmx = pmx_settings[i];
+ pmx->dev = dev;
+ dev_dbg(dev, "padmux: setting nr %d is now "
+ "bound to %s and ready to use\n",
+ setting, dev_name(dev));
+ break;
+ }
+ }
+ }
+ mutex_unlock(&pmx_mutex);
+
+ return pmx;
+}
+EXPORT_SYMBOL(pmx_get);
+
+int pmx_put(struct device *dev, struct pmx *pmx)
+{
+ int i;
+ int ret = -ENOENT;
+
+ if (pmx == NULL || dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&pmx_mutex);
+ for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
+
+ if (pmx->setting == pmx_settings[i]->setting) {
+
+ if (dev != pmx->dev) {
+ WARN(1, "padmux: cannot release handle as "
+ "it is bound to another consumer\n");
+ ret = -EINVAL;
+ break;
+ } else {
+ pmx_settings[i]->dev = NULL;
+ ret = 0;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&pmx_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(pmx_put);
+
+int pmx_activate(struct device *dev, struct pmx *pmx)
+{
+ int i, j, ret;
+ ret = 0;
+
+ if (pmx == NULL || dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&pmx_mutex);
+
+ /* Make sure the required bits are not used */
+ for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
+
+ if (pmx_settings[i]->dev == NULL || pmx_settings[i] == pmx)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) {
+
+ if (pmx_settings[i]->onmask[j].mask & pmx->
+ onmask[j].mask) {
+ /* More than one entry on the same bits */
+ WARN(1, "padmux: cannot activate "
+ "setting. Bit conflict with "
+ "an active setting\n");
+
+ ret = -EUSERS;
+ goto exit;
+ }
+ }
+ }
+ update_registers(pmx, true);
+ pmx->activated = true;
+ dev_dbg(dev, "padmux: setting nr %d is activated\n",
+ pmx->setting);
+
+exit:
+ mutex_unlock(&pmx_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(pmx_activate);
+
+int pmx_deactivate(struct device *dev, struct pmx *pmx)
+{
+ int i;
+ int ret = -ENOENT;
+
+ if (pmx == NULL || dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&pmx_mutex);
+ for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
+
+ if (pmx_settings[i]->dev == NULL)
+ continue;
+
+ if (pmx->setting == pmx_settings[i]->setting) {
+
+ if (dev != pmx->dev) {
+ WARN(1, "padmux: cannot deactivate "
+ "pmx setting as it was activated "
+ "by another consumer\n");
+
+ ret = -EBUSY;
+ continue;
+ } else {
+ update_registers(pmx, false);
+ pmx_settings[i]->dev = NULL;
+ pmx->activated = false;
+ ret = 0;
+ dev_dbg(dev, "padmux: setting nr %d is deactivated",
+ pmx->setting);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&pmx_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(pmx_deactivate);
+
+/*
+ * For internal use only. If it is to be exported,
+ * it should be reentrant. Note that pmx_activate()
+ * (i.e. runtime settings) always overrides the default settings.
+ */
+static int pmx_set_default(void)
+{
+ /* Used to identify several entries on the same bits */
+ u16 modbits[ARRAY_SIZE(pmx_registers)];
+
+ int i, j;
+
+ memset(modbits, 0, ARRAY_SIZE(pmx_registers) * sizeof(u16));
+
+ for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
+
+ if (!pmx_settings[i]->default_on)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) {
+
+ /* Make sure there is only one entry on the same bits */
+ if (modbits[j] & pmx_settings[i]->onmask[j].mask) {
+ BUG();
+ return -EUSERS;
+ }
+ modbits[j] |= pmx_settings[i]->onmask[j].mask;
+ }
+ update_registers(pmx_settings[i], true);
+ }
+ return 0;
}
+
+#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
+static int pmx_show(struct seq_file *s, void *data)
+{
+ int i;
+ seq_printf(s, "-------------------------------------------------\n");
+ seq_printf(s, "SETTING BOUND TO DEVICE STATE\n");
+ seq_printf(s, "-------------------------------------------------\n");
+ mutex_lock(&pmx_mutex);
+ for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
+ /* Format pmx and device name nicely */
+ char cdp[33];
+ int chars;
+
+ chars = snprintf(&cdp[0], 17, "%s", pmx_settings[i]->name);
+ while (chars < 16) {
+ cdp[chars] = ' ';
+ chars++;
+ }
+ chars = snprintf(&cdp[16], 17, "%s", pmx_settings[i]->dev ?
+ dev_name(pmx_settings[i]->dev) : "N/A");
+ while (chars < 16) {
+ cdp[chars+16] = ' ';
+ chars++;
+ }
+ cdp[32] = '\0';
+
+ seq_printf(s,
+ "%s\t%s\n",
+ &cdp[0],
+ pmx_settings[i]->activated ?
+ "ACTIVATED" : "DEACTIVATED"
+ );
+
+ }
+ mutex_unlock(&pmx_mutex);
+ return 0;
+}
+
+static int pmx_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmx_show, NULL);
+}
+
+static const struct file_operations pmx_operations = {
+ .owner = THIS_MODULE,
+ .open = pmx_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init init_pmx_read_debugfs(void)
+{
+ /* Expose a simple debugfs interface to view pmx settings */
+ (void) debugfs_create_file("padmux", S_IFREG | S_IRUGO,
+ NULL, NULL,
+ &pmx_operations);
+ return 0;
+}
+
+/*
+ * This needs to come in after the core_initcall(),
+ * because debugfs is not available until
+ * the subsystems come up.
+ */
+module_init(init_pmx_read_debugfs);
+#endif
+
+static int __init pmx_init(void)
+{
+ int ret;
+
+ ret = pmx_set_default();
+
+ if (IS_ERR_VALUE(ret))
+ pr_crit("padmux: default settings could not be set\n");
+
+ return 0;
+}
+
+/* Should be initialized before consumers */
+core_initcall(pmx_init);
diff --git a/arch/arm/mach-u300/padmux.h b/arch/arm/mach-u300/padmux.h
index 8c2099ac504..6e8b8606409 100644
--- a/arch/arm/mach-u300/padmux.h
+++ b/arch/arm/mach-u300/padmux.h
@@ -6,14 +6,34 @@
* Copyright (C) 2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* U300 PADMUX API
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
+ * Author: Martin Persson <martin.persson@stericsson.com>
*/
#ifndef __MACH_U300_PADMUX_H
#define __MACH_U300_PADMUX_H
-void pmx_set_mission_mode_mmc(void);
-void pmx_set_mission_mode_spi(void);
+enum pmx_settings {
+ U300_APP_PMX_MMC_SETTING,
+ U300_APP_PMX_SPI_SETTING
+};
+
+struct pmx_onmask {
+ u16 mask; /* Mask bits */
+ u16 val; /* Value when active */
+};
+
+struct pmx {
+ struct device *dev;
+ enum pmx_settings setting;
+ char *name;
+ bool activated;
+ bool default_on;
+ struct pmx_onmask onmask[];
+};
+
+struct pmx *pmx_get(struct device *dev, enum pmx_settings setting);
+int pmx_put(struct device *dev, struct pmx *pmx);
+int pmx_activate(struct device *dev, struct pmx *pmx);
+int pmx_deactivate(struct device *dev, struct pmx *pmx);
#endif
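The declarations above form a small get/activate/deactivate/put consumer API, used by the mmc.c and spi.c hunks in this patch. A minimal consumer sketch, assuming only what padmux.h and padmux.c above define (the device pointer and function name are illustrative):

/* Illustrative consumer of the padmux API declared above */
#include <linux/device.h>
#include <linux/err.h>
#include "padmux.h"

static int example_enable_spi_pins(struct device *dev)
{
	struct pmx *pmx;
	int ret;

	pmx = pmx_get(dev, U300_APP_PMX_SPI_SETTING);
	if (IS_ERR(pmx))
		return PTR_ERR(pmx);	/* unknown setting or bound elsewhere */

	ret = pmx_activate(dev, pmx);
	if (IS_ERR_VALUE(ret)) {
		/* e.g. -EUSERS: bit conflict with an already active setting */
		pmx_put(dev, pmx);
		return ret;
	}

	/*
	 * pmx_deactivate(dev, pmx) would write the masked bits back to zero
	 * and drop the binding; the MMC and SPI users in this patch keep
	 * their settings active for the lifetime of the system instead.
	 */
	return 0;
}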
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
new file mode 100644
index 00000000000..f0e887bea30
--- /dev/null
+++ b/arch/arm/mach-u300/spi.c
@@ -0,0 +1,124 @@
+/*
+ * arch/arm/mach-u300/spi.c
+ *
+ * Copyright (C) 2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/spi/spi.h>
+#include <linux/amba/pl022.h>
+#include <linux/err.h>
+#include "padmux.h"
+
+/*
+ * The following is for the actual devices on the SSP/SPI bus
+ */
+#ifdef CONFIG_MACH_U300_SPIDUMMY
+static void select_dummy_chip(u32 chipselect)
+{
+ pr_debug("CORE: %s called with CS=0x%x (%s)\n",
+ __func__,
+ chipselect,
+ chipselect ? "unselect chip" : "select chip");
+ /*
+ * Here you would write the chip select value to the GPIO pins if
+ * this was a real chip (but this is a loopback dummy).
+ */
+}
+
+struct pl022_config_chip dummy_chip_info = {
+ /* Nominally this is LOOPBACK_DISABLED, but this is our dummy chip! */
+ .lbm = LOOPBACK_ENABLED,
+ /*
+ * POLLING_TRANSFER and INTERRUPT_TRANSFER are available;
+ * DMA_TRANSFER does not work
+ */
+ .com_mode = INTERRUPT_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ /* We can only act as master but SSP_SLAVE is possible in theory */
+ .hierarchy = SSP_MASTER,
+ /* 0 = drive TX even as slave, 1 = do not drive TX as slave */
+ .slave_tx_disable = 0,
+ /* LSB first */
+ .endian_tx = SSP_TX_LSB,
+ .endian_rx = SSP_RX_LSB,
+ .data_size = SSP_DATA_BITS_8, /* used to be 12 in some default */
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
+ .clk_phase = SSP_CLK_SECOND_EDGE,
+ .clk_pol = SSP_CLK_POL_IDLE_LOW,
+ .ctrl_len = SSP_BITS_12,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ /*
+ * This is where you insert a call to a function to enable CS
+ * (usually GPIO) for a certain chip.
+ */
+ .cs_control = select_dummy_chip,
+};
+#endif
+
+static struct spi_board_info u300_spi_devices[] = {
+#ifdef CONFIG_MACH_U300_SPIDUMMY
+ {
+ /* A dummy chip used for loopback tests */
+ .modalias = "spi-dummy",
+ /* Really dummy, pass in additional chip config here */
+ .platform_data = NULL,
+ /* This defines how the controller shall handle the device */
+ .controller_data = &dummy_chip_info,
+ /* .irq - no external IRQ routed from this device */
+ .max_speed_hz = 1000000,
+ .bus_num = 0, /* Only one bus on this chip */
+ .chip_select = 0,
+ /* Mode 0 means chip select active low; add SPI_CS_HIGH here if needed */
+ .mode = 0,
+ },
+#endif
+};
+
+static struct pl022_ssp_controller ssp_platform_data = {
+ /* If you have several SPI buses this varies, we have only bus 0 */
+ .bus_id = 0,
+ /* Set this to 1 when we think we got DMA working */
+ .enable_dma = 0,
+ /*
+ * On the APP CPU GPIO 4, 5 and 6 are connected as generic
+ * chip selects for SPI. (Same on U330, U335 and U365.)
+ * TODO: make sure the GPIO driver can select these properly
+ * and do padmuxing accordingly too.
+ */
+ .num_chipselect = 3,
+};
+
+
+void __init u300_spi_init(struct amba_device *adev)
+{
+ struct pmx *pmx;
+
+ adev->dev.platform_data = &ssp_platform_data;
+ /*
+ * Setup padmuxing for SPI. Since this must always be
+ * compiled into the kernel, pmx is never released.
+ */
+ pmx = pmx_get(&adev->dev, U300_APP_PMX_SPI_SETTING);
+
+ if (IS_ERR(pmx))
+ dev_warn(&adev->dev, "Could not get padmux handle\n");
+ else {
+ int ret;
+
+ ret = pmx_activate(&adev->dev, pmx);
+ if (IS_ERR_VALUE(ret))
+ dev_warn(&adev->dev, "Could not activate padmuxing\n");
+ }
+}
+
+void __init u300_spi_register_board_devices(void)
+{
+ /* Register any SPI devices */
+ spi_register_board_info(u300_spi_devices, ARRAY_SIZE(u300_spi_devices));
+}
diff --git a/arch/arm/mach-u300/spi.h b/arch/arm/mach-u300/spi.h
new file mode 100644
index 00000000000..bd3d867e240
--- /dev/null
+++ b/arch/arm/mach-u300/spi.h
@@ -0,0 +1,26 @@
+/*
+ * arch/arm/mach-u300/spi.h
+ *
+ * Copyright (C) 2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#ifndef SPI_H
+#define SPI_H
+#include <linux/amba/bus.h>
+
+#ifdef CONFIG_SPI_PL022
+void __init u300_spi_init(struct amba_device *adev);
+void __init u300_spi_register_board_devices(void);
+#else
+/* Compile out SPI support if PL022 is not selected */
+static inline void __init u300_spi_init(struct amba_device *adev)
+{
+}
+static inline void __init u300_spi_register_board_devices(void)
+{
+}
+#endif
+
+#endif
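A hedged sketch of how the two hooks above would be wired up from board/core init code; pl022_device and board_init_spi() are invented names, not identifiers from this patch — the real caller is expected to pass whatever PL022 amba_device it already registers:

/* Sketch: wiring up the U300 SPI hooks from board init (names invented) */
#include <linux/init.h>
#include <linux/amba/bus.h>
#include "spi.h"

extern struct amba_device pl022_device;	/* assumed to exist in board code */

static void __init board_init_spi(void)
{
	/* Attach the pl022_ssp_controller platform data and pad-mux the port */
	u300_spi_init(&pl022_device);
	/* Register the spi_board_info table (the loopback dummy, if enabled) */
	u300_spi_register_board_devices();
}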
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index cce53204880..26d26f5100f 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -346,6 +346,21 @@ static struct clocksource clocksource_u300_1mhz = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel. We accept that
+ * this wraps around for now, since it is just a relative time
+ * stamp. (Inspired by OMAP implementation.)
+ */
+unsigned long long notrace sched_clock(void)
+{
+ return clocksource_cyc2ns(clocksource_u300_1mhz.read(
+ &clocksource_u300_1mhz),
+ clocksource_u300_1mhz.mult,
+ clocksource_u300_1mhz.shift);
+}
+
/*
* This sets up the system timers, clock source and clock event.
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 975eae41ee6..e13be7c444c 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -27,6 +27,7 @@
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cnt32_to_63.h>
@@ -47,7 +48,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
#include "core.h"
#include "clock.h"
@@ -369,7 +369,7 @@ unsigned int mmc_status(struct device *dev)
return readl(VERSATILE_SYSMCI) & mask;
}
-static struct mmc_platform_data mmc0_plat_data = {
+static struct mmci_platform_data mmc0_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
.gpio_wp = -1,
diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
index 9af8d8154df..239cd30fc4f 100644
--- a/arch/arm/mach-versatile/versatile_pb.c
+++ b/arch/arm/mach-versatile/versatile_pb.c
@@ -24,6 +24,7 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
+#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -31,7 +32,6 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/mmc.h>
#include "core.h"
@@ -41,7 +41,7 @@
#define IRQ_MMCI1A IRQ_SIC_MMCI1A
#endif
-static struct mmc_platform_data mmc1_plat_data = {
+static struct mmci_platform_data mmc1_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
.gpio_wp = -1,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5fe595aeba6..8d43e58f924 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -771,3 +771,8 @@ config CACHE_XSC3L2
select OUTER_CACHE
help
This option enables the L2 cache on XScale3.
+
+config ARM_L1_CACHE_SHIFT
+ int
+ default 6 if ARCH_OMAP3
+ default 5
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc7438..6bda76a4319 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
}
spin_unlock(&cpu_asid_lock);
- mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
mm->context.id = asid;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cc8829d7e11..379f7855605 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,19 @@
#include "fault.h"
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF (1 << 31)
+#define FSR_WRITE (1 << 11)
+#define FSR_FS4 (1 << 10)
+#define FSR_FS3_0 (15)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+ return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
@@ -182,18 +195,35 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-static int
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (fsr & FSR_WRITE)
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
- int fault, mask;
+ int fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
- if (!vma)
+ if (unlikely(!vma))
goto out;
- if (vma->vm_start > addr)
+ if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
@@ -201,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* memory access, so we can handle it.
*/
good_area:
- if (fsr & (1 << 11)) /* write? */
- mask = VM_WRITE;
- else
- mask = VM_READ|VM_EXEC|VM_WRITE;
-
- fault = VM_FAULT_BADACCESS;
- if (!(vma->vm_flags & mask))
+ if (access_error(fsr, vma)) {
+ fault = VM_FAULT_BADACCESS;
goto out;
+ }
/*
- * If for any reason at all we couldn't handle
- * the fault, make sure we exit gracefully rather
- * than endlessly redo the fault.
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the fault.
*/
-survive:
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- return fault;
- BUG();
- }
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR))
+ return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
return fault;
-out_of_memory:
- if (!is_global_init(tsk))
- goto out;
-
- /*
- * If we are out of memory for pid1, sleep for a while and retry
- */
- up_read(&mm->mmap_sem);
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
-
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
goto good_area;
@@ -278,6 +285,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in
+ * which case, we'll have missed the might_sleep() from
+ * down_read()
+ */
+ might_sleep();
}
fault = __do_page_fault(mm, addr, fsr, tsk);
@@ -289,6 +303,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we
+ * got oom-killed)
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
@@ -296,16 +320,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs))
goto no_context;
- if (fault & VM_FAULT_OOM) {
- /*
- * We ran out of memory, or some other thing
- * happened to us that made us unable to handle
- * the page fault gracefully.
- */
- printk("VM: killing process %s\n", tsk->comm);
- do_group_exit(SIGKILL);
- return 0;
- }
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
@@ -489,10 +503,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
- if (!inf->fn(addr, fsr, regs))
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
@@ -508,6 +522,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
- do_translation_fault(addr, 0, regs);
+ do_translation_fault(addr, FSR_LNX_PF, regs);
}
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 575f3ad722e..b27942909b2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
return;
}
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long len, int write)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
/* VIPT non-aliasing cache */
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
/* only flushing the kernel mapping on non-aliasing VIPT */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ea36186f32c..877c492f8e1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -596,8 +596,8 @@ void __init mem_init(void)
printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
"%dK data, %dK init, %luK highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- codesize >> 10, datasize >> 10, initsize >> 10,
+ nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
+ datasize >> 10, initsize >> 10,
(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
@@ -613,6 +613,14 @@ void __init mem_init(void)
void free_initmem(void)
{
+#ifdef CONFIG_HAVE_TCM
+ extern char *__tcm_start, *__tcm_end;
+
+ totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
+ __phys_to_pfn(__pa(__tcm_end)),
+ "TCM link");
+#endif
+
if (!machine_is_integrator() && !machine_is_cintegrator())
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index 3c127aabe21..1ff6a37e893 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -179,7 +179,6 @@ static int __init iop3xx_adma_cap_init(void)
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
#else
dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
- dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_0_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
#endif
@@ -188,7 +187,6 @@ static int __init iop3xx_adma_cap_init(void)
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
#else
dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
- dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_1_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);
#endif
@@ -198,7 +196,7 @@ static int __init iop3xx_adma_cap_init(void)
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#else
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
- dma_cap_set(DMA_ZERO_SUM, iop3xx_aau_data.cap_mask);
+ dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#endif
diff --git a/arch/arm/plat-mxc/include/mach/spi.h b/arch/arm/plat-mxc/include/mach/spi.h
new file mode 100644
index 00000000000..08be445e8eb
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/spi.h
@@ -0,0 +1,27 @@
+
+#ifndef __MACH_SPI_H_
+#define __MACH_SPI_H_
+
+/**
+ * struct spi_imx_master - device.platform_data for SPI controller devices.
+ * @chipselect: Array of chipselects for this master. Numbers >= 0 mean gpio
+ * pins, numbers < 0 mean internal CSPI chipselects according
+ * to MXC_SPI_CS(). Normally you want to use gpio based chip
+ * selects as the CSPI module tries to be intelligent about
+ * when to assert the chipselect: The CSPI module deasserts the
+ * chipselect once it runs out of input data. The other problem
+ * is that it is not possible to mix active-high and active-low
+ * chipselects on a single bus using the internal
+ * chipselects. Unfortunately Freescale decided to put some
+ * chipselects on dedicated pins which are not usable as gpios,
+ * so we have to support the internal chipselects.
+ * @num_chipselect: ARRAY_SIZE(chipselect)
+ */
+struct spi_imx_master {
+ int *chipselect;
+ int num_chipselect;
+};
+
+#define MXC_SPI_CS(no) ((no) - 32)
+
+#endif /* __MACH_SPI_H_*/
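A board-code sketch of how this platform data is meant to be filled in, mixing one GPIO chip select with one internal CSPI chip select via MXC_SPI_CS(); the GPIO number and the variable names here are purely illustrative:

/* Illustrative spi_imx_master platform data (GPIO number is made up) */
#include <linux/kernel.h>
#include <mach/spi.h>

static int example_cspi_chipselects[] = {
	42,			/* >= 0: a GPIO used as chip select */
	MXC_SPI_CS(0),		/* < 0: internal CSPI chip select 0 */
};

static struct spi_imx_master example_cspi_pdata = {
	.chipselect	= example_cspi_chipselects,
	.num_chipselect	= ARRAY_SIZE(example_cspi_chipselects),
};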
diff --git a/arch/arm/plat-omap/include/mach/gpmc.h b/arch/arm/plat-omap/include/mach/gpmc.h
index 921b16532ff..9c99cda77ba 100644
--- a/arch/arm/plat-omap/include/mach/gpmc.h
+++ b/arch/arm/plat-omap/include/mach/gpmc.h
@@ -103,6 +103,10 @@ extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
extern void gpmc_cs_free(int cs);
extern int gpmc_cs_set_reserved(int cs, int reserved);
extern int gpmc_cs_reserved(int cs);
+extern int gpmc_prefetch_enable(int cs, int dma_mode,
+ unsigned int u32_count, int is_write);
+extern void gpmc_prefetch_reset(void);
+extern int gpmc_prefetch_status(void);
extern void __init gpmc_init(void);
#endif
diff --git a/arch/arm/plat-omap/include/mach/irqs.h b/arch/arm/plat-omap/include/mach/irqs.h
index fb7cb772399..28a165058b6 100644
--- a/arch/arm/plat-omap/include/mach/irqs.h
+++ b/arch/arm/plat-omap/include/mach/irqs.h
@@ -503,6 +503,7 @@
#define INT_44XX_FPKA_READY_IRQ (50 + IRQ_GIC_START)
#define INT_44XX_SHA1MD51_IRQ (51 + IRQ_GIC_START)
#define INT_44XX_RNG_IRQ (52 + IRQ_GIC_START)
+#define INT_44XX_MMC5_IRQ (59 + IRQ_GIC_START)
#define INT_44XX_I2C3_IRQ (61 + IRQ_GIC_START)
#define INT_44XX_FPKA_ERROR_IRQ (64 + IRQ_GIC_START)
#define INT_44XX_PBIAS_IRQ (75 + IRQ_GIC_START)
@@ -511,6 +512,7 @@
#define INT_44XX_TLL_IRQ (78 + IRQ_GIC_START)
#define INT_44XX_PARTHASH_IRQ (79 + IRQ_GIC_START)
#define INT_44XX_MMC3_IRQ (94 + IRQ_GIC_START)
+#define INT_44XX_MMC4_IRQ (96 + IRQ_GIC_START)
/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730/850) and
diff --git a/arch/arm/plat-omap/include/mach/lcd_mipid.h b/arch/arm/plat-omap/include/mach/lcd_mipid.h
index f8fbc4801e5..8e52c657228 100644
--- a/arch/arm/plat-omap/include/mach/lcd_mipid.h
+++ b/arch/arm/plat-omap/include/mach/lcd_mipid.h
@@ -16,7 +16,12 @@ enum mipid_test_result {
struct mipid_platform_data {
int nreset_gpio;
int data_lines;
+
void (*shutdown)(struct mipid_platform_data *pdata);
+ void (*set_bklight_level)(struct mipid_platform_data *pdata,
+ int level);
+ int (*get_bklight_level)(struct mipid_platform_data *pdata);
+ int (*get_bklight_max)(struct mipid_platform_data *pdata);
};
#endif
diff --git a/arch/arm/plat-omap/include/mach/mmc.h b/arch/arm/plat-omap/include/mach/mmc.h
index 81d5b36534b..7229b959330 100644
--- a/arch/arm/plat-omap/include/mach/mmc.h
+++ b/arch/arm/plat-omap/include/mach/mmc.h
@@ -25,11 +25,18 @@
#define OMAP24XX_NR_MMC 2
#define OMAP34XX_NR_MMC 3
+#define OMAP44XX_NR_MMC 5
#define OMAP2420_MMC_SIZE OMAP1_MMC_SIZE
-#define HSMMC_SIZE 0x200
+#define OMAP3_HSMMC_SIZE 0x200
+#define OMAP4_HSMMC_SIZE 0x1000
#define OMAP2_MMC1_BASE 0x4809c000
#define OMAP2_MMC2_BASE 0x480b4000
#define OMAP3_MMC3_BASE 0x480ad000
+#define OMAP4_MMC4_BASE 0x480d1000
+#define OMAP4_MMC5_BASE 0x480d5000
+#define OMAP4_MMC_REG_OFFSET 0x100
+#define HSMMC5 (1 << 4)
+#define HSMMC4 (1 << 3)
#define HSMMC3 (1 << 2)
#define HSMMC2 (1 << 1)
#define HSMMC1 (1 << 0)
@@ -59,6 +66,9 @@ struct omap_mmc_platform_data {
int (*suspend)(struct device *dev, int slot);
int (*resume)(struct device *dev, int slot);
+ /* Return context loss count due to PM states changing */
+ int (*get_context_loss_count)(struct device *dev);
+
u64 dma_mask;
struct omap_mmc_slot_data {
@@ -80,12 +90,20 @@ struct omap_mmc_platform_data {
/* use the internal clock */
unsigned internal_clock:1;
+ /* nonremovable e.g. eMMC */
+ unsigned nonremovable:1;
+
+ /* Try to sleep or power off when possible */
+ unsigned power_saving:1;
+
int switch_pin; /* gpio (card detect) */
int gpio_wp; /* gpio (write protect) */
int (* set_bus_mode)(struct device *dev, int slot, int bus_mode);
int (* set_power)(struct device *dev, int slot, int power_on, int vdd);
int (* get_ro)(struct device *dev, int slot);
+ int (*set_sleep)(struct device *dev, int slot, int sleep,
+ int vdd, int cardsleep);
/* return MMC cover switch state, can be NULL if not supported.
*
diff --git a/arch/arm/plat-omap/include/mach/omapfb.h b/arch/arm/plat-omap/include/mach/omapfb.h
index 7b74d1255e0..b226bdf4573 100644
--- a/arch/arm/plat-omap/include/mach/omapfb.h
+++ b/arch/arm/plat-omap/include/mach/omapfb.h
@@ -276,8 +276,8 @@ typedef int (*omapfb_notifier_callback_t)(struct notifier_block *,
void *fbi);
struct omapfb_mem_region {
- dma_addr_t paddr;
- void *vaddr;
+ u32 paddr;
+ void __iomem *vaddr;
unsigned long size;
u8 type; /* OMAPFB_PLANE_MEM_* */
unsigned alloc:1; /* allocated by the driver */
diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c
index 70aeee407f7..2975798d411 100644
--- a/arch/arm/plat-pxa/dma.c
+++ b/arch/arm/plat-pxa/dma.c
@@ -17,22 +17,266 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/dma-mapping.h>
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <mach/hardware.h>
#include <mach/dma.h>
+#define DMA_DEBUG_NAME "pxa_dma"
+#define DMA_MAX_REQUESTERS 64
+
struct dma_channel {
char *name;
pxa_dma_prio prio;
void (*irq_handler)(int, void *);
void *data;
+ spinlock_t lock;
};
static struct dma_channel *dma_channels;
static int num_dma_channels;
+/*
+ * Debug fs
+ */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
+
+static int dbg_show_requester_chan(struct seq_file *s, void *p)
+{
+ int pos = 0;
+ int chan = (int)s->private;
+ int i;
+ u32 drcmr;
+
+ pos += seq_printf(s, "DMA channel %d requesters list :\n", chan);
+ for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
+ drcmr = DRCMR(i);
+ if ((drcmr & DRCMR_CHLNUM) == chan)
+ pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
+ !!(drcmr & DRCMR_MAPVLD));
+ }
+ return pos;
+}
+
+static inline int dbg_burst_from_dcmd(u32 dcmd)
+{
+ int burst = (dcmd >> 16) & 0x3;
+
+ return burst ? 4 << burst : 0;
+}
+
+static int is_phys_valid(unsigned long addr)
+{
+ return pfn_valid(__phys_to_pfn(addr));
+}
+
+#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
+#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
+
+static int dbg_show_descriptors(struct seq_file *s, void *p)
+{
+ int pos = 0;
+ int chan = (int)s->private;
+ int i, max_show = 20, burst, width;
+ u32 dcmd;
+ unsigned long phys_desc;
+ struct pxa_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma_channels[chan].lock, flags);
+ phys_desc = DDADR(chan);
+
+ pos += seq_printf(s, "DMA channel %d descriptors :\n", chan);
+ pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0);
+ for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
+ desc = phys_to_virt(phys_desc);
+ dcmd = desc->dcmd;
+ burst = dbg_burst_from_dcmd(dcmd);
+ width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
+
+ pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
+ i, phys_desc, desc);
+ pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
+ pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
+ pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
+ pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d"
+ " width=%d len=%d)\n",
+ dcmd,
+ DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
+ DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
+ DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
+ DCMD_STR(ENDIAN), burst, width,
+ dcmd & DCMD_LENGTH);
+ phys_desc = desc->ddadr;
+ }
+ if (i == max_show)
+ pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
+ i, phys_desc);
+ else
+ pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n",
+ i, phys_desc, phys_desc == DDADR_STOP ?
+ "DDADR_STOP" : "invalid");
+
+ spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
+ return pos;
+}
+
+static int dbg_show_chan_state(struct seq_file *s, void *p)
+{
+ int pos = 0;
+ int chan = (int)s->private;
+ u32 dcsr, dcmd;
+ int burst, width;
+ static char *str_prio[] = { "high", "normal", "low" };
+
+ dcsr = DCSR(chan);
+ dcmd = DCMD(chan);
+ burst = dbg_burst_from_dcmd(dcmd);
+ width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
+
+ pos += seq_printf(s, "DMA channel %d\n", chan);
+ pos += seq_printf(s, "\tPriority : %s\n",
+ str_prio[dma_channels[chan].prio]);
+ pos += seq_printf(s, "\tUnaligned transfer bit: %s\n",
+ DALGN & (1 << chan) ? "yes" : "no");
+ pos += seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
+ dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
+ DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
+ DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
+ DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
+ DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
+ DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
+ DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
+
+ pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d"
+ " len=%d)\n",
+ dcmd,
+ DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
+ DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
+ DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
+ DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
+ pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
+ pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
+ pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
+ return pos;
+}
+
+static int dbg_show_state(struct seq_file *s, void *p)
+{
+ int pos = 0;
+
+ /* basic device status */
+ pos += seq_printf(s, "DMA engine status\n");
+ pos += seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
+
+ return pos;
+}
+
+#define DBGFS_FUNC_DECL(name) \
+static int dbg_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, dbg_show_##name, inode->i_private); \
+} \
+static const struct file_operations dbg_fops_##name = { \
+ .owner = THIS_MODULE, \
+ .open = dbg_open_##name, \
+ .llseek = seq_lseek, \
+ .read = seq_read, \
+ .release = single_release, \
+}
+
+DBGFS_FUNC_DECL(state);
+DBGFS_FUNC_DECL(chan_state);
+DBGFS_FUNC_DECL(descriptors);
+DBGFS_FUNC_DECL(requester_chan);
+
+static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
+{
+ char chan_name[11];
+ struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
+ struct dentry *chan_reqs = NULL;
+ void *dt;
+
+ scnprintf(chan_name, sizeof(chan_name), "%d", ch);
+ chan = debugfs_create_dir(chan_name, chandir);
+ dt = (void *)ch;
+
+ if (chan)
+ chan_state = debugfs_create_file("state", 0400, chan, dt,
+ &dbg_fops_chan_state);
+ if (chan_state)
+ chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
+ &dbg_fops_descriptors);
+ if (chan_descr)
+ chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
+ &dbg_fops_requester_chan);
+ if (!chan_reqs)
+ goto err_state;
+
+ return chan;
+
+err_state:
+ debugfs_remove_recursive(chan);
+ return NULL;
+}
+
+static void pxa_dma_init_debugfs(void)
+{
+ int i;
+ struct dentry *chandir;
+
+ dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
+ if (IS_ERR(dbgfs_root) || !dbgfs_root)
+ goto err_root;
+
+ dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
+ &dbg_fops_state);
+ if (!dbgfs_state)
+ goto err_state;
+
+ dbgfs_chan = kmalloc(sizeof(*dbgfs_chan) * num_dma_channels,
+ GFP_KERNEL);
+ if (!dbgfs_chan)
+ goto err_alloc;
+
+ chandir = debugfs_create_dir("channels", dbgfs_root);
+ if (!chandir)
+ goto err_chandir;
+
+ for (i = 0; i < num_dma_channels; i++) {
+ dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
+ if (!dbgfs_chan[i])
+ goto err_chans;
+ }
+
+ return;
+err_chans:
+err_chandir:
+ kfree(dbgfs_chan);
+err_alloc:
+err_state:
+ debugfs_remove_recursive(dbgfs_root);
+err_root:
+ pr_err("pxa_dma: debugfs is not available\n");
+}
+
+static void __exit pxa_dma_cleanup_debugfs(void)
+{
+ debugfs_remove_recursive(dbgfs_root);
+}
+#else
+static inline void pxa_dma_init_debugfs(void) {}
+static inline void pxa_dma_cleanup_debugfs(void) {}
+#endif
+
int pxa_request_dma (char *name, pxa_dma_prio prio,
void (*irq_handler)(int, void *),
void *data)
@@ -71,6 +315,7 @@ int pxa_request_dma (char *name, pxa_dma_prio prio,
local_irq_restore(flags);
return i;
}
+EXPORT_SYMBOL(pxa_request_dma);
void pxa_free_dma (int dma_ch)
{
@@ -88,24 +333,26 @@ void pxa_free_dma (int dma_ch)
dma_channels[dma_ch].name = NULL;
local_irq_restore(flags);
}
+EXPORT_SYMBOL(pxa_free_dma);
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
int i, dint = DINT;
+ struct dma_channel *channel;
- for (i = 0; i < num_dma_channels; i++) {
- if (dint & (1 << i)) {
- struct dma_channel *channel = &dma_channels[i];
- if (channel->name && channel->irq_handler) {
- channel->irq_handler(i, channel->data);
- } else {
- /*
- * IRQ for an unregistered DMA channel:
- * let's clear the interrupts and disable it.
- */
- printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
- DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
- }
+ while (dint) {
+ i = __ffs(dint);
+ dint &= (dint - 1);
+ channel = &dma_channels[i];
+ if (channel->name && channel->irq_handler) {
+ channel->irq_handler(i, channel->data);
+ } else {
+ /*
+ * IRQ for an unregistered DMA channel:
+ * let's clear the interrupts and disable it.
+ */
+ printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
+ DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
}
return IRQ_HANDLED;
@@ -127,6 +374,7 @@ int __init pxa_init_dma(int irq, int num_ch)
for (i = 0; i < num_ch; i++) {
DCSR(i) = 0;
dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
+ spin_lock_init(&dma_channels[i].lock);
}
ret = request_irq(irq, dma_irq_handler, IRQF_DISABLED, "DMA", NULL);
@@ -135,10 +383,9 @@ int __init pxa_init_dma(int irq, int num_ch)
kfree(dma_channels);
return ret;
}
-
num_dma_channels = num_ch;
+
+ pxa_dma_init_debugfs();
+
return 0;
}
-
-EXPORT_SYMBOL(pxa_request_dma);
-EXPORT_SYMBOL(pxa_free_dma);
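The reworked dma_irq_handler() above services only the channels whose bits are set in DINT, instead of scanning all num_dma_channels entries. A minimal user-space sketch of that set-bit walk, using __builtin_ctz as a stand-in for the kernel's __ffs() (an illustrative assumption, not the kernel API):

    #include <stdio.h>

    int main(void)
    {
        unsigned int dint = 0x00000442;    /* hypothetical pending-interrupt mask */

        while (dint) {
            int i = __builtin_ctz(dint);   /* index of the lowest set bit */
            dint &= dint - 1;              /* clear that bit */
            printf("servicing DMA channel %d\n", i);
        }
        return 0;
    }

Each iteration handles exactly one pending channel, so idle channels cost nothing.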
diff --git a/arch/arm/plat-pxa/include/plat/mfp.h b/arch/arm/plat-pxa/include/plat/mfp.h
index 64019464c8d..22086e696e8 100644
--- a/arch/arm/plat-pxa/include/plat/mfp.h
+++ b/arch/arm/plat-pxa/include/plat/mfp.h
@@ -150,6 +150,74 @@ enum {
MFP_PIN_GPIO125,
MFP_PIN_GPIO126,
MFP_PIN_GPIO127,
+
+ MFP_PIN_GPIO128,
+ MFP_PIN_GPIO129,
+ MFP_PIN_GPIO130,
+ MFP_PIN_GPIO131,
+ MFP_PIN_GPIO132,
+ MFP_PIN_GPIO133,
+ MFP_PIN_GPIO134,
+ MFP_PIN_GPIO135,
+ MFP_PIN_GPIO136,
+ MFP_PIN_GPIO137,
+ MFP_PIN_GPIO138,
+ MFP_PIN_GPIO139,
+ MFP_PIN_GPIO140,
+ MFP_PIN_GPIO141,
+ MFP_PIN_GPIO142,
+ MFP_PIN_GPIO143,
+ MFP_PIN_GPIO144,
+ MFP_PIN_GPIO145,
+ MFP_PIN_GPIO146,
+ MFP_PIN_GPIO147,
+ MFP_PIN_GPIO148,
+ MFP_PIN_GPIO149,
+ MFP_PIN_GPIO150,
+ MFP_PIN_GPIO151,
+ MFP_PIN_GPIO152,
+ MFP_PIN_GPIO153,
+ MFP_PIN_GPIO154,
+ MFP_PIN_GPIO155,
+ MFP_PIN_GPIO156,
+ MFP_PIN_GPIO157,
+ MFP_PIN_GPIO158,
+ MFP_PIN_GPIO159,
+ MFP_PIN_GPIO160,
+ MFP_PIN_GPIO161,
+ MFP_PIN_GPIO162,
+ MFP_PIN_GPIO163,
+ MFP_PIN_GPIO164,
+ MFP_PIN_GPIO165,
+ MFP_PIN_GPIO166,
+ MFP_PIN_GPIO167,
+ MFP_PIN_GPIO168,
+ MFP_PIN_GPIO169,
+ MFP_PIN_GPIO170,
+ MFP_PIN_GPIO171,
+ MFP_PIN_GPIO172,
+ MFP_PIN_GPIO173,
+ MFP_PIN_GPIO174,
+ MFP_PIN_GPIO175,
+ MFP_PIN_GPIO176,
+ MFP_PIN_GPIO177,
+ MFP_PIN_GPIO178,
+ MFP_PIN_GPIO179,
+ MFP_PIN_GPIO180,
+ MFP_PIN_GPIO181,
+ MFP_PIN_GPIO182,
+ MFP_PIN_GPIO183,
+ MFP_PIN_GPIO184,
+ MFP_PIN_GPIO185,
+ MFP_PIN_GPIO186,
+ MFP_PIN_GPIO187,
+ MFP_PIN_GPIO188,
+ MFP_PIN_GPIO189,
+ MFP_PIN_GPIO190,
+ MFP_PIN_GPIO191,
+
+ MFP_PIN_GPIO255 = 255,
+
MFP_PIN_GPIO0_2,
MFP_PIN_GPIO1_2,
MFP_PIN_GPIO2_2,
@@ -325,8 +393,9 @@ typedef unsigned long mfp_cfg_t;
#define MFP_PULL_LOW (0x1 << 21)
#define MFP_PULL_HIGH (0x2 << 21)
#define MFP_PULL_BOTH (0x3 << 21)
-#define MFP_PULL_MASK (0x3 << 21)
-#define MFP_PULL(x) (((x) >> 21) & 0x3)
+#define MFP_PULL_FLOAT (0x4 << 21)
+#define MFP_PULL_MASK (0x7 << 21)
+#define MFP_PULL(x) (((x) >> 21) & 0x7)
#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\
MFP_LPM_EDGE_NONE | MFP_PULL_NONE)
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index e716c622a17..9405d0379c8 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -77,11 +77,13 @@
* MFPR_PULL_LOW 1 0 1
* MFPR_PULL_HIGH 1 1 0
* MFPR_PULL_BOTH 1 1 1
+ * MFPR_PULL_FLOAT 1 0 0
*/
#define MFPR_PULL_NONE (0)
#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN)
#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN)
#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN)
+#define MFPR_PULL_FLOAT (MFPR_PULL_SEL)
/* mfp_spin_lock is used to ensure that MFP register configuration
* (most likely a read-modify-write operation) is atomic, and that
@@ -116,6 +118,7 @@ static const unsigned long mfpr_pull[] = {
MFPR_PULL_LOW,
MFPR_PULL_HIGH,
MFPR_PULL_BOTH,
+ MFPR_PULL_FLOAT,
};
/* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */
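Together with the plat/mfp.h change above, the pull setting is now a 3-bit field at bits 23..21, and the new MFP_PULL_FLOAT index selects the mfpr_pull[] entry that asserts only MFPR_PULL_SEL, i.e. neither pull resistor is enabled. A small stand-alone sketch of decoding that field (constants mirrored from the header; the config word itself is hypothetical):

    #include <assert.h>

    #define MFP_PULL_FLOAT  (0x4 << 21)
    #define MFP_PULL_MASK   (0x7 << 21)
    #define MFP_PULL(x)     (((x) >> 21) & 0x7)

    int main(void)
    {
        unsigned long cfg = MFP_PULL_FLOAT;     /* hypothetical pin config word */

        /* index 4 picks the MFPR_PULL_FLOAT entry of mfpr_pull[] */
        assert(MFP_PULL(cfg) == 0x4);
        return 0;
    }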
diff --git a/arch/arm/plat-s3c/gpio.c b/arch/arm/plat-s3c/gpio.c
index 260fdc6ad68..5ff24e0f9f8 100644
--- a/arch/arm/plat-s3c/gpio.c
+++ b/arch/arm/plat-s3c/gpio.c
@@ -28,7 +28,7 @@ static __init void s3c_gpiolib_track(struct s3c_gpio_chip *chip)
gpn = chip->chip.base;
for (i = 0; i < chip->chip.ngpio; i++, gpn++) {
- BUG_ON(gpn > ARRAY_SIZE(s3c_gpios));
+ BUG_ON(gpn >= ARRAY_SIZE(s3c_gpios));
s3c_gpios[gpn] = chip;
}
}
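The corrected bound matters because valid indices into s3c_gpios[] run from 0 to ARRAY_SIZE(s3c_gpios) - 1. A trivial user-space illustration of the boundary case the old check let through (array size chosen arbitrarily):

    #include <stdio.h>

    #define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        int gpios[16];                 /* stand-in for s3c_gpios[] */
        unsigned int gpn;

        for (gpn = 0; gpn < 20; gpn++) {
            int old_ok = !(gpn > ARRAY_SIZE(gpios));   /* old check */
            int new_ok = !(gpn >= ARRAY_SIZE(gpios));  /* fixed check */
            if (old_ok != new_ok)
                printf("gpn=%u: old check wrongly allowed it\n", gpn);
        }
        return 0;
    }

Only gpn == 16 differs, which is exactly the one-past-the-end index the fix rejects.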
diff --git a/arch/arm/plat-s3c64xx/dma.c b/arch/arm/plat-s3c64xx/dma.c
index 67aa93dbb69..266a10745a8 100644
--- a/arch/arm/plat-s3c64xx/dma.c
+++ b/arch/arm/plat-s3c64xx/dma.c
@@ -345,13 +345,13 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
if (!chan)
return -EINVAL;
- buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_KERNEL);
+ buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
if (!buff) {
printk(KERN_ERR "%s: no memory for buffer\n", __func__);
return -ENOMEM;
}
- lli = dma_pool_alloc(dma_pool, GFP_KERNEL, &buff->lli_dma);
+ lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
if (!lli) {
printk(KERN_ERR "%s: no memory for lli\n", __func__);
ret = -ENOMEM;
@@ -697,7 +697,7 @@ static int __init s3c64xx_dma_init(void)
printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
- dma_pool = dma_pool_create("DMA-LLI", NULL, 32, 16, 0);
+ dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
if (!dma_pool) {
printk(KERN_ERR "%s: failed to create pool\n", __func__);
return -ENOMEM;
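The GFP_KERNEL to GFP_ATOMIC switch above reflects that s3c2410_dma_enqueue() can be called from atomic context, where a sleeping allocation is not allowed; the pool element size is also corrected to match the PL080S LLI actually stored in it. A hedged kernel-style sketch of the allocation-context rule, using a hypothetical helper name:

    #include <linux/slab.h>
    #include <linux/types.h>

    /* Hypothetical helper: illustrates the GFP rule only, not driver code. */
    static void *alloc_buff(size_t len, bool in_atomic_ctx)
    {
        /*
         * GFP_KERNEL may sleep while reclaiming memory, which is only
         * legal in process context.  Paths that hold a spinlock or run
         * from an IRQ handler must use GFP_ATOMIC, as the enqueue path
         * above now does.
         */
        return kzalloc(len, in_atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
    }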
diff --git a/arch/arm/plat-s3c64xx/include/plat/dma-plat.h b/arch/arm/plat-s3c64xx/include/plat/dma-plat.h
index 0c30dd98672..8f76a1e474d 100644
--- a/arch/arm/plat-s3c64xx/include/plat/dma-plat.h
+++ b/arch/arm/plat-s3c64xx/include/plat/dma-plat.h
@@ -26,7 +26,7 @@ struct s3c64xx_dma_buff {
struct s3c64xx_dma_buff *next;
void *pw;
- struct pl080_lli *lli;
+ struct pl080s_lli *lli;
dma_addr_t lli_dma;
};
diff --git a/arch/arm/plat-s3c64xx/include/plat/irqs.h b/arch/arm/plat-s3c64xx/include/plat/irqs.h
index 743a70094d0..7956fd3bb19 100644
--- a/arch/arm/plat-s3c64xx/include/plat/irqs.h
+++ b/arch/arm/plat-s3c64xx/include/plat/irqs.h
@@ -194,9 +194,17 @@
#define IRQ_EINT_GROUP(group, no) (IRQ_EINT_GROUP##group##_BASE + (no))
+/* Define a group of interrupts for board-specific use (eg, for MFD
+ * interrupt controllers). */
+#define IRQ_BOARD_START (IRQ_EINT_GROUP9_BASE + IRQ_EINT_GROUP9_NR + 1)
+
+#define IRQ_BOARD_NR 16
+
+#define IRQ_BOARD_END (IRQ_BOARD_START + IRQ_BOARD_NR)
+
/* Set the default NR_IRQS */
-#define NR_IRQS (IRQ_EINT_GROUP9_BASE + IRQ_EINT_GROUP9_NR + 1)
+#define NR_IRQS (IRQ_BOARD_END + 1)
#endif /* __ASM_PLAT_S3C64XX_IRQS_H */
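With the board range added, NR_IRQS now ends after the 16 board-specific entries rather than after the group-9 external interrupts. A tiny sketch of the resulting numbering, with hypothetical values for the group-9 base and count:

    #include <stdio.h>

    int main(void)
    {
        int group9_base = 96, group9_nr = 12;   /* hypothetical values */
        int board_start = group9_base + group9_nr + 1;
        int board_nr    = 16;
        int board_end   = board_start + board_nr;

        printf("IRQ_BOARD_START=%d IRQ_BOARD_END=%d NR_IRQS=%d\n",
               board_start, board_end, board_end + 1);
        return 0;
    }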
diff --git a/arch/arm/plat-s3c64xx/s3c6400-clock.c b/arch/arm/plat-s3c64xx/s3c6400-clock.c
index febac1950d8..9745852261e 100644
--- a/arch/arm/plat-s3c64xx/s3c6400-clock.c
+++ b/arch/arm/plat-s3c64xx/s3c6400-clock.c
@@ -302,8 +302,8 @@ static int s3c64xx_setrate_clksrc(struct clk *clk, unsigned long rate)
return -EINVAL;
val = __raw_readl(reg);
- val &= ~(0xf << sclk->shift);
- val |= (div - 1) << sclk->shift;
+ val &= ~(0xf << sclk->divider_shift);
+ val |= (div - 1) << sclk->divider_shift;
__raw_writel(val, reg);
return 0;
@@ -328,6 +328,8 @@ static int s3c64xx_setparent_clksrc(struct clk *clk, struct clk *parent)
clksrc |= src_nr << sclk->shift;
__raw_writel(clksrc, S3C_CLK_SRC);
+
+ clk->parent = parent;
return 0;
}
@@ -343,7 +345,7 @@ static unsigned long s3c64xx_roundrate_clksrc(struct clk *clk,
if (rate > parent_rate)
rate = parent_rate;
else {
- div = rate / parent_rate;
+ div = parent_rate / rate;
if (div == 0)
div = 1;
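The roundrate fix computes the divider as parent_rate / rate; the old rate / parent_rate expression truncated to zero whenever the requested rate was below the parent, collapsing every request to the parent rate. A short worked sketch with illustrative clock values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long parent_rate = 266000000;  /* illustrative parent clock */
        unsigned long rate        = 66000000;   /* requested rate */
        unsigned long div;

        div = parent_rate / rate;               /* 266 MHz / 66 MHz -> 4 */
        if (div == 0)
            div = 1;
        printf("div=%lu, rounded rate=%lu\n", div, parent_rate / div);
        return 0;
    }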
diff --git a/arch/arm/plat-stmp3xxx/dma.c b/arch/arm/plat-stmp3xxx/dma.c
index d2f497764dc..ef88f25fb87 100644
--- a/arch/arm/plat-stmp3xxx/dma.c
+++ b/arch/arm/plat-stmp3xxx/dma.c
@@ -264,7 +264,7 @@ int stmp3xxx_dma_make_chain(int ch, struct stmp37xx_circ_dma_chain *chain,
stmp3xxx_dma_free_command(ch,
&descriptors
[i]);
- } while (i-- >= 0);
+ } while (i-- > 0);
}
return err;
}
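The cleanup loop fix stops the post-decrement walk before the index goes negative: with '>= 0' the body would run once more with i == -1 and free a descriptor outside the array. A stand-alone sketch of the corrected pattern:

    #include <stdio.h>

    int main(void)
    {
        int descriptors[4] = { 10, 11, 12, 13 };
        int i = 3;          /* last descriptor that was successfully set up */

        /* release descriptors[i] down to descriptors[0], then stop */
        do {
            printf("freeing descriptor %d\n", descriptors[i]);
        } while (i-- > 0);
        return 0;
    }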
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index c8c55b46934..94be7bb6cb9 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Sat Sep 12 12:00:16 2009
+# Last update: Fri Sep 18 21:42:00 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -1638,7 +1638,7 @@ mx35evb MACH_MX35EVB MX35EVB 1643
aml_m8050 MACH_AML_M8050 AML_M8050 1644
mx35_3ds MACH_MX35_3DS MX35_3DS 1645
mars MACH_MARS MARS 1646
-ntosd_644xa MACH_NTOSD_644XA NTOSD_644XA 1647
+neuros_osd2 MACH_NEUROS_OSD2 NEUROS_OSD2 1647
badger MACH_BADGER BADGER 1648
trizeps4wl MACH_TRIZEPS4WL TRIZEPS4WL 1649
trizeps5 MACH_TRIZEPS5 TRIZEPS5 1650
@@ -1654,7 +1654,7 @@ vf10xx MACH_VF10XX VF10XX 1659
zoran43xx MACH_ZORAN43XX ZORAN43XX 1660
sonix926 MACH_SONIX926 SONIX926 1661
celestialsemi MACH_CELESTIALSEMI CELESTIALSEMI 1662
-cc9m2443 MACH_CC9M2443 CC9M2443 1663
+cc9m2443js MACH_CC9M2443JS CC9M2443JS 1663
tw5334 MACH_TW5334 TW5334 1664
omap_htcartemis MACH_HTCARTEMIS HTCARTEMIS 1665
nal_hlite MACH_NAL_HLITE NAL_HLITE 1666
@@ -1802,7 +1802,7 @@ ccw9p9215js MACH_CCW9P9215JS CCW9P9215JS 1811
rd88f5181l_ge MACH_RD88F5181L_GE RD88F5181L_GE 1812
sifmain MACH_SIFMAIN SIFMAIN 1813
sam9_l9261 MACH_SAM9_L9261 SAM9_L9261 1814
-cc9m2443js MACH_CC9M2443JS CC9M2443JS 1815
+cc9m2443 MACH_CC9M2443 CC9M2443 1815
xaria300 MACH_XARIA300 XARIA300 1816
it9200 MACH_IT9200 IT9200 1817
rd88f5181l_fxo MACH_RD88F5181L_FXO RD88F5181L_FXO 1818
@@ -2409,3 +2409,15 @@ platypus MACH_PLATYPUS PLATYPUS 2422
pss2 MACH_PSS2 PSS2 2423
davinci_apm150 MACH_DAVINCI_APM150 DAVINCI_APM150 2424
str9100 MACH_STR9100 STR9100 2425
+net5big MACH_NET5BIG NET5BIG 2426
+seabed9263 MACH_SEABED9263 SEABED9263 2427
+mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
+octvocplus_eb MACH_OCTVOCPLUS_EB OCTVOCPLUS_EB 2429
+klk_firefox MACH_KLK_FIREFOX KLK_FIREFOX 2430
+klk_wirma_module MACH_KLK_WIRMA_MODULE KLK_WIRMA_MODULE 2431
+klk_wirma_mmi MACH_KLK_WIRMA_MMI KLK_WIRMA_MMI 2432
+supersonic MACH_SUPERSONIC SUPERSONIC 2433
+liberty MACH_LIBERTY LIBERTY 2434
+mh355 MACH_MH355 MH355 2435
+pc7802 MACH_PC7802 PC7802 2436
+gnet_sgc MACH_GNET_SGC GNET_SGC 2437
diff --git a/arch/avr32/include/asm/mman.h b/arch/avr32/include/asm/mman.h
index 9a92b15f6a6..8eebf89f5ab 100644
--- a/arch/avr32/include/asm/mman.h
+++ b/arch/avr32/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __ASM_AVR32_MMAN_H__
-#define __ASM_AVR32_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __ASM_AVR32_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c
index 57ec9f2dcd9..6b2343e6fe3 100644
--- a/arch/avr32/kernel/init_task.c
+++ b/arch/avr32/kernel/init_task.c
@@ -18,9 +18,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure. Must be aligned on an 8192-byte boundary.
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index e819fa69a90..94925641e53 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -24,11 +24,9 @@
#include <asm/setup.h>
#include <asm/sections.h>
-#define __page_aligned __attribute__((section(".data.page_aligned")))
-
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned;
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
@@ -141,7 +139,7 @@ void __init mem_init(void)
printk ("Memory: %luk/%luk available (%dk kernel code, "
"%dk reserved, %dk data, %dk init)\n",
- (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
+ nr_free_pages() << (PAGE_SHIFT - 10),
totalram_pages << (PAGE_SHIFT - 10),
codesize >> 10,
reservedpages << (PAGE_SHIFT - 10),
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 6f9533c3d75..f063b772934 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -155,7 +155,7 @@ define archhelp
echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
echo ' install - Install kernel using'
- echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or'
- echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or'
+ echo ' (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) PATH: $(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH)'
endef
diff --git a/arch/blackfin/boot/install.sh b/arch/blackfin/boot/install.sh
index 9560a6b2910..e2c6e40902b 100644
--- a/arch/blackfin/boot/install.sh
+++ b/arch/blackfin/boot/install.sh
@@ -36,9 +36,9 @@ verify "$3"
# User may have a custom install script
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if which ${CROSS_COMPILE}installkernel >/dev/null 2>&1; then
- exec ${CROSS_COMPILE}installkernel "$@"
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if which ${INSTALLKERNEL} >/dev/null 2>&1; then
+ exec ${INSTALLKERNEL} "$@"
fi
# Default install - same as make zlilo
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index e7fd0ecd73f..ae4dae1e370 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -1,9 +1,6 @@
#ifndef _BLACKFIN_SECTIONS_H
#define _BLACKFIN_SECTIONS_H
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
/* only used when MTD_UCLINUX */
extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
@@ -15,4 +12,39 @@ extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
_ebss_l2[], _l2_lma_start[];
+#include <asm/mem_map.h>
+
+/* Blackfin systems have a discontinuous memory map and no virtualized memory */
+static inline int arch_is_kernel_text(unsigned long addr)
+{
+ return
+ (L1_CODE_LENGTH &&
+ addr >= (unsigned long)_stext_l1 &&
+ addr < (unsigned long)_etext_l1)
+ ||
+ (L2_LENGTH &&
+ addr >= (unsigned long)_stext_l2 &&
+ addr < (unsigned long)_etext_l2);
+}
+#define arch_is_kernel_text(addr) arch_is_kernel_text(addr)
+
+static inline int arch_is_kernel_data(unsigned long addr)
+{
+ return
+ (L1_DATA_A_LENGTH &&
+ addr >= (unsigned long)_sdata_l1 &&
+ addr < (unsigned long)_ebss_l1)
+ ||
+ (L1_DATA_B_LENGTH &&
+ addr >= (unsigned long)_sdata_b_l1 &&
+ addr < (unsigned long)_ebss_b_l1)
+ ||
+ (L2_LENGTH &&
+ addr >= (unsigned long)_sdata_l2 &&
+ addr < (unsigned long)_ebss_l2);
+}
+#define arch_is_kernel_data(addr) arch_is_kernel_data(addr)
+
+#include <asm-generic/sections.h>
+
#endif
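The sections.h pattern above defines an arch-specific predicate and then redefines the same name as a macro before pulling in asm-generic/sections.h, so the generic header can see that an override already exists. A minimal sketch of that idiom with hypothetical names and an illustrative address range:

    /* hypothetical arch header */
    static inline int arch_is_kernel_text(unsigned long addr)
    {
        return addr >= 0xc0000000UL;    /* illustrative range check only */
    }
    #define arch_is_kernel_text(addr) arch_is_kernel_text(addr)

    /*
     * The generic header can then provide a fallback guarded by
     * #ifndef arch_is_kernel_text, which this macro suppresses.
     */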
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index c8e7ee4768c..02b1529dad5 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -381,7 +381,7 @@
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
-#define __NR_perf_counter_open 369
+#define __NR_perf_event_open 369
#define __NR_syscall 370
#define NR_syscalls __NR_syscall
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 21ac7c26079..ffd90fbbc8f 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -96,8 +96,7 @@ SECTIONS
{
__sdata = .;
/* This gets done first, so the glob doesn't suck it in */
- . = ALIGN(32);
- *(.data.cacheline_aligned)
+ CACHELINE_ALIGNED_DATA(32)
#if !L1_DATA_A_LENGTH
. = ALIGN(32);
@@ -116,12 +115,7 @@ SECTIONS
DATA_DATA
CONSTRUCTORS
- /* make sure the init_task is aligned to the
- * kernel thread size so we can locate the kernel
- * stack properly and quickly.
- */
- . = ALIGN(THREAD_SIZE);
- *(.init_task.data)
+ INIT_TASK_DATA(THREAD_SIZE)
__edata = .;
}
@@ -134,39 +128,10 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
___init_begin = .;
- .init.text :
- {
- . = ALIGN(PAGE_SIZE);
- __sinittext = .;
- INIT_TEXT
- __einittext = .;
- }
- .init.data :
- {
- . = ALIGN(16);
- INIT_DATA
- }
- .init.setup :
- {
- . = ALIGN(16);
- ___setup_start = .;
- *(.init.setup)
- ___setup_end = .;
- }
- .initcall.init :
- {
- ___initcall_start = .;
- INITCALLS
- ___initcall_end = .;
- }
- .con_initcall.init :
- {
- ___con_initcall_start = .;
- *(.con_initcall.init)
- ___con_initcall_end = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ . = ALIGN(16);
+ INIT_DATA_SECTION(16)
PERCPU(4)
- SECURITY_INIT
/* we have to discard exit text and such at runtime, not link time, to
* handle embedded cross-section references (alt instructions, bug
@@ -181,18 +146,9 @@ SECTIONS
EXIT_DATA
}
- .init.ramfs :
- {
- . = ALIGN(4);
- ___initramfs_start = .;
- *(.init.ramfs)
- . = ALIGN(4);
- ___initramfs_end = .;
- }
-
__l1_lma_start = .;
- .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
+ .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
{
. = ALIGN(4);
__stext_l1 = .;
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index bdc330cd0e1..1c58914a874 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -2325,7 +2325,7 @@
#define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */
#define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */
#define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */
-#define CDPRIO 0x0100 /* DMA has priority over core for for external accesses */
+#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */
/* EBIU_AMGCTL Bit Positions */
#define AMCKEN_P 0x0000 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 01af24cde36..1e7cac23e25 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table)
.long _sys_preadv
.long _sys_pwritev
.long _sys_rt_tgsigqueueinfo
- .long _sys_perf_counter_open
+ .long _sys_perf_event_open
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/cris/Makefile b/arch/cris/Makefile
index 71e17d3eedd..29c2ceb38a7 100644
--- a/arch/cris/Makefile
+++ b/arch/cris/Makefile
@@ -42,8 +42,6 @@ LD = $(CROSS_COMPILE)ld -mcrislinux
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
-CPPFLAGS_vmlinux.lds = -DDRAM_VIRTUAL_BASE=0x$(CONFIG_ETRAX_DRAM_VIRTUAL_BASE)
-
KBUILD_AFLAGS += -mlinux -march=$(arch-y) $(inc)
KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe $(inc)
KBUILD_CPPFLAGS += $(inc)
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 2b73c7a5b64..31ca1418d5a 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -28,7 +28,6 @@
extern void update_xtime_from_cmos(void);
extern int set_rtc_mmss(unsigned long nowtime);
-extern int setup_irq(int, struct irqaction *);
extern int have_rtc;
unsigned long get_ns_in_jiffie(void)
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index d2a3ff8c4d3..058adddf4e4 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -52,8 +52,6 @@ static struct mm_struct* flush_mm;
static struct vm_area_struct* flush_vma;
static unsigned long flush_addr;
-extern int setup_irq(int, struct irqaction *);
-
/* Mode registers */
static unsigned long irq_regs[NR_CPUS] = {
regi_irq,
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index 65633d0dab8..b1920d8de40 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -46,7 +46,6 @@ unsigned long timer_regs[NR_CPUS] =
extern void update_xtime_from_cmos(void);
extern int set_rtc_mmss(unsigned long nowtime);
-extern int setup_irq(int, struct irqaction *);
extern int have_rtc;
#ifdef CONFIG_CPU_FREQ
diff --git a/arch/cris/arch-v32/mach-a3/io.c b/arch/cris/arch-v32/mach-a3/io.c
index c22f67ecd9f..090ceb99ef0 100644
--- a/arch/cris/arch-v32/mach-a3/io.c
+++ b/arch/cris/arch-v32/mach-a3/io.c
@@ -36,7 +36,7 @@ struct crisv32_ioport crisv32_ioports[] = {
},
};
-#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport)
+#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
struct crisv32_iopin crisv32_led_net0_green;
struct crisv32_iopin crisv32_led_net0_red;
diff --git a/arch/cris/arch-v32/mach-fs/io.c b/arch/cris/arch-v32/mach-fs/io.c
index cb6327b1f8f..a6958661fa8 100644
--- a/arch/cris/arch-v32/mach-fs/io.c
+++ b/arch/cris/arch-v32/mach-fs/io.c
@@ -52,7 +52,7 @@ struct crisv32_ioport crisv32_ioports[] = {
}
};
-#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport)
+#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
struct crisv32_iopin crisv32_led_net0_green;
struct crisv32_iopin crisv32_led_net0_red;
diff --git a/arch/cris/include/arch-v10/arch/mmu.h b/arch/cris/include/arch-v10/arch/mmu.h
index df84f1716e6..e829e5a37bb 100644
--- a/arch/cris/include/arch-v10/arch/mmu.h
+++ b/arch/cris/include/arch-v10/arch/mmu.h
@@ -33,10 +33,10 @@ typedef struct
/* CRIS PTE bits (see R_TLB_LO in the register description)
*
- * Bit: 31-13 12-------4 3 2 1 0
- * ________________________________________________
- * | pfn | reserved | global | valid | kernel | we |
- * |_____|__________|________|_______|________|_____|
+ * Bit: 31 30-13 12-------4 3 2 1 0
+ * _______________________________________________________
+ * | cache |pfn | reserved | global | valid | kernel | we |
+ * |_______|____|__________|________|_______|________|_____|
*
* (pfn = physical frame number)
*/
@@ -53,6 +53,7 @@ typedef struct
#define _PAGE_VALID (1<<2) /* page is valid */
#define _PAGE_SILENT_READ (1<<2) /* synonym */
#define _PAGE_GLOBAL (1<<3) /* global page - context is ignored */
+#define _PAGE_NO_CACHE (1<<31) /* part of the uncached memory map */
/* Bits the HW doesn't care about but the kernel uses them in SW */
diff --git a/arch/cris/include/arch-v32/arch/mmu.h b/arch/cris/include/arch-v32/arch/mmu.h
index 6bcdc3fdf7d..c1a13e05e96 100644
--- a/arch/cris/include/arch-v32/arch/mmu.h
+++ b/arch/cris/include/arch-v32/arch/mmu.h
@@ -28,10 +28,10 @@ typedef struct
/*
* CRISv32 PTE bits:
*
- * Bit: 31-13 12-5 4 3 2 1 0
- * +-----+------+--------+-------+--------+-------+---------+
- * | pfn | zero | global | valid | kernel | write | execute |
- * +-----+------+--------+-------+--------+-------+---------+
+ * Bit: 31 30-13 12-5 4 3 2 1 0
+ * +-------+-----+------+--------+-------+--------+-------+---------+
+ * | cache | pfn | zero | global | valid | kernel | write | execute |
+ * +-------+-----+------+--------+-------+--------+-------+---------+
*/
/*
@@ -45,6 +45,8 @@ typedef struct
#define _PAGE_VALID (1 << 3) /* Page is valid. */
#define _PAGE_SILENT_READ (1 << 3) /* Same as above. */
#define _PAGE_GLOBAL (1 << 4) /* Global page. */
+#define _PAGE_NO_CACHE (1 << 31) /* part of the uncached memory map */
+
/*
* The hardware doesn't care about these bits, but the kernel uses them in
diff --git a/arch/cris/include/asm/hardirq.h b/arch/cris/include/asm/hardirq.h
index 74178adeb1c..17bb12d760b 100644
--- a/arch/cris/include/asm/hardirq.h
+++ b/arch/cris/include/asm/hardirq.h
@@ -2,16 +2,6 @@
#define __ASM_HARDIRQ_H
#include <asm/irq.h>
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-void ack_bad_irq(unsigned int irq);
#define HARDIRQ_BITS 8
@@ -24,4 +14,6 @@ void ack_bad_irq(unsigned int irq);
# error HARDIRQ_BITS is too low!
#endif
+#include <asm-generic/hardirq.h>
+
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/cris/include/asm/mman.h b/arch/cris/include/asm/mman.h
index b7f0afba3ce..8eebf89f5ab 100644
--- a/arch/cris/include/asm/mman.h
+++ b/arch/cris/include/asm/mman.h
@@ -1,19 +1 @@
-#ifndef __CRIS_MMAN_H__
-#define __CRIS_MMAN_H__
-
-/* verbatim copy of asm-i386/ version */
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __CRIS_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 50aa974aa83..1fcce00f01f 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -197,6 +197,8 @@ static inline pte_t __mk_pte(void * page, pgprot_t pgprot)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE))
+
/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval
* __pte_page(pte_val) refers to the "virtual" DRAM interval
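The new pgprot_noncached() simply ORs the _PAGE_NO_CACHE bit (bit 31) into the protection value so the resulting mapping lands in the uncached part of the memory map. A small user-space model of that bit manipulation:

    #include <assert.h>

    #define _PAGE_NO_CACHE  (1UL << 31)     /* mirrors the CRIS PTE bit above */

    static unsigned long mk_noncached(unsigned long prot)
    {
        return prot | _PAGE_NO_CACHE;       /* what pgprot_noncached() does */
    }

    int main(void)
    {
        unsigned long prot = 0x0fUL;        /* hypothetical protection bits */

        assert(mk_noncached(prot) & _PAGE_NO_CACHE);
        return 0;
    }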
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index ee7bcd4d20b..b45640b3e60 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -3,6 +3,7 @@
# Makefile for the linux kernel.
#
+CPPFLAGS_vmlinux.lds := -DDRAM_VIRTUAL_BASE=0x$(CONFIG_ETRAX_DRAM_VIRTUAL_BASE)
extra-y := vmlinux.lds
obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 7f642fcffbf..0ca7d9892cc 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -38,11 +38,6 @@
#include <asm/io.h>
-void ack_bad_irq(unsigned int irq)
-{
- printk("unexpected IRQ trap at vector %02x\n", irq);
-}
-
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 51dcd04d277..c99aeab7cef 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -45,9 +45,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 6c81836b922..bbfda67d290 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -51,10 +51,7 @@ SECTIONS
_etext = . ; /* End of text section. */
__etext = .;
- . = ALIGN(4); /* Exception table. */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
+ EXCEPTION_TABLE(4)
RODATA
@@ -67,36 +64,24 @@ SECTIONS
__edata = . ; /* End of data section. */
_edata = . ;
- . = ALIGN(PAGE_SIZE); /* init_task and stack, must be aligned. */
- .data.init_task : { *(.data.init_task) }
+ INIT_TASK_DATA_SECTION(PAGE_SIZE)
. = ALIGN(PAGE_SIZE); /* Init code and data. */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
.init.data : { INIT_DATA }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
+ .init.setup : { INIT_SETUP(16) }
#ifdef CONFIG_ETRAX_ARCH_V32
__start___param = .;
__param : { *(__param) }
__stop___param = .;
#endif
.initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
+ INIT_CALLS
}
.con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
+ CON_INITCALL
}
SECURITY_INIT
@@ -114,9 +99,7 @@ SECTIONS
PERCPU(PAGE_SIZE)
.init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
+ INIT_RAM_FS
}
#endif
@@ -130,11 +113,7 @@ SECTIONS
__init_end = .;
__data_end = . ; /* Move to _edata ? */
- __bss_start = .; /* BSS. */
- .bss : {
- *(COMMON)
- *(.bss)
- }
+ BSS_SECTION(0, 0, 0)
. = ALIGN (0x20);
_end = .;
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index 514f46a4b23..ff68b9f516a 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -54,7 +54,7 @@ mem_init(void)
printk(KERN_INFO
"Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, "
"%dk init)\n" ,
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index b86e19c9b5b..4b5830bcbe2 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,7 +7,7 @@ config FRV
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/gdb-stub.h b/arch/frv/include/asm/gdb-stub.h
index 24f9738670b..2da716407ff 100644
--- a/arch/frv/include/asm/gdb-stub.h
+++ b/arch/frv/include/asm/gdb-stub.h
@@ -90,7 +90,6 @@ extern void gdbstub_do_rx(void);
extern asmlinkage void __debug_stub_init_break(void);
extern asmlinkage void __break_hijack_kernel_event(void);
extern asmlinkage void __break_hijack_kernel_event_breaks_here(void);
-extern asmlinkage void start_kernel(void);
extern asmlinkage void gdbstub_rx_handler(void);
extern asmlinkage void gdbstub_rx_irq(void);
diff --git a/arch/frv/include/asm/hardirq.h b/arch/frv/include/asm/hardirq.h
index fc47515822a..5fc8b6f5bc5 100644
--- a/arch/frv/include/asm/hardirq.h
+++ b/arch/frv/include/asm/hardirq.h
@@ -12,24 +12,15 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
- unsigned long idle_timestamp;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#ifdef CONFIG_SMP
-#error SMP not available on FR-V
-#endif /* CONFIG_SMP */
+#include <asm/atomic.h>
extern atomic_t irq_err_count;
static inline void ack_bad_irq(int irq)
{
atomic_inc(&irq_err_count);
}
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif
diff --git a/arch/frv/include/asm/mman.h b/arch/frv/include/asm/mman.h
index 58c1d11e2ac..8eebf89f5ab 100644
--- a/arch/frv/include/asm/mman.h
+++ b/arch/frv/include/asm/mman.h
@@ -1,18 +1 @@
-#ifndef __ASM_MMAN_H__
-#define __ASM_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __ASM_MMAN_H__ */
-
+#include <asm-generic/mman.h>
diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_event.h
index ccf726e61b2..a69e0155d14 100644
--- a/arch/frv/include/asm/perf_counter.h
+++ b/arch/frv/include/asm/perf_event.h
@@ -1,4 +1,4 @@
-/* FRV performance counter support
+/* FRV performance event support
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -9,9 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#ifndef _ASM_PERF_COUNTER_H
-#define _ASM_PERF_COUNTER_H
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
-#define PERF_COUNTER_INDEX_OFFSET 0
+#define PERF_EVENT_INDEX_OFFSET 0
-#endif /* _ASM_PERF_COUNTER_H */
+#endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 4a8fb427ce0..be6ef0f5cd4 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -342,7 +342,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
#ifdef __KERNEL__
diff --git a/arch/frv/kernel/debug-stub.c b/arch/frv/kernel/debug-stub.c
index 2f6c60c921e..2845139c807 100644
--- a/arch/frv/kernel/debug-stub.c
+++ b/arch/frv/kernel/debug-stub.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
+#include <linux/start_kernel.h>
#include <asm/system.h>
#include <asm/serial-regs.h>
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index fde1e446b44..189397ec012 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1525,6 +1525,6 @@ sys_call_table:
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c
index 1d3df1d9495..3c3e0b336a9 100644
--- a/arch/frv/kernel/init_task.c
+++ b/arch/frv/kernel/init_task.c
@@ -19,9 +19,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
index be722fc1acf..0d4d3e3a4cf 100644
--- a/arch/frv/kernel/pm.c
+++ b/arch/frv/kernel/pm.c
@@ -150,7 +150,7 @@ static int user_atoi(char __user *ubuf, size_t len)
/*
* Send us to sleep.
*/
-static int sysctl_pm_do_suspend(ctl_table *ctl, int write, struct file *filp,
+static int sysctl_pm_do_suspend(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *fpos)
{
int retval, mode;
@@ -198,13 +198,13 @@ static int try_set_cmode(int new_cmode)
}
-static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
+static int cmode_procctl(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_cmode;
if (!write)
- return proc_dointvec(ctl, write, filp, buffer, lenp, fpos);
+ return proc_dointvec(ctl, write, buffer, lenp, fpos);
new_cmode = user_atoi(buffer, *lenp);
@@ -301,13 +301,13 @@ static int try_set_cm(int new_cm)
return 0;
}
-static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
+static int p0_procctl(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_p0;
if (!write)
- return proc_dointvec(ctl, write, filp, buffer, lenp, fpos);
+ return proc_dointvec(ctl, write, buffer, lenp, fpos);
new_p0 = user_atoi(buffer, *lenp);
@@ -345,13 +345,13 @@ static int p0_sysctl(ctl_table *table,
return 1;
}
-static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
+static int cm_procctl(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_cm;
if (!write)
- return proc_dointvec(ctl, write, filp, buffer, lenp, fpos);
+ return proc_dointvec(ctl, write, buffer, lenp, fpos);
new_cm = user_atoi(buffer, *lenp);
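All of the FRV handlers above drop the struct file * argument to match the updated sysctl proc_handler prototype and forward to proc_dointvec() without it. A hedged kernel-style sketch of a handler in the new form (hypothetical name, needs linux/sysctl.h):

    static int demo_procctl(ctl_table *ctl, int write,
                            void __user *buffer, size_t *lenp, loff_t *fpos)
    {
        /* reads: let the generic integer handler do the work */
        if (!write)
            return proc_dointvec(ctl, write, buffer, lenp, fpos);

        /* writes: parse and apply the new value (omitted in this sketch) */
        return 0;
    }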
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 0de50df7497..90425593821 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -83,13 +83,9 @@ void (*idle)(void) = core_sleep_idle;
*/
void cpu_idle(void)
{
- int cpu = smp_processor_id();
-
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
- irq_stat[cpu].idle_timestamp = jiffies;
-
check_pgt_cache();
if (!frv_dma_inprogress && idle)
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index baadc97f862..2b6b5289cdc 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -21,7 +21,6 @@
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 0a377210c89..f4709756d0d 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
diff --git a/arch/frv/lib/cache.S b/arch/frv/lib/cache.S
index 0e10ad8dc46..0c4fb204911 100644
--- a/arch/frv/lib/cache.S
+++ b/arch/frv/lib/cache.S
@@ -1,4 +1,4 @@
-/* cache.S: cache managment routines
+/* cache.S: cache management routines
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_event.c
index 2000feecd57..9ac5acfd2e9 100644
--- a/arch/frv/lib/perf_counter.c
+++ b/arch/frv/lib/perf_event.c
@@ -1,4 +1,4 @@
-/* Performance counter handling
+/* Performance event handling
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -9,11 +9,11 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
/*
- * mark the performance counter as pending
+ * mark the performance event as pending
*/
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
}
diff --git a/arch/frv/mb93090-mb00/Makefile b/arch/frv/mb93090-mb00/Makefile
index 76595e87073..b73b542f8f4 100644
--- a/arch/frv/mb93090-mb00/Makefile
+++ b/arch/frv/mb93090-mb00/Makefile
@@ -11,3 +11,5 @@ else
obj-y += pci-dma-nommu.o
endif
endif
+
+obj-$(CONFIG_MTD) += flash.o
diff --git a/arch/frv/mb93090-mb00/flash.c b/arch/frv/mb93090-mb00/flash.c
new file mode 100644
index 00000000000..c0e3707c229
--- /dev/null
+++ b/arch/frv/mb93090-mb00/flash.c
@@ -0,0 +1,90 @@
+/* Flash mappings for the MB93090-MB00 motherboard
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+
+#define MB93090_BOOTROM_ADDR 0xFF000000 /* Boot ROM */
+#define MB93090_BOOTROM_SIZE (2 * 1024 * 1024)
+#define MB93090_USERROM_ADDR 0xFF200000 /* User ROM */
+#define MB93090_USERROM_SIZE (2 * 1024 * 1024)
+
+/*
+ * default MTD partition table for both main flash devices, expected to be
+ * overridden by RedBoot
+ */
+static struct mtd_partition mb93090_partitions[] = {
+ {
+ .name = "Filesystem",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 0,
+ }
+};
+
+/*
+ * Definition of the MB93090 Boot ROM (on the CPU card)
+ */
+static struct physmap_flash_data mb93090_bootrom_data = {
+ .width = 2,
+ .nr_parts = 1,
+ .parts = mb93090_partitions,
+};
+
+static struct resource mb93090_bootrom_resource = {
+ .start = MB93090_BOOTROM_ADDR,
+ .end = MB93090_BOOTROM_ADDR + MB93090_BOOTROM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device mb93090_bootrom = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev.platform_data = &mb93090_bootrom_data,
+ .num_resources = 1,
+ .resource = &mb93090_bootrom_resource,
+};
+
+/*
+ * Definition of the MB93090 User ROM (on the motherboard)
+ */
+static struct physmap_flash_data mb93090_userrom_data = {
+ .width = 2,
+ .nr_parts = 1,
+ .parts = mb93090_partitions,
+};
+
+static struct resource mb93090_userrom_resource = {
+ .start = MB93090_USERROM_ADDR,
+ .end = MB93090_USERROM_ADDR + MB93090_USERROM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device mb93090_userrom = {
+ .name = "physmap-flash",
+ .id = 1,
+ .dev.platform_data = &mb93090_userrom_data,
+ .num_resources = 1,
+ .resource = &mb93090_userrom_resource,
+};
+
+/*
+ * register the MB93090 flashes
+ */
+static int __init mb93090_mtd_init(void)
+{
+ platform_device_register(&mb93090_bootrom);
+ platform_device_register(&mb93090_userrom);
+ return 0;
+}
+
+module_init(mb93090_mtd_init);
diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h
index 9d7f7a7462b..c2e1aa0f0d1 100644
--- a/arch/h8300/include/asm/hardirq.h
+++ b/arch/h8300/include/asm/hardirq.h
@@ -1,18 +1,7 @@
#ifndef __H8300_HARDIRQ_H
#define __H8300_HARDIRQ_H
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-extern void ack_bad_irq(unsigned int irq);
+#include <asm/irq.h>
#define HARDIRQ_BITS 8
@@ -25,4 +14,6 @@ extern void ack_bad_irq(unsigned int irq);
# error HARDIRQ_BITS is too low!
#endif
+#include <asm-generic/hardirq.h>
+
#endif
diff --git a/arch/h8300/include/asm/mman.h b/arch/h8300/include/asm/mman.h
index cf35f0a6f12..8eebf89f5ab 100644
--- a/arch/h8300/include/asm/mman.h
+++ b/arch/h8300/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __H8300_MMAN_H__
-#define __H8300_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __H8300_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c
index 089c65ed6eb..54c1062ee80 100644
--- a/arch/h8300/kernel/init_task.c
+++ b/arch/h8300/kernel/init_task.c
@@ -31,7 +31,6 @@ EXPORT_SYMBOL(init_task);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 74f8dd7b34d..5c913d47211 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -81,11 +81,6 @@ struct irq_chip h8300irq_chip = {
.end = h8300_end_irq,
};
-void ack_bad_irq(unsigned int irq)
-{
- printk("unexpected IRQ trap at vector %02x\n", irq);
-}
-
#if defined(CONFIG_RAMKERNEL)
static unsigned long __init *get_vector_address(void)
{
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index 2745656dcc5..8cb5d73a0e3 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -17,7 +17,6 @@
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/fs.h>
#include <linux/ipc.h>
diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c
index e7c6e614a75..2193a2e2859 100644
--- a/arch/h8300/kernel/timer/tpu.c
+++ b/arch/h8300/kernel/timer/tpu.c
@@ -7,7 +7,6 @@
*
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 662b02ecb86..b9e24907e6e 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
#define VMLINUX_SYMBOL(_sym_) _##_sym_
#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
/* target memory map */
#ifdef CONFIG_H8300H_GENERIC
@@ -79,11 +80,8 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
__etext = . ;
- . = ALIGN(16); /* Exception table */
- ___start___ex_table = .;
- *(__ex_table)
- ___stop___ex_table = .;
}
+ EXCEPTION_TABLE(16)
RODATA
#if defined(CONFIG_ROMKERNEL)
@@ -100,8 +98,7 @@ SECTIONS
__sdata = . ;
___data_start = . ;
- . = ALIGN(0x2000) ;
- *(.data.init_task)
+ INIT_TASK_DATA(0x2000)
. = ALIGN(0x4) ;
DATA_DATA
. = ALIGN(0x4) ;
@@ -114,24 +111,16 @@ SECTIONS
__einittext = .;
INIT_DATA
. = ALIGN(0x4) ;
+ INIT_SETUP(0x4)
___setup_start = .;
*(.init.setup)
. = ALIGN(0x4) ;
___setup_end = .;
- ___initcall_start = .;
- INITCALLS
- ___initcall_end = .;
- ___con_initcall_start = .;
- *(.con_initcall.init)
- ___con_initcall_end = .;
+ INIT_CALLS
+ CON_INITCALL
EXIT_TEXT
EXIT_DATA
-#if defined(CONFIG_BLK_DEV_INITRD)
- . = ALIGN(4);
- ___initramfs_start = .;
- *(.init.ramfs)
- ___initramfs_end = .;
-#endif
+ INIT_RAM_FS
. = ALIGN(0x4) ;
___init_end = .;
__edata = . ;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 011a1cdf0eb..6851e52ed5a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -500,6 +500,10 @@ config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+ depends on PROC_KCORE
+
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 8cfb001092a..674a8374c6d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2026,24 +2026,21 @@ acpi_sba_ioc_add(struct acpi_device *device)
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
- struct acpi_buffer buffer;
struct acpi_device_info *dev_info;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_get_object_info(device->handle, &buffer);
+ status = acpi_get_object_info(device->handle, &dev_info);
if (ACPI_FAILURE(status))
return 1;
- dev_info = buffer.pointer;
/*
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
*/
- if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
+ if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
if (!iovp_shift)
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 16ef61a91d9..625ed8f76fc 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1270,7 +1270,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
case PT_CS:
if (value != __USER_CS)
printk(KERN_ERR
- "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
+ "ia32.putreg: attempt to set invalid segment register %d = %x\n",
regno, value);
break;
default:
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index d20b998cb91..7fa8a859466 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -30,6 +30,7 @@ typedef u64 cputime_t;
typedef u64 cputime64_t;
#define cputime_zero ((cputime_t)0)
+#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~((cputime_t)0) >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
index 48cf8b98a0b..4459028e5aa 100644
--- a/arch/ia64/include/asm/mman.h
+++ b/arch/ia64/include/asm/mman.h
@@ -8,19 +8,9 @@
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman.h>
-#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
-#define MAP_GROWSUP 0x00200 /* register stack-like segment */
-#define MAP_DENYWRITE 0x00800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */
-#define MAP_LOCKED 0x02000 /* pages are locked */
-#define MAP_NORESERVE 0x04000 /* don't check for reservations */
-#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
+#define MAP_GROWSUP 0x0200 /* register stack-like segment */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e05..0b3b3997dec 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else /* CONFIG_SMP */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d0141fbf51d..3ddb4e709db 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
#define cpumask_of_node(node) (&node_to_cpu_mask[node])
/*
@@ -104,8 +103,6 @@ void build_cpu_to_node_map(void);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define smt_capable() (smp_num_siblings > 1)
diff --git a/arch/ia64/install.sh b/arch/ia64/install.sh
index 929e780026d..0e932f5dcd1 100644
--- a/arch/ia64/install.sh
+++ b/arch/ia64/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index 1d87f84069b..ab9b03a9adc 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -10,7 +10,7 @@ quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index c475fc281be..e253ab8fcbc 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -33,7 +33,8 @@ union {
struct thread_info thread_info;
} s;
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task") __init_task_data =
+ {{
.task = INIT_TASK(init_task_mem.s.task),
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 223abb13410..285aae8431c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
void __init pci_swiotlb_init(void)
{
- if (!iommu_detected || iommu_pass_through) {
+ if (!iommu_detected) {
#ifdef CONFIG_IA64_GENERIC
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 93ebfea43c6..dabeefe2113 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -302,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
- smp_call_function_mask(mm->cpu_vm_mask,
+ smp_call_function_many(mm_cpumask(mm),
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
local_irq_disable();
local_finish_flush_tlb_mm(mm);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b115b3bbf04..1857766a63c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -617,7 +617,6 @@ mem_init (void)
long reserved_pages, codesize, datasize, initsize;
pg_data_t *pgdat;
int i;
- static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
@@ -639,10 +638,6 @@ mem_init (void)
high_memory = __va(max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
- kclist_add(&kcore_kernel, _stext, _end - _stext);
-
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
@@ -655,7 +650,7 @@ mem_init (void)
initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
- "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
+ "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index cabba332cc4..c41234f1b82 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -41,6 +41,12 @@ config HZ
int
default 100
+config GENERIC_TIME
+ def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/m32r/boot/compressed/install.sh b/arch/m32r/boot/compressed/install.sh
index 6d72e9e7269..16e5a0a1343 100644
--- a/arch/m32r/boot/compressed/install.sh
+++ b/arch/m32r/boot/compressed/install.sh
@@ -24,8 +24,8 @@
# User may have a custom install script
-if [ -x /sbin/installkernel ]; then
- exec /sbin/installkernel "$@"
+if [ -x /sbin/${INSTALLKERNEL} ]; then
+ exec /sbin/${INSTALLKERNEL} "$@"
fi
if [ "$2" = "zImage" ]; then
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index cb8aa762f23..4c31c0ae215 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -2,14 +2,7 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <asm/irq.h>
#if NR_IRQS > 256
#define HARDIRQ_BITS 9
@@ -26,11 +19,7 @@ typedef struct {
# error HARDIRQ_BITS is too low!
#endif
-static inline void ack_bad_irq(int irq)
-{
- printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
- BUG();
-}
+#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
#endif /* __KERNEL__ */
diff --git a/arch/m32r/include/asm/mman.h b/arch/m32r/include/asm/mman.h
index 04a5f40aa40..8eebf89f5ab 100644
--- a/arch/m32r/include/asm/mman.h
+++ b/arch/m32r/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __M32R_MMAN_H__
-#define __M32R_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __M32R_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d..a70a3df3363 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
if (prev != next) {
#ifdef CONFIG_SMP
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
#endif /* CONFIG_SMP */
/* Set MPTB = next->pgd */
*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
}
#ifdef CONFIG_SMP
else
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
activate_context(next);
#endif /* CONFIG_SMP */
}
diff --git a/arch/m32r/include/asm/page.h b/arch/m32r/include/asm/page.h
index 11777f7a562..725ede8f288 100644
--- a/arch/m32r/include/asm/page.h
+++ b/arch/m32r/include/asm/page.h
@@ -1,9 +1,11 @@
#ifndef _ASM_M32R_PAGE_H
#define _ASM_M32R_PAGE_H
+#include <linux/const.h>
+
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifndef __ASSEMBLY__
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 1a997fc148a..8397c249989 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -140,8 +140,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.lr)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
-#define THREAD_SIZE (2*PAGE_SIZE)
-
#define cpu_relax() barrier()
#endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index b96a6d2ffbc..e67ded1aab9 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* not __ASSEMBLY__ */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 71578151a40..ed240b6e8e7 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -55,6 +55,8 @@ struct thread_info {
#define PREEMPT_ACTIVE 0x10000000
+#define THREAD_SIZE (PAGE_SIZE << 1)
+
/*
* macros/functions for gaining access to the thread information structure
*/
@@ -76,8 +78,6 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-#define THREAD_SIZE (2*PAGE_SIZE)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
@@ -125,17 +125,6 @@ static inline unsigned int get_thread_fault_code(void)
return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}
-#else /* !__ASSEMBLY__ */
-
-#define THREAD_SIZE 8192
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) GET_THREAD_INFO reg
- .macro GET_THREAD_INFO reg
- ldi \reg, #-THREAD_SIZE
- and \reg, sp
- .endm
-
#endif
/*
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 612d35b082a..403869833b9 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -118,6 +118,13 @@
#define resume_kernel restore_all
#endif
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg) GET_THREAD_INFO reg
+ .macro GET_THREAD_INFO reg
+ ldi \reg, #-THREAD_SIZE
+ and \reg, sp
+ .endm
+
ENTRY(ret_from_fork)
pop r0
bl schedule_tail
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
index 0a7194439eb..a46652dd83e 100644
--- a/arch/m32r/kernel/head.S
+++ b/arch/m32r/kernel/head.S
@@ -268,13 +268,13 @@ ENTRY(empty_zero_page)
/*------------------------------------------------------------------------
* Stack area
*/
- .section .spi
+ .section .init.data, "aw"
ALIGN
.global spi_stack_top
.zero 1024
spi_stack_top:
- .section .spu
+ .section .init.data, "aw"
ALIGN
.global spu_stack_top
.zero 1024
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index fce57e5d3f9..6c42d5f8df5 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 98b8feb12ed..98682bba0ed 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -77,7 +77,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
struct user * dummy = NULL;
#endif
- if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
+ if ((off & 3) || off > sizeof(struct user) - 3)
return -EIO;
off >>= 2;
@@ -139,8 +139,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
struct user * dummy = NULL;
#endif
- if ((off & 3) || off < 0 ||
- off > sizeof(struct user) - 3)
+ if ((off & 3) || off > sizeof(struct user) - 3)
return -EIO;
off >>= 2;
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 929e5c9d3ad..1b7598e6f6e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);
static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
void smp_send_reschedule(int cpu_id)
{
WARN_ON(cpu_is_offline(cpu_id));
- send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+ send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}
/*==========================================================================*
@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
spin_lock(&flushcache_lock);
mask=cpus_addr(cpumask);
atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
- send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
mb();
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
+ cpu_mask = *mm_cpumask(mm);
cpu_clear(cpu_id, cpu_mask);
if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
if (mm == current->mm)
activate_context(mm);
else
- cpu_clear(cpu_id, mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
local_irq_restore(flags);
}
if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = mm->cpu_vm_mask;
+ cpu_mask = *mm_cpumask(mm);
cpu_clear(cpu_id, cpu_mask);
#ifdef DEBUG_SMP
@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+ send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
while (!cpus_empty(flush_cpumask)) {
/* nothing. lockup detection does not belong here */
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
if (flush_mm == current->active_mm)
activate_context(flush_mm);
else
- cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
} else {
unsigned long va = flush_va;
@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
for ( ; ; );
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}
void arch_send_call_function_single_ipi(int cpu)
{
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
/*==========================================================================*
@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
cpumask = cpu_online_map;
cpu_clear(smp_processor_id(), cpumask);
- send_IPI_mask(cpumask, ipi_num, try);
+ send_IPI_mask(&cpumask, ipi_num, try);
}
/*==========================================================================*
@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
cpumask_t physid_mask, tmp;
int cpu_id, phys_id;
@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
if (num_cpus <= 1) /* NO MP */
return;
- cpus_and(tmp, cpumask, cpu_online_map);
- BUG_ON(!cpus_equal(cpumask, tmp));
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ BUG_ON(!cpumask_equal(cpumask, &tmp));
physid_mask = CPU_MASK_NONE;
- for_each_cpu_mask(cpu_id, cpumask){
+ for_each_cpu(cpu_id, cpumask) {
if ((phys_id = cpu_to_physid(cpu_id)) != -1)
cpu_set(phys_id, physid_mask);
}
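All of the smp.c changes above follow one pattern: cpumask_t values passed by copy become const struct cpumask pointers, and mm->cpu_vm_mask is only touched through mm_cpumask(). A minimal caller-side sketch of the new convention (example_flush_others and example_flush_fn are illustrative names, not functions from this patch):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Placeholder for the per-CPU work; the real handler is arch-specific. */
static void example_flush_fn(void *info)
{
}

/* Called with preemption disabled, as smp_call_function_many() expects. */
static void example_flush_others(struct mm_struct *mm, int this_cpu)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return;
	cpumask_and(tmp, mm_cpumask(mm), cpu_online_mask);
	cpumask_clear_cpu(this_cpu, tmp);
	if (!cpumask_empty(tmp))
		smp_call_function_many(tmp, example_flush_fn, mm, 1);
	free_cpumask_var(tmp);
}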
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 2547d6c4a82..e034844cfc0 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
show_mp_info(nr_cpu);
@@ -213,7 +213,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (!physid_isset(phys_id, phys_cpu_present_map))
continue;
- if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
+ if (max_cpus <= cpucount + 1)
continue;
do_boot_cpu(phys_id);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index cada3ba4b99..ba61c4c7320 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -48,7 +48,7 @@ extern void smp_local_timer_interrupt(void);
static unsigned long latch;
-static unsigned long do_gettimeoffset(void)
+u32 arch_gettimeoffset(void)
{
unsigned long elapsed_time = 0; /* [us] */
@@ -93,79 +93,10 @@ static unsigned long do_gettimeoffset(void)
#error no chip configuration
#endif
- return elapsed_time;
+ return elapsed_time * 1000;
}
/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long seq;
- unsigned long usec, sec;
- unsigned long max_ntp_tick = tick_usec - tickadj;
-
- do {
- seq = read_seqbegin(&xtime_lock);
-
- usec = do_gettimeoffset();
-
- /*
- * If time_adjust is negative then NTP is slowing the clock
- * so make sure not to go into next possible interval.
- * Better to lose some accuracy than have time go backwards..
- */
- if (unlikely(time_adjust < 0))
- usec = min(usec, max_ntp_tick);
-
- sec = xtime.tv_sec;
- usec += (xtime.tv_nsec / 1000);
- } while (read_seqretry(&xtime_lock, seq));
-
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= do_gettimeoffset() * NSEC_PER_USEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
-
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-/*
* In order to set the CMOS clock precisely, set_rtc_mmss has to be
* called 500 ms after the second nowtime has started, because when
* nowtime is written into the registers of the CMOS clock, it will
@@ -192,6 +123,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
+ /* XXX FIXME. Uh, the xtime_lock should be held here, no? */
do_timer(1);
#ifndef CONFIG_SMP
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index de5e21cca6a..8ceb6181d80 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/addrspace.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
OUTPUT_ARCH(m32r)
#if defined(__LITTLE_ENDIAN__)
@@ -40,83 +41,22 @@ SECTIONS
#endif
_etext = .; /* End of text section */
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
+ EXCEPTION_TABLE(16)
RODATA
-
- /* writeable */
- .data : { /* Data */
- *(.spu)
- *(.spi)
- DATA_DATA
- CONSTRUCTORS
- }
-
- . = ALIGN(4096);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(4096);
- __nosave_end = .;
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
_edata = .; /* End of data section */
- . = ALIGN(8192); /* init_task */
- .data.init_task : { *(.data.init_task) }
-
/* will be freed after init */
- . = ALIGN(4096); /* Init code and data */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : { INIT_DATA }
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
- SECURITY_INIT
- . = ALIGN(4);
- __alt_instructions = .;
- .altinstructions : { *(.altinstructions) }
- __alt_instructions_end = .;
- .altinstr_replacement : { *(.altinstr_replacement) }
- /* .exit.text is discard at runtime, not link time, to deal with references
- from .altinstructions and .eh_frame */
- .exit.text : { EXIT_TEXT }
- .exit.data : { EXIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(4096);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
-
- PERCPU(4096)
- . = ALIGN(4096);
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+ PERCPU(PAGE_SIZE)
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss : { *(.bss) }
- . = ALIGN(4);
- __bss_stop = .;
+ BSS_SECTION(0, 0, 4)
_end = . ;
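The replacement macros come from asm-generic/vmlinux.lds.h: EXCEPTION_TABLE(16) stands in for the hand-rolled __ex_table block, and RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) folds the .data, cacheline-aligned, nosave and init_task output sections into one helper whose three arguments are, roughly, the L1 cache line size, the page-aligned-data alignment and the init-task stack alignment; INIT_TEXT_SECTION(), INIT_DATA_SECTION(), PERCPU() and BSS_SECTION() cover the remaining init and BSS boilerplate.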
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 24d429f9358..9f581df3952 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -171,7 +171,7 @@ void __init mem_init(void)
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
"%dk reserved, %dk data, %dk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index fb87c08c6b5..29dd8489ffe 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -58,6 +58,12 @@ config HZ
int
default 100
+config GENERIC_TIME
+ def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
+ def_bool y
+
mainmenu "Linux/68k Kernel Configuration"
source "init/Kconfig"
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
index 394ee946015..554f65b6cd3 100644
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ b/arch/m68k/include/asm/hardirq_mm.h
@@ -1,16 +1,8 @@
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
#define HARDIRQ_BITS 8
+#include <asm-generic/hardirq.h>
+
#endif
diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h
index 9f5c4c4b3c7..8eebf89f5ab 100644
--- a/arch/m68k/include/asm/mman.h
+++ b/arch/m68k/include/asm/mman.h
@@ -1,17 +1 @@
-#ifndef __M68K_MMAN_H__
-#define __M68K_MMAN_H__
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* __M68K_MMAN_H__ */
+#include <asm-generic/mman.h>
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 946d8691f2b..48b87f5ced5 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -335,7 +335,7 @@
#define __NR_preadv 329
#define __NR_pwritev 330
#define __NR_rt_tgsigqueueinfo 331
-#define __NR_perf_counter_open 332
+#define __NR_perf_event_open 332
#ifdef __KERNEL__
diff --git a/arch/m68k/install.sh b/arch/m68k/install.sh
index 9c6bae6112e..57d640d4382 100644
--- a/arch/m68k/install.sh
+++ b/arch/m68k/install.sh
@@ -33,8 +33,8 @@ verify "$3"
# User may have a custom install script
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
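Like the m32r install.sh change above, this defers to whatever the top-level Makefile exports in INSTALLKERNEL (by default an installkernel helper), so a custom installer can be selected at build time with something like make INSTALLKERNEL=my-installkernel install; the actual default is set outside this hunk.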
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 922f52e7ed1..c5b33634c98 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -756,5 +756,5 @@ sys_call_table:
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 72bad65dba3..41230c595a8 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -42,9 +42,9 @@
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union
-__attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
- = { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data
+ __attribute__((aligned(THREAD_SIZE))) =
+ { INIT_THREAD_INFO(init_task) };
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 7f54efaf60b..7deb402bfc7 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -20,7 +20,6 @@
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/ipc.h>
#include <asm/setup.h>
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 54d980795fc..17dc2a31a7c 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -91,77 +91,11 @@ void __init time_init(void)
mach_sched_init(timer_interrupt);
}
-/*
- * This version of gettimeofday has near microsecond resolution.
- */
-void do_gettimeofday(struct timeval *tv)
+u32 arch_gettimeoffset(void)
{
- unsigned long flags;
- unsigned long seq;
- unsigned long usec, sec;
- unsigned long max_ntp_tick = tick_usec - tickadj;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
-
- usec = mach_gettimeoffset();
-
- /*
- * If time_adjust is negative then NTP is slowing the clock
- * so make sure not to go into next possible interval.
- * Better to lose some accuracy than have time go backwards..
- */
- if (unlikely(time_adjust < 0))
- usec = min(usec, max_ntp_tick);
-
- sec = xtime.tv_sec;
- usec += xtime.tv_nsec/1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
-
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /* This is revolting. We need to set the xtime.tv_nsec
- * correctly. However, the value in this location is
- * is value at the last tick.
- * Discover what correction gettimeofday
- * would have done, and then undo it!
- */
- nsec -= 1000 * mach_gettimeoffset();
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
+ return mach_gettimeoffset() * 1000;
}
-EXPORT_SYMBOL(do_settimeofday);
-
-
static int __init rtc_init(void)
{
struct platform_device *pdev;
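Both time.c conversions (m32r above, m68k here) reduce the arch's job to reporting the offset since the last tick in nanoseconds; the generic timekeeping code selected by GENERIC_TIME/ARCH_USES_GETTIMEOFFSET does the rest. A minimal sketch of the hook, with example_us_since_last_tick() standing in for the board-specific readers used above:

#include <linux/types.h>

/* Stand-in for the board-specific "microseconds since last tick" reader. */
static unsigned long example_us_since_last_tick(void)
{
	return 0;	/* a real version reads the timer hardware */
}

/* Called by the generic timekeeping code when ARCH_USES_GETTIMEOFFSET=y. */
u32 arch_gettimeoffset(void)
{
	return example_us_since_last_tick() * 1000;	/* us -> ns */
}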
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 0007b2adf3a..774549accd2 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -126,7 +126,7 @@ void __init mem_init(void)
#endif
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
- (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
totalram_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
diff --git a/arch/m68knommu/kernel/init_task.c b/arch/m68knommu/kernel/init_task.c
index 45e97a207fe..cbf9dc3cc51 100644
--- a/arch/m68knommu/kernel/init_task.c
+++ b/arch/m68knommu/kernel/init_task.c
@@ -31,7 +31,6 @@ EXPORT_SYMBOL(init_task);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index 70028163862..efdd090778a 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -17,7 +17,6 @@
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/ipc.h>
#include <linux/fs.h>
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 0ae123e0898..23535cc415a 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -350,7 +350,7 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 2db722d80d4..bbd8327f189 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,6 +6,7 @@ mainmenu "Linux/Microblaze Kernel Configuration"
config MICROBLAZE
def_bool y
select HAVE_LMB
+ select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
config SWAP
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 8439598d465..34187354304 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -37,12 +37,12 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
# r31 holds current when in kernel mode
-KBUILD_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
+KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
LDFLAGS :=
LDFLAGS_vmlinux :=
-LIBGCC := $(shell $(CC) $(KBUILD_KERNEL) -print-libgcc-file-name)
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
head-y := arch/microblaze/kernel/head.o
libs-y += arch/microblaze/lib/
@@ -53,22 +53,41 @@ core-y += arch/microblaze/platform/
boot := arch/microblaze/boot
+# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
+DTB:=$(subst simpleImage.,,$(filter simpleImage.%, $(MAKECMDGOALS)))
+
+ifneq ($(DTB),)
+ core-y += $(boot)/
+endif
+
# defines filename extension depending memory management type
ifeq ($(CONFIG_MMU),)
MMU := -nommu
endif
-export MMU
+export MMU DTB
all: linux.bin
+BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.%
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-linux.bin linux.bin.gz: vmlinux
+$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
define archhelp
- echo '* linux.bin - Create raw binary'
- echo ' linux.bin.gz - Create compressed raw binary'
+ echo '* linux.bin - Create raw binary'
+ echo ' linux.bin.gz - Create compressed raw binary'
+ echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
+ echo ' - stripped elf with fdt blob'
+ echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
+ echo ' *_defconfig - Select default config from arch/microblaze/configs'
+ echo ''
+ echo ' Targets with <dt> embed a device tree blob inside the image'
+ echo ' These targets support boards with firmware that does not'
+ echo ' support passing a device tree directly. Replace <dt> with the'
+ echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
+ echo ' (minus the .dts extension).'
endef
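In practice this means a board image is built with something like make simpleImage.system, which (per the rules added to arch/microblaze/boot/Makefile below) produces a stripped ELF with arch/microblaze/boot/dts/system.dts compiled and linked in, plus a simpleImage.system.unstrip variant; the usable <dt> names simply mirror the .dts files present in arch/microblaze/boot/dts/.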
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index c2bb043a029..21f13322a4c 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,10 +2,24 @@
# arch/microblaze/boot/Makefile
#
-targets := linux.bin linux.bin.gz
+obj-y += linked_dtb.o
+
+targets := linux.bin linux.bin.gz simpleImage.%
OBJCOPYFLAGS_linux.bin := -O binary
+# Where the DTS files live
+dtstree := $(srctree)/$(src)/dts
+
+# Ensure system.dtb exists
+$(obj)/linked_dtb.o: $(obj)/system.dtb
+
+# Generate system.dtb from $(DTB).dtb
+ifneq ($(DTB),system)
+$(obj)/system.dtb: $(obj)/$(DTB).dtb
+ $(call if_changed,cp)
+endif
+
$(obj)/linux.bin: vmlinux FORCE
[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
@@ -16,4 +30,27 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
$(call if_changed,gzip)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
-clean-kernel += linux.bin linux.bin.gz
+quiet_cmd_cp = CP $< $@$2
+ cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
+
+quiet_cmd_strip = STRIP $@
+ cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@
+
+$(obj)/simpleImage.%: vmlinux FORCE
+ $(call if_changed,cp,.unstrip)
+ $(call if_changed,strip)
+ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+
+# Rule to build device tree blobs
+DTC = $(objtree)/scripts/dtc/dtc
+
+# Rule to build device tree blobs
+quiet_cmd_dtc = DTC $@
+ cmd_dtc = $(DTC) -O dtb -o $(obj)/$*.dtb -b 0 -p 1024 $(dtstree)/$*.dts
+
+$(obj)/%.dtb: $(dtstree)/%.dts FORCE
+ $(call if_changed,dtc)
+
+clean-kernel += linux.bin linux.bin.gz simpleImage.*
+
+clean-files += *.dtb
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
new file mode 120000
index 00000000000..7cb657892f2
--- /dev/null
+++ b/arch/microblaze/boot/dts/system.dts
@@ -0,0 +1 @@
+../../platform/generic/system.dts
\ No newline at end of file
diff --git a/arch/microblaze/boot/linked_dtb.S b/arch/microblaze/boot/linked_dtb.S
new file mode 100644
index 00000000000..cb2b537aebe
--- /dev/null
+++ b/arch/microblaze/boot/linked_dtb.S
@@ -0,0 +1,3 @@
+.section __fdt_blob,"a"
+.incbin "arch/microblaze/boot/system.dtb"
+
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 09c32962b66..bb7c374713a 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc6
-# Tue Aug 18 11:00:02 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 10:28:50 2009
#
CONFIG_MICROBLAZE=y
# CONFIG_SWAP is not set
@@ -42,11 +42,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
@@ -260,6 +261,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -357,12 +359,10 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_KS8842 is not set
+CONFIG_XILINX_EMACLITE=y
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -460,6 +460,7 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -488,6 +489,7 @@ CONFIG_EXT2_FS=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -546,7 +548,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -671,18 +672,20 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
# CONFIG_SAMPLES is not set
# CONFIG_KMEMCHECK is not set
CONFIG_EARLY_PRINTK=y
-CONFIG_HEART_BEAT=y
+# CONFIG_HEART_BEAT is not set
CONFIG_DEBUG_BOOTMEM=y
#
@@ -697,7 +700,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -729,11 +731,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 8b638615a97..adb839bab70 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc6
-# Tue Aug 18 10:35:30 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 10:29:43 2009
#
CONFIG_MICROBLAZE=y
# CONFIG_SWAP is not set
@@ -44,11 +44,12 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
@@ -243,6 +244,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -272,6 +274,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -279,7 +282,6 @@ CONFIG_WIRELESS_OLD_REGULATORY=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -304,6 +306,7 @@ CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_OF_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
#
@@ -349,6 +352,7 @@ CONFIG_MTD_RAM=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_PHYSMAP_OF is not set
CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_PLATRAM is not set
@@ -429,12 +433,10 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_KS8842 is not set
+# CONFIG_XILINX_EMACLITE is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -535,7 +537,7 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
# CONFIG_USB is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -579,6 +581,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -639,7 +642,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -710,18 +712,20 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_EARLY_PRINTK=y
-CONFIG_HEART_BEAT=y
+# CONFIG_HEART_BEAT is not set
# CONFIG_DEBUG_BOOTMEM is not set
#
@@ -736,7 +740,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -768,11 +771,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/microblaze/include/asm/asm-compat.h b/arch/microblaze/include/asm/asm-compat.h
new file mode 100644
index 00000000000..e7bc9dc11b5
--- /dev/null
+++ b/arch/microblaze/include/asm/asm-compat.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_MICROBLAZE_ASM_COMPAT_H
+#define _ASM_MICROBLAZE_ASM_COMPAT_H
+
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+
+#endif /* _ASM_MICROBLAZE_ASM_COMPAT_H */
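The new asm-compat.h exists so a single constant or expression can be shared between C and assembly sources; a small hypothetical use (the constant name is illustrative, not from this patch):

#include <asm/asm-compat.h>

/* Expands to 0x2000UL when compiled as C, and to a plain 0x2000 in .S files. */
#define EXAMPLE_REGION_SIZE	ASM_CONST(0x2000)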
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 7c3ec13b44d..fc9997b73c0 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -210,6 +210,9 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
#define in_be32(a) __raw_readl((const void __iomem __force *)(a))
#define in_be16(a) __raw_readw(a)
+#define writel_be(v, a) out_be32((__force unsigned *)a, v)
+#define readl_be(a) in_be32((__force unsigned *)a)
+
/*
* Little endian
*/
diff --git a/arch/microblaze/include/asm/ipc.h b/arch/microblaze/include/asm/ipc.h
deleted file mode 100644
index a46e3d9c2a3..00000000000
--- a/arch/microblaze/include/asm/ipc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipc.h>
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 72aceae8868..880c988c223 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -17,6 +17,7 @@
#include <linux/pfn.h>
#include <asm/setup.h>
+#include <asm/asm-compat.h>
#include <linux/const.h>
#ifdef __KERNEL__
@@ -26,6 +27,8 @@
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR))
+
#ifndef __ASSEMBLY__
#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 27f8dafd8c3..ed67c9ed15b 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -38,7 +38,7 @@ extern void early_console_reg_tlb_alloc(unsigned int addr);
void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,
- unsigned int fdt);
+ unsigned int fdt, unsigned int msr);
void machine_restart(char *cmd);
void machine_shutdown(void);
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
new file mode 100644
index 00000000000..048dfcd8d89
--- /dev/null
+++ b/arch/microblaze/include/asm/syscall.h
@@ -0,0 +1,99 @@
+#ifndef __ASM_MICROBLAZE_SYSCALL_H
+#define __ASM_MICROBLAZE_SYSCALL_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+/* The system call number is given by the user in R12 */
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r12;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* TODO. */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return IS_ERR_VALUE(regs->r3) ? regs->r3 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r3;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (error)
+ regs->r3 = -error;
+ else
+ regs->r3 = val;
+}
+
+static inline microblaze_reg_t microblaze_get_syscall_arg(struct pt_regs *regs,
+ unsigned int n)
+{
+ switch (n) {
+ case 5: return regs->r10;
+ case 4: return regs->r9;
+ case 3: return regs->r8;
+ case 2: return regs->r7;
+ case 1: return regs->r6;
+ case 0: return regs->r5;
+ default:
+ BUG();
+ }
+ return ~0;
+}
+
+static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
+ unsigned int n,
+ unsigned long val)
+{
+ switch (n) {
+ case 5:
+ regs->r10 = val;
+ break;
+ case 4:
+ regs->r9 = val;
+ break;
+ case 3:
+ regs->r8 = val;
+ break;
+ case 2:
+ regs->r7 = val;
+ break;
+ case 1:
+ regs->r6 = val;
+ break;
+ case 0:
+ regs->r5 = val;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ while (n--)
+ *args++ = microblaze_get_syscall_arg(regs, i++);
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ while (n--)
+ microblaze_set_syscall_arg(regs, i++, *args++);
+}
+
+#endif /* __ASM_MICROBLAZE_SYSCALL_H */
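These helpers implement the generic asm/syscall.h interface on top of the MicroBlaze convention visible above: syscall number in r12, return value in r3, arguments in r5-r10. A hypothetical tracer-side sketch of how they are consumed (dump_one_syscall is not part of this patch):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

static void dump_one_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];

	syscall_get_arguments(task, regs, 0, 6, args);
	pr_info("syscall %ld: args %lx %lx %lx, error %ld\n",
		syscall_get_nr(task, regs), args[0], args[1], args[2],
		syscall_get_error(task, regs));
}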
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 0b852327c0e..cb05a07e55e 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -381,7 +381,7 @@
#define __NR_preadv 363 /* new */
#define __NR_pwritev 364 /* new */
#define __NR_rt_tgsigqueueinfo 365 /* new */
-#define __NR_perf_counter_open 366 /* new */
+#define __NR_perf_event_open 366 /* new */
#define __NR_syscalls 367
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index c411c6757de..3539babc1c1 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -28,6 +28,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.10.d", 0x0b},
{"7.20.a", 0x0c},
{"7.20.b", 0x0d},
+ {"7.20.c", 0x0e},
/* FIXME There is no keycode defined in MBV for these versions */
{"2.10.a", 0x10},
{"3.00.a", 0x20},
@@ -49,6 +50,8 @@ const struct family_string_key family_string_lookup[] = {
{"spartan3a", 0xa},
{"spartan3an", 0xb},
{"spartan3adsp", 0xc},
+ {"spartan6", 0xd},
+ {"virtex6", 0xe},
/* FIXME There is no key code defined for spartan2 */
{"spartan2", 0xf0},
{NULL, 0},
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index c7353e79f4a..acc1f05d1e2 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -308,38 +308,69 @@ C_ENTRY(_user_exception):
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
- la r15, r0, ret_from_trap-8
/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
* (r12 is not preserved), or return an error if r12 is not valid. The LP
* register should point to the location where
* the called function should return. [note that MAKE_SYS_CALL uses label 1] */
- /* See if the system call number is valid. */
+
+ # Step into virtual mode.
+ set_vms;
+ addik r11, r0, 3f
+ rtid r11, 0
+ nop
+3:
+ add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO /* get thread info */
+ lwi r11, r11, TI_FLAGS /* get flags in thread info */
+ andi r11, r11, _TIF_WORK_SYSCALL_MASK
+ beqi r11, 4f
+
+ addik r3, r0, -ENOSYS
+ swi r3, r1, PTO + PT_R3
+ brlid r15, do_syscall_trace_enter
+ addik r5, r1, PTO + PT_R0
+
+ # do_syscall_trace_enter returns the new syscall nr.
+ addk r12, r0, r3
+ lwi r5, r1, PTO+PT_R5;
+ lwi r6, r1, PTO+PT_R6;
+ lwi r7, r1, PTO+PT_R7;
+ lwi r8, r1, PTO+PT_R8;
+ lwi r9, r1, PTO+PT_R9;
+ lwi r10, r1, PTO+PT_R10;
+4:
+/* Jump to the appropriate function for the system call number in r12
+ * (r12 is not preserved), or return an error if r12 is not valid.
+ * The LP register should point to the location where the called function
+ * should return. [note that MAKE_SYS_CALL uses label 1] */
+ /* See if the system call number is valid */
addi r11, r12, -__NR_syscalls;
- bgei r11,1f;
+ bgei r11,5f;
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr */
add r12, r12, r12;
/* Track syscalls and store them to r0_ram */
- lwi r3, r12, 0x400 + TOPHYS(r0_ram)
+ lwi r3, r12, 0x400 + r0_ram
addi r3, r3, 1
- swi r3, r12, 0x400 + TOPHYS(r0_ram)
+ swi r3, r12, 0x400 + r0_ram
+
+ # Find and jump into the syscall handler.
+ lwi r12, r12, sys_call_table
+ /* where the trap should return need -8 to adjust for rtsd r15, 8 */
+ la r15, r0, ret_from_trap-8
+ bra r12
- lwi r12, r12, TOPHYS(sys_call_table); /* Function ptr */
- /* Make the system call. to r12*/
- set_vms;
- rtid r12, 0;
- nop;
/* The syscall number is invalid, return an error. */
-1: VM_ON; /* RETURN() expects virtual mode*/
+5:
addi r3, r0, -ENOSYS;
rtsd r15,8; /* looks like a normal subroutine return */
or r0, r0, r0
-/* Entry point used to return from a syscall/trap. */
+/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
set_bip; /* Ints masked for state restore*/
@@ -349,6 +380,23 @@ C_ENTRY(ret_from_trap):
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
+ # FIXME: Restructure all these flag checks.
+ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
+ lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_WORK_SYSCALL_MASK
+ beqi r11, 1f
+
+ swi r3, r1, PTO + PT_R3
+ swi r4, r1, PTO + PT_R4
+ brlid r15, do_syscall_trace_leave
+ addik r5, r1, PTO + PT_R0
+ lwi r3, r1, PTO + PT_R3
+ lwi r4, r1, PTO + PT_R4
+1:
+
+ /* We're returning to user mode, so check for various conditions that
+ * trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
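Taken together with the ptrace.c changes below, the entry path now steps into virtual mode, checks _TIF_WORK_SYSCALL_MASK, and lets do_syscall_trace_enter() audit the call and possibly substitute the syscall number before dispatch through sys_call_table; on the way out, ret_from_trap performs the mirror check and calls do_syscall_trace_leave().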
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 0cb64a31e89..d9f70f83097 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -72,7 +72,8 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
#endif
#if 0
- printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
+ printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x " \
+ "ESR=%08x\n",
type, user_mode(regs) ? "user" : "kernel", fsr,
(unsigned int) regs->pc, (unsigned int) regs->esr);
#endif
@@ -80,42 +81,50 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
switch (type & 0x1F) {
case MICROBLAZE_ILL_OPCODE_EXCEPTION:
if (user_mode(regs)) {
- printk(KERN_WARNING "Illegal opcode exception in user mode.\n");
+ pr_debug(KERN_WARNING "Illegal opcode exception " \
+ "in user mode.\n");
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
- printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n");
+ printk(KERN_WARNING "Illegal opcode exception " \
+ "in kernel mode.\n");
die("opcode exception", regs, SIGBUS);
break;
case MICROBLAZE_IBUS_EXCEPTION:
if (user_mode(regs)) {
- printk(KERN_WARNING "Instruction bus error exception in user mode.\n");
+ pr_debug(KERN_WARNING "Instruction bus error " \
+ "exception in user mode.\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
- printk(KERN_WARNING "Instruction bus error exception in kernel mode.\n");
+ printk(KERN_WARNING "Instruction bus error exception " \
+ "in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DBUS_EXCEPTION:
if (user_mode(regs)) {
- printk(KERN_WARNING "Data bus error exception in user mode.\n");
+ pr_debug(KERN_WARNING "Data bus error exception " \
+ "in user mode.\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
- printk(KERN_WARNING "Data bus error exception in kernel mode.\n");
+ printk(KERN_WARNING "Data bus error exception " \
+ "in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
if (user_mode(regs)) {
- printk(KERN_WARNING "Divide by zero exception in user mode\n");
- _exception(SIGILL, regs, ILL_ILLOPC, addr);
+ pr_debug(KERN_WARNING "Divide by zero exception " \
+ "in user mode\n");
+ _exception(SIGILL, regs, FPE_INTDIV, addr);
return;
}
- printk(KERN_WARNING "Divide by zero exception in kernel mode.\n");
+ printk(KERN_WARNING "Divide by zero exception " \
+ "in kernel mode.\n");
die("Divide by exception", regs, SIGBUS);
break;
case MICROBLAZE_FPU_EXCEPTION:
- printk(KERN_WARNING "FPU exception\n");
+ pr_debug(KERN_WARNING "FPU exception\n");
/* IEEE FP exception */
/* I removed fsr variable and use code var for storing fsr */
if (fsr & FSR_IO)
@@ -133,7 +142,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
- printk(KERN_WARNING "Privileged exception\n");
+ pr_debug(KERN_WARNING "Privileged exception\n");
/* "brk r0,r0" - used as debug breakpoint */
if (get_user(code, (unsigned long *)regs->pc) == 0
&& code == 0x980c0000) {
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index e41c6ce2a7b..697ce3007f3 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -54,6 +54,16 @@ ENTRY(_start)
mfs r1, rmsr
andi r1, r1, ~2
mts rmsr, r1
+/*
+ * Check whether this MicroBlaze implements the msr* instructions:
+ * set the carry bit, try to clear it with msrclr, then read the carry
+ * bit back. If msrclr worked, the CPU has the MSR instructions;
+ * otherwise it does not.
+ */
+ /* r8 = 0: CPU has the msr instructions, 1: it does not */
+ rsubi r0, r0, 1 /* set the carry bit */
+ msrclr r0, 0x4 /* try to clear it */
+ /* read the carry bit, r8 will be '0' if msrclr exists */
+ addik r8, r0, 0
/* r7 may point to an FDT, or there may be one linked in.
if it's in r7, we've got to save it away ASAP.
@@ -209,8 +219,8 @@ start_here:
* Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
* the function.
*/
- la r8, r0, machine_early_init
- brald r15, r8
+ la r9, r0, machine_early_init
+ brald r15, r9
nop
#ifndef CONFIG_MMU
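The r8 value computed by the msrclr probe above reaches C code as the new fourth argument of machine_early_init() (see the setup.h and setup.c changes below), r8 being the fourth argument register in the MicroBlaze calling convention after r5-r7, which already carry the command line, RAM and FDT parameters.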
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 3288c973767..6b0288ebccd 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -84,9 +84,10 @@
#define NUM_TO_REG(num) r ## num
#ifdef CONFIG_MMU
-/* FIXME you can't change first load of MSR because there is
- * hardcoded jump bri 4 */
#define RESTORE_STATE \
+ lwi r5, r1, 0; \
+ mts rmsr, r5; \
+ nop; \
lwi r3, r1, PT_R3; \
lwi r4, r1, PT_R4; \
lwi r5, r1, PT_R5; \
@@ -309,6 +310,9 @@ _hw_exception_handler:
lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
#endif
+ mfs r5, rmsr;
+ nop
+ swi r5, r1, 0;
mfs r3, resr
nop
mfs r4, rear;
@@ -380,6 +384,8 @@ handle_other_ex: /* Handle Other exceptions here */
addk r8, r17, r0; /* Load exception address */
bralid r15, full_exception; /* Branch to the handler */
nop;
+ mts r0, rfsr; /* Clear sticky fsr */
+ nop
/*
* Trigger execution of the signal handler by enabling
diff --git a/arch/microblaze/kernel/init_task.c b/arch/microblaze/kernel/init_task.c
index 67da22579b6..b5d711f94ff 100644
--- a/arch/microblaze/kernel/init_task.c
+++ b/arch/microblaze/kernel/init_task.c
@@ -19,9 +19,8 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 00b12c6d532..4201c743cc9 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -235,6 +235,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
regs->pc = pc;
regs->r1 = usp;
regs->pt_mode = 0;
+ regs->msr |= MSR_UMS;
}
#ifdef CONFIG_MMU
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 53ff39af6a5..4b3ac32754d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -29,6 +29,10 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
+#include <linux/elf.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/tracehook.h>
#include <linux/errno.h>
#include <asm/processor.h>
@@ -174,6 +178,64 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return rval;
}
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long ret = 0;
+
+ secure_computing(regs->r12);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->regs[0].
+ */
+ ret = -1L;
+
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(EM_XILINX_MICROBLAZE, regs->r12,
+ regs->r5, regs->r6,
+ regs->r7, regs->r8);
+
+ return ret ?: regs->r12;
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->r3), regs->r3);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
+
+#if 0
+static asmlinkage void syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ /* The 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
+#endif
+
void ptrace_disable(struct task_struct *child)
{
/* nothing to do */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 2a97bf513b6..8c1e0f4dcf1 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -94,7 +94,7 @@ inline unsigned get_romfs_len(unsigned *addr)
#endif /* CONFIG_MTD_UCLINUX_EBSS */
void __init machine_early_init(const char *cmdline, unsigned int ram,
- unsigned int fdt)
+ unsigned int fdt, unsigned int msr)
{
unsigned long *src, *dst = (unsigned long *)0x0;
@@ -157,6 +157,16 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ if (msr)
+ early_printk("!!!Your kernel is configured to use MSR instructions "
+ "but this CPU does not have them %d\n", msr);
+#else
+ if (!msr)
+ early_printk("!!!Your kernel is not configured to use MSR instructions "
+ "but this CPU has them %d\n", msr);
+#endif
+
for (src = __ivt_start; src < __ivt_end; src++, dst++)
*dst = *src;
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index b96f1682bb2..07cabed4b94 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -23,7 +23,6 @@
#include <linux/mman.h>
#include <linux/sys.h>
#include <linux/ipc.h>
-#include <linux/utsname.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/err.h>
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 457216097df..ecec1915513 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -370,4 +370,4 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_rt_tgsigqueueinfo /* 365 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index ec5fa91a48d..e704188d785 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -12,13 +12,16 @@ OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze")
OUTPUT_ARCH(microblaze)
ENTRY(_start)
+#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
jiffies = jiffies_64 + 4;
SECTIONS {
. = CONFIG_KERNEL_START;
- .text : {
+ _start = CONFIG_KERNEL_BASE_ADDR;
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = . ;
_stext = . ;
*(.text .text.*)
@@ -33,24 +36,22 @@ SECTIONS {
}
. = ALIGN (4) ;
- _fdt_start = . ; /* place for fdt blob */
- . = . + 0x4000;
- _fdt_end = . ;
+ __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
+ _fdt_start = . ; /* place for fdt blob */
+ *(__fdt_blob) ; /* Any link-placed DTB */
+ . = _fdt_start + 0x4000; /* Pad up to 16kbyte */
+ _fdt_end = . ;
+ }
. = ALIGN(16);
RODATA
- . = ALIGN(16);
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
/*
* sdata2 section can go anywhere, but must be word aligned
* and SDA2_BASE must point to the middle of it
*/
- .sdata2 : {
+ .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
_ssrw = .;
. = ALIGN(4096); /* page aligned when MMU used - origin 0x8 */
*(.sdata2)
@@ -61,12 +62,7 @@ SECTIONS {
}
_sdata = . ;
- .data ALIGN (4096) : { /* page aligned when MMU used - origin 0x4 */
- DATA_DATA
- CONSTRUCTORS
- }
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
_edata = . ;
/* Reserve some low RAM for r0 based memory references */
@@ -74,18 +70,14 @@ SECTIONS {
r0_ram = . ;
. = . + 4096; /* a page should be enough */
- /* The initial task */
- . = ALIGN(8192);
- .data.init_task : { *(.data.init_task) }
-
/* Under the microblaze ABI, .sdata and .sbss must be contiguous */
. = ALIGN(8);
- .sdata : {
+ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
_ssro = .;
*(.sdata)
}
- .sbss : {
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {
_ssbss = .;
*(.sbss)
_esbss = .;
@@ -96,47 +88,36 @@ SECTIONS {
__init_begin = .;
- . = ALIGN(4096);
- .init.text : {
- _sinittext = . ;
- INIT_TEXT
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
- .init.data : {
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
}
. = ALIGN(4);
- .init.ivt : {
+ .init.ivt : AT(ADDR(.init.ivt) - LOAD_OFFSET) {
__ivt_start = .;
*(.init.ivt)
__ivt_end = .;
}
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+ INIT_SETUP(0)
}
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET ) {
+ INIT_CALLS
}
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+ CON_INITCALL
}
SECURITY_INIT
__init_end_before_initramfs = .;
- .init.ramfs ALIGN(4096) : {
+ .init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
@@ -152,7 +133,8 @@ SECTIONS {
}
__init_end = .;
- .bss ALIGN (4096) : { /* page aligned when MMU used */
+ .bss ALIGN (4096) : AT(ADDR(.bss) - LOAD_OFFSET) {
+ /* page aligned when MMU used */
__bss_start = . ;
*(.bss*)
*(COMMON)
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index f207f1a94db..a44892e7cd5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -180,7 +180,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
totalram_pages++;
pages++;
}
- printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages);
+ printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
+ (int)(pages * (PAGE_SIZE / 1024)));
}
#endif
@@ -204,7 +205,7 @@ void __init mem_init(void)
totalram_pages += free_all_bootmem();
printk(KERN_INFO "Memory: %luk/%luk available\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10));
#ifdef CONFIG_MMU
mem_init_done = 1;
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index c825b14b4ed..77f5021218d 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -627,16 +627,6 @@ endif
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
-ifdef CONFIG_32BIT
-ifdef CONFIG_CPU_LITTLE_ENDIAN
-JIFFIES = jiffies_64
-else
-JIFFIES = jiffies_64 + 4
-endif
-else
-JIFFIES = jiffies_64
-endif
-
#
# Automatically detect the build format. By default we choose
# the elf format according to the load address.
@@ -660,8 +650,9 @@ ifdef CONFIG_64BIT
endif
KBUILD_AFLAGS += $(cflags-y)
-KBUILD_CFLAGS += $(cflags-y) \
- -D"VMLINUX_LOAD_ADDRESS=$(load-y)"
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CPPFLAGS += -D"VMLINUX_LOAD_ADDRESS=$(load-y)"
+KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
LDFLAGS += -m $(ld-emul)
@@ -676,18 +667,6 @@ endif
OBJCOPYFLAGS += --remove-section=.reginfo
-#
-# Choosing incompatible machines durings configuration will result in
-# error messages during linking. Select a default linkscript if
-# none has been choosen above.
-#
-
-CPPFLAGS_vmlinux.lds := \
- $(KBUILD_CFLAGS) \
- -D"LOADADDR=$(load-y)" \
- -D"JIFFIES=$(JIFFIES)" \
- -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
-
head-y := arch/mips/kernel/head.o arch/mips/kernel/init_task.o
libs-y += arch/mips/lib/
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index f34ff860194..379a664809b 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -88,7 +88,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
.irq = AU1000_RTC_MATCH2_INT,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
- .cpumask = CPU_MASK_ALL_PTR,
+ .cpumask = cpu_all_mask,
};
static struct irqaction au1x_rtcmatch2_irqaction = {
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 23059170700..f6837422fe6 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -24,12 +24,10 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
#define parent_node(node) (node)
-#define node_to_cpumask(node) (hub_data(node)->h_cpus)
#define cpumask_of_node(node) (&hub_data(node)->h_cpus)
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
-#define pcibus_to_cpumask(bus) (cpu_online_map)
#define cpumask_of_pcibus(bus) (cpu_online_mask)
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h
index e4d6f1fb1cf..a2250f390a2 100644
--- a/arch/mips/include/asm/mman.h
+++ b/arch/mips/include/asm/mman.h
@@ -46,6 +46,8 @@
#define MAP_LOCKED 0x8000 /* pages are locked */
#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
/*
* Flags for msync
@@ -71,6 +73,9 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
/* compatibility flags */
#define MAP_FILE 0
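The new MAP_STACK/MAP_HUGETLB and MADV_MERGEABLE/MADV_UNMERGEABLE values mirror the generic definitions; a small, hypothetical userspace sketch of how they are consumed (assumes a libc/uapi that already exposes the constants; error handling and unmapping are trimmed):

    #include <sys/mman.h>
    #include <stddef.h>

    /* Hypothetical example: one anonymous mapping backed by huge pages
     * (len must be a multiple of the huge page size) and a second region
     * opted in to KSM merging of identical pages. */
    static void *example_mappings(size_t len)
    {
            void *huge = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            void *mergeable = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (mergeable != MAP_FAILED)
                    madvise(mergeable, len, MADV_MERGEABLE);

            return huge;
    }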
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d3bea88d874..d9743536a62 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -178,8 +178,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Mark current->active_mm as not "active" anymore.
* We don't want to mislead possible IPI tlb flush routines.
*/
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
local_irq_restore(flags);
}
@@ -235,8 +235,8 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
local_irq_restore(flags);
}
@@ -258,7 +258,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
local_irq_save(flags);
- if (cpu_isset(cpu, mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 1a9f9b25755..d6eb6134abe 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -76,6 +76,16 @@ extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
+#define is_zero_pfn is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
extern void paging_init(void);
/*
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index fd545547b8a..9e09af34c8a 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -19,7 +19,7 @@ struct task_struct;
struct plat_smp_ops {
void (*send_ipi_single)(int cpu, unsigned int action);
- void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+ void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
void (*cpus_done)(void);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index aaa2d4ab26d..e15f11a0931 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -78,6 +78,6 @@ extern void play_dead(void);
extern asmlinkage void smp_call_function_interrupt(void);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif /* __ASM_SMP_H */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index e753a777949..8c9dfa9e901 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -353,7 +353,7 @@
#define __NR_preadv (__NR_Linux + 330)
#define __NR_pwritev (__NR_Linux + 331)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332)
-#define __NR_perf_counter_open (__NR_Linux + 333)
+#define __NR_perf_event_open (__NR_Linux + 333)
#define __NR_accept4 (__NR_Linux + 334)
/*
@@ -664,7 +664,7 @@
#define __NR_preadv (__NR_Linux + 289)
#define __NR_pwritev (__NR_Linux + 290)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 291)
-#define __NR_perf_counter_open (__NR_Linux + 292)
+#define __NR_perf_event_open (__NR_Linux + 292)
#define __NR_accept4 (__NR_Linux + 293)
/*
@@ -979,7 +979,7 @@
#define __NR_preadv (__NR_Linux + 293)
#define __NR_pwritev (__NR_Linux + 294)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 295)
-#define __NR_perf_counter_open (__NR_Linux + 296)
+#define __NR_perf_event_open (__NR_Linux + 296)
#define __NR_accept4 (__NR_Linux + 297)
/*
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
index 5b457a40c78..6d6ca530589 100644
--- a/arch/mips/kernel/init_task.c
+++ b/arch/mips/kernel/init_task.c
@@ -21,9 +21,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
*
* The things we do for performance..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"),
- __aligned__(THREAD_SIZE))) =
+union thread_union init_thread_union __init_task_data
+ __attribute__((__aligned__(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/*
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7c2de4f091c..fd2a9bb620d 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -581,7 +581,7 @@ einval: li v0, -ENOSYS
sys sys_preadv 6 /* 4330 */
sys sys_pwritev 6
sys sys_rt_tgsigqueueinfo 4
- sys sys_perf_counter_open 5
+ sys sys_perf_event_open 5
sys sys_accept4 4
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index b97b993846d..18bf7f32c5e 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -418,6 +418,6 @@ sys_call_table:
PTR sys_preadv
PTR sys_pwritev /* 5390 */
PTR sys_rt_tgsigqueueinfo
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 1a6ae124635..6ebc0797669 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -416,6 +416,6 @@ EXPORT(sysn32_call_table)
PTR sys_preadv
PTR sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index cd31087a651..9bbf9775e0b 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -536,6 +536,6 @@ sys_call_table:
PTR compat_sys_preadv /* 4330 */
PTR compat_sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ad0ff5dc4d5..cc81771b882 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -80,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
cmp_send_ipi_single(i, action);
}
@@ -171,7 +171,7 @@ void __init cmp_smp_setup(void)
for (i = 1; i < NR_CPUS; i++) {
if (amon_cpu_avail(i)) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = ++ncpu;
__cpu_logical_map[ncpu] = i;
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 6f7ee5ac46e..43e7cdc5ded 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
write_vpe_c0_vpeconf0(tmp);
/* Record this as available CPU */
- cpu_set(tc, cpu_possible_map);
+ set_cpu_possible(tc, true);
__cpu_number_map[tc] = ++ncpu;
__cpu_logical_map[ncpu] = tc;
}
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 2508d55d68f..00500fea275 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
panic(KERN_ERR "%s called", __func__);
}
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 64668a93248..4eb106c6a3e 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -128,7 +128,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
@@ -183,15 +183,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
}
/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
- cpu_set(0, cpu_possible_map);
- cpu_set(0, cpu_online_map);
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
cpu_set(0, cpu_callin_map);
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 1a466baf0ed..67153a0dc26 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -305,7 +305,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
*/
ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
@@ -525,8 +525,8 @@ void smtc_prepare_cpus(int cpus)
* Pull any physically present but unused TCs out of circulation.
*/
while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
- cpu_clear(tc, cpu_possible_map);
- cpu_clear(tc, cpu_present_map);
+ set_cpu_possible(tc, false);
+ set_cpu_present(tc, false);
tc++;
}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 2769bed3d2a..9bf0e3df7c5 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -10,7 +10,16 @@ PHDRS {
text PT_LOAD FLAGS(7); /* RWX */
note PT_NOTE FLAGS(4); /* R__ */
}
-jiffies = JIFFIES;
+
+#ifdef CONFIG_32BIT
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ jiffies = jiffies_64;
+ #else
+ jiffies = jiffies_64 + 4;
+ #endif
+#else
+ jiffies = jiffies_64;
+#endif
SECTIONS
{
@@ -29,7 +38,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
- . = LOADADDR;
+ . = VMLINUX_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index 3f04d4c406b..b3deed8db61 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -56,12 +56,12 @@ int sysctl_lasatstring(ctl_table *table,
/* And the same for proc */
-int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
+int proc_dolasatstring(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- r = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ r = proc_dostring(table, write, buffer, lenp, ppos);
if ((!write) || r)
return r;
@@ -71,12 +71,12 @@ int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
}
/* proc function to write EEPROM after changing int entry */
-int proc_dolasatint(ctl_table *table, int write, struct file *filp,
+int proc_dolasatint(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, buffer, lenp, ppos);
if ((!write) || r)
return r;
@@ -89,7 +89,7 @@ int proc_dolasatint(ctl_table *table, int write, struct file *filp,
static int rtctmp;
/* proc function to read/write RealTime Clock */
-int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
+int proc_dolasatrtc(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct timespec ts;
@@ -102,7 +102,7 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
if (rtctmp < 0)
rtctmp = 0;
}
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, buffer, lenp, ppos);
if (r)
return r;
@@ -154,7 +154,7 @@ int sysctl_lasat_rtc(ctl_table *table,
#endif
#ifdef CONFIG_INET
-int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
+int proc_lasat_ip(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int ip;
@@ -231,12 +231,12 @@ static int sysctl_lasat_prid(ctl_table *table,
return 0;
}
-int proc_lasat_prid(ctl_table *table, int write, struct file *filp,
+int proc_lasat_prid(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
- r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ r = proc_dointvec(table, write, buffer, lenp, ppos);
if (r < 0)
return r;
if (write) {
diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c
index d6e4f656ad1..5da30b6a65b 100644
--- a/arch/mips/mipssim/sim_smtc.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
}
-static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
ssmtc_send_ipi_single(i, action);
}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 10ab69f7183..94e05e5733c 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
* cores it has been used on
*/
if (vma)
- mask = vma->vm_mm->cpu_vm_mask;
+ mask = *mm_cpumask(vma->vm_mm);
else
mask = cpu_online_map;
cpu_clear(cpu, mask);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 38c79c55b06..15aa1902a78 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -352,7 +352,6 @@ void __init paging_init(void)
free_area_init_nodes(max_zone_pfns);
}
-static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
@@ -409,15 +408,13 @@ void __init mem_init(void)
if ((unsigned long) &_text > (unsigned long) CKSEG0)
/* The -4 is a hack so that user tools don't have to handle
the overflow. */
- kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
+ kclist_add(&kcore_kseg0, (void *) CKSEG0,
+ 0x80000000 - 4, KCORE_TEXT);
#endif
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 499ffe5475d..192cfd2a539 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
msmtc_send_ipi_single(i, action);
}
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 8ace2771623..326fe7a392e 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
}
}
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
yos_send_ipi_single(i, action);
}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 060d853d7b3..f61c164d1e6 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -421,7 +421,7 @@ static void __init node_mem_init(cnodeid_t node)
/*
* A node with nothing. We use it to avoid any special casing in
- * node_to_cpumask
+ * cpumask_of_node
*/
static struct node_data null_node = {
.hub = {
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index cbcd7eb83bd..9aa8f2951df 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
ip27_send_ipi_single(i, action);
}
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 314691648c9..47b347c992e 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
}
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
bcm1480_send_ipi_single(i, action);
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index cad14003b84..c00a5cb1128 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
}
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
sb1250_send_ipi_single(i, action);
}
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 2db746a251f..1a55d61f0d0 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,7 +17,7 @@
#include <linux/mm.h>
/*
- * virtually-indexed cache managment (our cache is physically indexed)
+ * virtually-indexed cache management (our cache is physically indexed)
*/
#define flush_cache_all() do {} while (0)
#define flush_cache_mm(mm) do {} while (0)
@@ -31,7 +31,7 @@
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
/*
- * physically-indexed cache managment
+ * physically-indexed cache management
*/
#ifndef CONFIG_MN10300_CACHE_DISABLED
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index e5a6368559a..556cce99254 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -109,7 +109,6 @@ extern asmlinkage int gdbstub_intercept(struct pt_regs *, enum exception_code);
extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-extern asmlinkage void start_kernel(void);
#ifndef CONFIG_MN10300_CACHE_DISABLED
extern asmlinkage void gdbstub_purge_cache(void);
diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h
index d04fac1da5a..8eebf89f5ab 100644
--- a/arch/mn10300/include/asm/mman.h
+++ b/arch/mn10300/include/asm/mman.h
@@ -1,28 +1 @@
-/* MN10300 Constants for mmap and co.
- *
- * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * - Derived from asm-x86/mman.h
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_MMAN_H
-#define _ASM_MMAN_H
-
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
-
-#endif /* _ASM_MMAN_H */
+#include <asm-generic/mman.h>
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index a9e2e34f69b..cb294c244de 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[NR_CPUS];
#define enter_lazy_tlb(mm, tsk) do {} while (0)
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
- cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
- cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+ cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+ cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
#else
-#define cpu_ran_vm(cpu, task) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task) true
+#define cpu_ran_vm(cpu, mm) do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm) true
#endif /* CONFIG_SMP */
/*
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index fad68616af3..2a983931c11 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -347,7 +347,7 @@
#define __NR_preadv 334
#define __NR_pwritev 335
#define __NR_rt_tgsigqueueinfo 336
-#define __NR_perf_counter_open 337
+#define __NR_perf_event_open 337
#ifdef __KERNEL__
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index 82b40079ad7..02dc7e461fe 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -85,7 +85,7 @@ void foo(void)
OFFSET(__rx_buffer, mn10300_serial_port, rx_buffer);
OFFSET(__rx_inp, mn10300_serial_port, rx_inp);
OFFSET(__rx_outp, mn10300_serial_port, rx_outp);
- OFFSET(__tx_info_buffer, mn10300_serial_port, uart.info);
+ OFFSET(__uart_state, mn10300_serial_port, uart.state);
OFFSET(__tx_xchar, mn10300_serial_port, tx_xchar);
OFFSET(__tx_break, mn10300_serial_port, tx_break);
OFFSET(__intr_flags, mn10300_serial_port, intr_flags);
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index e0d2563af4f..a94e7ea3faa 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -723,7 +723,7 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev /* 335 */
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/mn10300/kernel/init_task.c b/arch/mn10300/kernel/init_task.c
index 80d423b80af..a481b043bea 100644
--- a/arch/mn10300/kernel/init_task.c
+++ b/arch/mn10300/kernel/init_task.c
@@ -27,9 +27,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S
index 22448538822..66702d25661 100644
--- a/arch/mn10300/kernel/mn10300-serial-low.S
+++ b/arch/mn10300/kernel/mn10300-serial-low.S
@@ -130,7 +130,7 @@ ENTRY(mn10300_serial_vdma_tx_handler)
or d2,d2
bne mnsc_vdma_tx_xchar
- mov (__tx_info_buffer,a3),a2 # get the uart_info struct for Tx
+ mov (__uart_state,a3),a2 # see if the TTY Tx queue has anything in it
mov (__xmit_tail,a2),d3
mov (__xmit_head,a2),d2
cmp d3,d2
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 2fd59664d00..229b710fc5d 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -391,7 +391,7 @@ static int mask_test_and_clear(volatile u8 *ptr, u8 mask)
static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
{
struct uart_icount *icount = &port->uart.icount;
- struct tty_struct *tty = port->uart.info->port.tty;
+ struct tty_struct *tty = port->uart.state->port.tty;
unsigned ix;
int count;
u8 st, ch, push, status, overrun;
@@ -566,16 +566,16 @@ static void mn10300_serial_transmit_interrupt(struct mn10300_serial_port *port)
{
_enter("%s", port->name);
- if (!port->uart.info || !port->uart.info->port.tty) {
+ if (!port->uart.state || !port->uart.state->port.tty) {
mn10300_serial_dis_tx_intr(port);
return;
}
if (uart_tx_stopped(&port->uart) ||
- uart_circ_empty(&port->uart.info->xmit))
+ uart_circ_empty(&port->uart.state->xmit))
mn10300_serial_dis_tx_intr(port);
- if (uart_circ_chars_pending(&port->uart.info->xmit) < WAKEUP_CHARS)
+ if (uart_circ_chars_pending(&port->uart.state->xmit) < WAKEUP_CHARS)
uart_write_wakeup(&port->uart);
}
@@ -596,7 +596,7 @@ static void mn10300_serial_cts_changed(struct mn10300_serial_port *port, u8 st)
*port->_control = ctr;
uart_handle_cts_change(&port->uart, st & SC2STR_CTS);
- wake_up_interruptible(&port->uart.info->delta_msr_wait);
+ wake_up_interruptible(&port->uart.state->port.delta_msr_wait);
}
/*
@@ -705,8 +705,8 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
_enter("%s{%lu}",
port->name,
- CIRC_CNT(&port->uart.info->xmit.head,
- &port->uart.info->xmit.tail,
+ CIRC_CNT(&port->uart.state->xmit.head,
+ &port->uart.state->xmit.tail,
UART_XMIT_SIZE));
/* kick the virtual DMA controller */
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index 79890edfd67..3f24c298a3a 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -285,7 +285,7 @@ static void c_stop(struct seq_file *m, void *v)
{
}
-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c
index 3e52a105432..8ca5af00334 100644
--- a/arch/mn10300/kernel/sys_mn10300.c
+++ b/arch/mn10300/kernel/sys_mn10300.c
@@ -19,7 +19,6 @@
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/tty.h>
#include <asm/uaccess.h>
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 76f41bdb79c..10549dcfb61 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -44,24 +44,8 @@ SECTIONS
RO_DATA(PAGE_SIZE)
/* writeable */
- .data : { /* Data */
- DATA_DATA
- CONSTRUCTORS
- }
-
- .data_nosave : { NOSAVE_DATA; }
-
- .data.page_aligned : { PAGE_ALIGNED_DATA(PAGE_SIZE); }
- .data.cacheline_aligned : { CACHELINE_ALIGNED_DATA(32); }
-
- /* rarely changed data like cpu maps */
- . = ALIGN(32);
- .data.read_mostly : AT(ADDR(.data.read_mostly)) {
- READ_MOSTLY_DATA(32);
- _edata = .; /* End of data section */
- }
-
- .data.init_task : { INIT_TASK_DATA(THREAD_SIZE); }
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
/* might get freed after init */
. = ALIGN(PAGE_SIZE);
@@ -74,22 +58,8 @@ SECTIONS
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT;
- _einittext = .;
- }
- .init.data : { INIT_DATA; }
- .setup.init : { INIT_SETUP(16); }
-
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- .con_initcall.init : { CON_INITCALL; }
-
- SECURITY_INIT
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
. = ALIGN(4);
__alt_instructions = .;
.altinstructions : { *(.altinstructions) }
@@ -100,8 +70,6 @@ SECTIONS
.exit.text : { EXIT_TEXT; }
.exit.data : { EXIT_DATA; }
- .init.ramfs : { INIT_RAM_FS; }
-
PERCPU(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 8cee387a24f..ec1420562dc 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -112,7 +112,7 @@ void __init mem_init(void)
"Memory: %luk/%luk available"
" (%dk kernel code, %dk reserved, %dk data, %dk init,"
" %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
+ nr_free_pages() << (PAGE_SHIFT - 10),
max_mapnr << (PAGE_SHIFT - 10),
codesize >> 10,
reservedpages << (PAGE_SHIFT - 10),
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 06f8d5b5b0f..f388dc68f60 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,7 +16,7 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index da6f66901c9..55cca1dac43 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -118,8 +118,8 @@ define archhelp
@echo '* vmlinux - Uncompressed kernel image (./vmlinux)'
@echo ' palo - Bootable image (./lifimage)'
@echo ' install - Install kernel using'
- @echo ' (your) ~/bin/installkernel or'
- @echo ' (distribution) /sbin/installkernel or'
+ @echo ' (your) ~/bin/$(INSTALLKERNEL) or'
+ @echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
@echo ' copy to $$(INSTALL_PATH)'
endef
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h
index 1e1c824764e..5f39d5597ce 100644
--- a/arch/parisc/include/asm/fcntl.h
+++ b/arch/parisc/include/asm/fcntl.h
@@ -28,6 +28,8 @@
#define F_SETOWN 12 /* for sockets. */
#define F_SETSIG 13 /* for sockets. */
#define F_GETSIG 14 /* for sockets. */
+#define F_GETOWN_EX 15
+#define F_SETOWN_EX 16
/* for posix fcntl() and lockf() */
#define F_RDLCK 01
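F_GETOWN_EX/F_SETOWN_EX let SIGIO/SIGURG ownership be directed at a single thread rather than a whole process. A hedged userspace sketch (not part of the patch; the helper name is invented, and gettid() is issued via syscall() since older libcs do not wrap it):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Hypothetical helper: deliver this descriptor's SIGIO to the calling
     * thread only, using the new extended-owner fcntl. */
    static int own_fd_by_this_thread(int fd)
    {
            struct f_owner_ex owner = {
                    .type = F_OWNER_TID,
                    .pid  = syscall(SYS_gettid),
            };

            return fcntl(fd, F_SETOWN_EX, &owner);
    }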
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
index defe752cc99..9749c8afe83 100644
--- a/arch/parisc/include/asm/mman.h
+++ b/arch/parisc/include/asm/mman.h
@@ -22,6 +22,8 @@
#define MAP_GROWSDOWN 0x8000 /* stack-like segment */
#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
#define MS_SYNC 1 /* synchronous memory sync */
#define MS_ASYNC 2 /* sync memory asynchronously */
@@ -54,6 +56,9 @@
#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
+#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
+
/* compatibility flags */
#define MAP_FILE 0
#define MAP_VARIABLE 0
diff --git a/arch/parisc/include/asm/perf_counter.h b/arch/parisc/include/asm/perf_counter.h
deleted file mode 100644
index dc9e829f701..00000000000
--- a/arch/parisc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_PARISC_PERF_COUNTER_H
-#define __ASM_PARISC_PERF_COUNTER_H
-
-/* parisc only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) { }
-
-#endif /* __ASM_PARISC_PERF_COUNTER_H */
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
new file mode 100644
index 00000000000..cc146427d8f
--- /dev/null
+++ b/arch/parisc/include/asm/perf_event.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 21eb45a5262..2e73623feb6 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -30,7 +30,6 @@ extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#endif /* !ASSEMBLY */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index f3d3b8b012c..cda158318c6 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -810,9 +810,9 @@
#define __NR_preadv (__NR_Linux + 315)
#define __NR_pwritev (__NR_Linux + 316)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
-#define __NR_perf_counter_open (__NR_Linux + 318)
+#define __NR_perf_event_open (__NR_Linux + 318)
-#define __NR_Linux_syscalls (__NR_perf_counter_open + 1)
+#define __NR_Linux_syscalls (__NR_perf_event_open + 1)
#define __IGNORE_select /* newselect */
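Only the name of syscall 318 changes here; the calling convention is that of perf_event_open(2). A minimal, hypothetical userspace sketch (assumes headers that already carry the renamed definitions):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* Hypothetical example: count CPU cycles for the calling task on any
     * CPU; returns the perf fd or -1 on error. */
    static int open_cycle_counter(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }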
diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
index 9632b3e164c..e593fc8d58b 100644
--- a/arch/parisc/install.sh
+++ b/arch/parisc/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index 82974b20fc1..d020eae6525 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -43,8 +43,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
+union thread_union init_thread_union __init_task_data
+ __attribute__((aligned(128))) =
{ INIT_THREAD_INFO(init_task) };
#if PT_NLEVELS == 3
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 92a0acaa0d1..561388b17c9 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -18,7 +18,6 @@
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
-#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index cf145eb026b..843f423dec6 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -416,7 +416,7 @@
ENTRY_COMP(preadv) /* 315 */
ENTRY_COMP(pwritev)
ENTRY_COMP(rt_tgsigqueueinfo)
- ENTRY_SAME(perf_counter_open)
+ ENTRY_SAME(perf_event_open)
/* Nothing yet */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index aea1784edbd..775be2791bc 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -77,13 +77,7 @@ SECTIONS
*/
. = ALIGN(PAGE_SIZE);
data_start = .;
- . = ALIGN(16);
- /* Exception table */
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
NOTES
@@ -94,23 +88,8 @@ SECTIONS
__stop___unwind = .;
}
- /* rarely changed data like cpu maps */
- . = ALIGN(16);
- .data.read_mostly : {
- *(.data.read_mostly)
- }
-
- . = ALIGN(L1_CACHE_BYTES);
/* Data */
- .data : {
- DATA_DATA
- CONSTRUCTORS
- }
-
- . = ALIGN(L1_CACHE_BYTES);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
/* PA-RISC locks requires 16-byte alignment */
. = ALIGN(16);
@@ -118,17 +97,6 @@ SECTIONS
*(.data.lock_aligned)
}
- /* nosave data is really only used for software suspend...it's here
- * just in case we ever implement it
- */
- . = ALIGN(PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : {
- *(.data.nosave)
- }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
/* End of data section */
_edata = .;
@@ -147,14 +115,6 @@ SECTIONS
}
__bss_stop = .;
-
- /* assembler code expects init_task to be 16k aligned */
- . = ALIGN(16384);
- /* init_task */
- .data.init_task : {
- *(.data.init_task)
- }
-
#ifdef CONFIG_64BIT
. = ALIGN(16);
/* Linkage tables */
@@ -172,64 +132,8 @@ SECTIONS
/* reserve space for interrupt stack by aligning __init* to 16k */
. = ALIGN(16384);
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(16);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
-
- /* alternate instruction replacement. This is a mechanism x86 uses
- * to detect the CPU type and replace generic instruction sequences
- * with CPU specific ones. We don't currently do this in PA, but
- * it seems like a good idea...
- */
- . = ALIGN(4);
- .altinstructions : {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
-
- /* .exit.text is discard at runtime, not link time, to deal with references
- * from .altinstructions and .eh_frame
- */
- .exit.text : {
- EXIT_TEXT
- }
- .exit.data : {
- EXIT_DATA
- }
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
+ INIT_TEXT_SECTION(16384)
+ INIT_DATA_SECTION(16)
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b0831d9e35c..d5aca31fddb 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -506,7 +506,7 @@ void __init mem_init(void)
#endif
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
- (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8250902265c..10a0a5488a4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -129,7 +129,7 @@ config PPC
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config EARLY_PRINTK
bool
@@ -385,9 +385,15 @@ config NUMA
config NODES_SHIFT
int
+ default "8" if PPC64
default "4"
depends on NEED_MULTIPLE_NODES
+config MAX_ACTIVE_REGIONS
+ int
+ default "256" if PPC64
+ default "32"
+
config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 952a3963e9e..1a54a3b3a3f 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -158,14 +158,23 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
# Default to zImage, override when needed
all: zImage
-CPPFLAGS_vmlinux.lds := -Upowerpc
-
BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
PHONY += $(BOOT_TARGETS)
boot := arch/$(ARCH)/boot
+ifeq ($(CONFIG_RELOCATABLE),y)
+quiet_cmd_relocs_check = CALL $<
+ cmd_relocs_check = perl $< "$(OBJDUMP)" "$(obj)/vmlinux"
+
+PHONY += relocs_check
+relocs_check: arch/powerpc/relocs_check.pl vmlinux
+ $(call cmd,relocs_check)
+
+zImage: relocs_check
+endif
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
@@ -182,8 +191,8 @@ define archhelp
@echo ' simpleImage.<dt> - Firmware independent image.'
@echo ' treeImage.<dt> - Support for older IBM 4xx firmware (not U-Boot)'
@echo ' install - Install kernel using'
- @echo ' (your) ~/bin/installkernel or'
- @echo ' (distribution) /sbin/installkernel or'
+ @echo ' (your) ~/bin/$(INSTALLKERNEL) or'
+ @echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
@echo ' install to $$(INSTALL_PATH) and run lilo'
@echo ' *_defconfig - Select default config from arch/$(ARCH)/configs'
@echo ''
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index f32c2811c6d..855782c5e5e 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -159,6 +159,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
/* Filled in by U-Boot */
clock-frequency = <0>;
};
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 28e022ac417..9e2264b1000 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -173,6 +173,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
/* Filled in by U-Boot */
clock-frequency = <111111111>;
};
diff --git a/arch/powerpc/boot/dts/mpc8377_wlan.dts b/arch/powerpc/boot/dts/mpc8377_wlan.dts
index 3febc4e91b1..9a603695723 100644
--- a/arch/powerpc/boot/dts/mpc8377_wlan.dts
+++ b/arch/powerpc/boot/dts/mpc8377_wlan.dts
@@ -150,6 +150,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
clock-frequency = <133333333>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index f720ab9af30..f70cf600083 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -159,6 +159,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
/* Filled in by U-Boot */
clock-frequency = <0>;
};
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index a11ead8214b..4e6a1a407bb 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -173,6 +173,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
/* Filled in by U-Boot */
clock-frequency = <111111111>;
};
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index 4fa221fd9bd..645ec51cc6e 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -157,6 +157,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
/* Filled in by U-Boot */
clock-frequency = <0>;
};
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index e35dfba587c..72336d50452 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -171,6 +171,7 @@
reg = <0x2e000 0x1000>;
interrupts = <42 0x8>;
interrupt-parent = <&ipic>;
+ sdhci,wp-inverted;
/* Filled in by U-Boot */
clock-frequency = <111111111>;
};
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 98312d169c8..b6a256bc96e 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -23,8 +23,8 @@ set -e
# User may have a custom install script
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index f42e623030e..fa19f3fe05f 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -18,6 +18,9 @@
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
+#ifdef __KERNEL__
+static inline void setup_cputime_one_jiffy(void) { }
+#endif
#else
#include <linux/types.h>
@@ -49,6 +52,11 @@ typedef u64 cputime64_t;
#ifdef __KERNEL__
/*
+ * One jiffy in timebase units computed during initialization
+ */
+extern cputime_t cputime_one_jiffy;
+
+/*
* Convert cputime <-> jiffies
*/
extern u64 __cputime_jiffies_factor;
@@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
return ct;
}
+static inline void setup_cputime_one_jiffy(void)
+{
+ cputime_one_jiffy = jiffies_to_cputime(1);
+}
+
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
cputime_t ct;
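cputime_one_jiffy caches jiffies_to_cputime(1) so tick-accounting hot paths no longer redo the conversion; setup_cputime_one_jiffy() is expected to run once the timebase is calibrated, and is a no-op when VIRT_CPU_ACCOUNTING is off. A hedged sketch of the kind of caller that benefits (it mirrors the generic tick accounting; the function name is invented):

    #include <linux/sched.h>
    #include <linux/kernel_stat.h>
    #include <linux/hardirq.h>
    #include <asm/cputime.h>

    /* Hypothetical sketch: charge one timer tick using the precomputed
     * cputime_one_jiffy instead of calling jiffies_to_cputime(1) per tick. */
    static void example_account_tick(struct task_struct *p, int user_tick)
    {
            if (user_tick)
                    account_user_time(p, cputime_one_jiffy, cputime_one_jiffy);
            else
                    account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                        cputime_one_jiffy);
    }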
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 9dade15d1ab..6d94d27ed85 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -15,7 +15,16 @@ struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
- void *dma_data;
+
+ /*
+ * When an iommu is in use, dma_data is used as a ptr to the base of the
+ * iommu_table. Otherwise, it is a simple numerical offset.
+ */
+ union {
+ dma_addr_t dma_offset;
+ void *iommu_table_base;
+ } dma_data;
+
#ifdef CONFIG_SWIOTLB
dma_addr_t max_direct_dma_addr;
#endif
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index cb2ca41dd52..e281daebddc 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -26,7 +26,6 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
extern void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
-extern unsigned long get_dma_direct_offset(struct device *dev);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
@@ -90,6 +89,28 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
+/*
+ * get_dma_offset()
+ *
+ * Get the dma offset on configurations where the dma address can be determined
+ * from the physical address by looking at a simple offset. Direct dma and
+ * swiotlb use this function, but it is typically not used by implementations
+ * with an iommu.
+ */
+static inline dma_addr_t get_dma_offset(struct device *dev)
+{
+ if (dev)
+ return dev->archdata.dma_data.dma_offset;
+
+ return PCI_DRAM_OFFSET;
+}
+
+static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+{
+ if (dev)
+ dev->archdata.dma_data.dma_offset = off;
+}
+
/* this will be removed soon */
#define flush_write_buffers()
@@ -181,12 +202,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return paddr + get_dma_direct_offset(dev);
+ return paddr + get_dma_offset(dev);
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- return daddr - get_dma_direct_offset(dev);
+ return daddr - get_dma_offset(dev);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
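The union added to struct dev_archdata above replaces the old opaque dma_data pointer: direct and swiotlb DMA store a constant bus offset, while iommu-backed DMA stores the iommu_table pointer. A hedged sketch of how a platform might populate it (the setup function and has_iommu flag are hypothetical; the accessors are the ones added in this hunk):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <asm/iommu.h>

    /* Hypothetical platform hook: pick the meaning of dma_data per device. */
    static void example_setup_dma_data(struct device *dev,
                                       struct iommu_table *tbl, bool has_iommu)
    {
            if (has_iommu)
                    /* iommu path: dma_data carries the table base pointer */
                    dev->archdata.dma_data.iommu_table_base = tbl;
            else
                    /* direct/swiotlb path: a plain bus offset, read back by
                     * get_dma_offset() in phys_to_dma()/dma_to_phys() */
                    set_dma_offset(dev, PCI_DRAM_OFFSET);
    }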
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
new file mode 100644
index 00000000000..a67aeed17d4
--- /dev/null
+++ b/arch/powerpc/include/asm/fsldma.h
@@ -0,0 +1,136 @@
+/*
+ * Freescale MPC83XX / MPC85XX DMA Controller
+ *
+ * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
+#define __ARCH_POWERPC_ASM_FSLDMA_H__
+
+#include <linux/dmaengine.h>
+
+/*
+ * Definitions for the Freescale DMA controller's DMA_SLAVE implementation
+ *
+ * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
+ * transfers. An example usage would be an accelerated copy between two
+ * scatterlists. Another example use would be an accelerated copy from
+ * multiple non-contiguous device buffers into a single scatterlist.
+ *
+ * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
+ * structure contains a list of hardware addresses that should be copied
+ * to/from the scatterlist passed into device_prep_slave_sg(). The structure
+ * also has some fields to enable hardware-specific features.
+ */
+
+/**
+ * struct fsl_dma_hw_addr
+ * @entry: linked list entry
+ * @address: the hardware address
+ * @length: length to transfer
+ *
+ * Holds a single physical hardware address / length pair for use
+ * with the DMAEngine DMA_SLAVE API.
+ */
+struct fsl_dma_hw_addr {
+ struct list_head entry;
+
+ dma_addr_t address;
+ size_t length;
+};
+
+/**
+ * struct fsl_dma_slave
+ * @addresses: a linked list of struct fsl_dma_hw_addr structures
+ * @request_count: value for DMA request count
+ * @src_loop_size: setup and enable constant source-address DMA transfers
+ * @dst_loop_size: setup and enable constant destination-address DMA transfers
+ * @external_start: enable externally started DMA transfers
+ * @external_pause: enable externally paused DMA transfers
+ *
+ * Holds a list of address / length pairs for use with the DMAEngine
+ * DMA_SLAVE API implementation for the Freescale DMA controller.
+ */
+struct fsl_dma_slave {
+
+ /* List of hardware address/length pairs */
+ struct list_head addresses;
+
+ /* Support for extra controller features */
+ unsigned int request_count;
+ unsigned int src_loop_size;
+ unsigned int dst_loop_size;
+ bool external_start;
+ bool external_pause;
+};
+
+/**
+ * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
+ * @slave: the &struct fsl_dma_slave to add to
+ * @address: the hardware address to add
+ * @length: the number of bytes to transfer from @address
+ *
+ * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
+ * success, -ERRNO otherwise.
+ */
+static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
+ dma_addr_t address, size_t length)
+{
+ struct fsl_dma_hw_addr *addr;
+
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (!addr)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&addr->entry);
+ addr->address = address;
+ addr->length = length;
+
+ list_add_tail(&addr->entry, &slave->addresses);
+ return 0;
+}
+
+/**
+ * fsl_dma_slave_free - free a struct fsl_dma_slave
+ * @slave: the struct fsl_dma_slave to free
+ *
+ * Free a struct fsl_dma_slave and all associated address/length pairs
+ */
+static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
+{
+ struct fsl_dma_hw_addr *addr, *tmp;
+
+ if (slave) {
+ list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
+ list_del(&addr->entry);
+ kfree(addr);
+ }
+
+ kfree(slave);
+ }
+}
+
+/**
+ * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
+ * @gfp: the flags to pass to kzalloc when allocating this structure
+ *
+ * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
+ * struct fsl_dma_slave on success, or NULL on failure.
+ */
+static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
+{
+ struct fsl_dma_slave *slave;
+
+ slave = kzalloc(sizeof(*slave), gfp);
+ if (!slave)
+ return NULL;
+
+ INIT_LIST_HEAD(&slave->addresses);
+ return slave;
+}
+
+#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
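The helpers above only build and tear down the address list; how the finished struct fsl_dma_slave reaches the driver (commonly through the DMA channel's private pointer at this point in the tree) is not defined by this header, so that step is left out of the sketch below. The example_build_slave() name and the 4096-byte lengths are invented for illustration:

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <asm/fsldma.h>

/* Hypothetical caller: describe two device buffers for one DMA_SLAVE
 * transaction using the inline helpers from this header. */
static struct fsl_dma_slave *example_build_slave(dma_addr_t buf1, dma_addr_t buf2)
{
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	if (fsl_dma_slave_append(slave, buf1, 4096) ||
	    fsl_dma_slave_append(slave, buf2, 4096)) {
		fsl_dma_slave_free(slave);	/* frees the list entries too */
		return NULL;
	}

	slave->external_start = false;	/* no externally started transfer */
	return slave;
}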
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e73d554538d..abbc2aaaced 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
*/
struct irq_chip;
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_counter_pending)));
+ : "i" (offsetof(struct paca_struct, perf_event_pending)));
return x;
}
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
- "i" (offsetof(struct paca_struct, perf_counter_pending)));
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
}
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
- "i" (offsetof(struct paca_struct, perf_counter_pending)));
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
}
#endif /* CONFIG_PPC64 */
-#else /* CONFIG_PERF_COUNTERS */
+#else /* CONFIG_PERF_EVENTS */
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
{
return 0;
}
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7464c0daddd..edfc9803ec9 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -70,6 +70,16 @@ struct iommu_table {
struct scatterlist;
+static inline void set_iommu_table_base(struct device *dev, void *base)
+{
+ dev->archdata.dma_data.iommu_table_base = base;
+}
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+ return dev->archdata.dma_data.iommu_table_base;
+}
+
/* Frees table for an individual device node */
extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
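set_iommu_table_base()/get_iommu_table_base() wrap the iommu_table_base member of the new dma_data union; bus setup code stores the table and the generic IOMMU DMA ops read it back, as the dma-iommu.c hunks further down show. A hedged sketch of the setup side, with an invented example_attach_iommu() helper:

#include <linux/device.h>
#include <asm/iommu.h>

/* Hypothetical bus/device setup hook: attach the bus's iommu_table to the
 * device so dma_iommu_ops can retrieve it on every map/unmap call. */
static void example_attach_iommu(struct device *dev, struct iommu_table *tbl)
{
	set_iommu_table_base(dev, tbl);
}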
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7b1c49811a2..d4a7f645c5d 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -25,6 +25,8 @@
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
#ifdef __KERNEL__
#ifdef CONFIG_PPC64
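MAP_STACK and MAP_HUGETLB give powerpc the same mmap flag values the generic ABI uses. A userspace sketch of the MAP_HUGETLB flag follows; it is not part of the patch, it assumes huge pages have been reserved by the administrator, and the 16 MB length is only an example that must be a multiple of the system's huge page size:

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* value added by this patch */
#endif

int main(void)
{
	size_t len = 16 * 1024 * 1024;	/* multiple of the huge page size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	munmap(p, len);
	return 0;
}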
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b634456ea89..7d8514cecea 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -122,7 +122,7 @@ struct paca_struct {
u8 soft_enabled; /* irq soft-enable flag */
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
- u8 perf_counter_pending; /* PM interrupt while soft-disabled */
+ u8 perf_event_pending; /* PM interrupt while soft-disabled */
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_event.h
index 0ea0639fcf7..3288ce3997e 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -1,5 +1,5 @@
/*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
@@ -12,7 +12,7 @@
#include <asm/hw_irq.h>
-#define MAX_HWCOUNTERS 8
+#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
@@ -28,12 +28,12 @@ struct power_pmu {
unsigned long test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
unsigned int hwc[], unsigned long mmcr[]);
- int (*get_constraint)(u64 event, unsigned long *mskp,
+ int (*get_constraint)(u64 event_id, unsigned long *mskp,
unsigned long *valp);
- int (*get_alternatives)(u64 event, unsigned int flags,
+ int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
- int (*limited_pmc_event)(u64 event);
+ int (*limited_pmc_event)(u64 event_id);
u32 flags;
int n_generic;
int *generic_events;
@@ -61,10 +61,10 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-#define PERF_COUNTER_INDEX_OFFSET 1
+#define PERF_EVENT_INDEX_OFFSET 1
/*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
* if we have hardware PMU support.
*/
#ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
/*
* The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
* other events.
*
* The value and mask are divided up into (non-overlapping) bitfields
* of three different types:
*
* Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event. For a
+ * in MMCR* needs to be set to a specific value for this event_id. For a
* select field, the mask contains 1s in every bit of the field, and
* the value contains a unique value for each possible setting of the
* MMCR* bits. The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
* possible.) For N classes, the field is N+1 bits wide, and each class
* is assigned one bit from the least-significant N bits. The mask has
* only the most-significant bit set, and the value has only the bit
- * for the event's class set. The test_adder has the least significant
+ * for the event_id's class set. The test_adder has the least significant
* bit set in the field.
*
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index ccc68b50d05..5a9ede4962c 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -29,7 +29,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq);
void release_pmc_hardware(void);
void ppc_enable_pmcs(void);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/lppaca.h>
static inline void ppc_set_pmu_inuse(int inuse)
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
index 6c3e1f4378d..ec0b0b0d1df 100644
--- a/arch/powerpc/include/asm/pte-40x.h
+++ b/arch/powerpc/include/asm/pte-40x.h
@@ -43,6 +43,7 @@
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_SPECIAL 0x020 /* software: Special page */
#define _PAGE_RW 0x040 /* software: Writes permitted */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index 94e979718dc..dd5ea95fe61 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -32,6 +32,7 @@
#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
+#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
/* These five software bits must be masked out when the entry is loaded
* into the TLB.
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index c3b65076a26..f2b370180a0 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -25,9 +25,6 @@
#ifndef _PAGE_WRITETHRU
#define _PAGE_WRITETHRU 0
#endif
-#ifndef _PAGE_SPECIAL
-#define _PAGE_SPECIAL 0
-#endif
#ifndef _PAGE_4K_PFN
#define _PAGE_4K_PFN 0
#endif
@@ -179,7 +176,5 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define HAVE_PAGE_AGP
/* Advertise support for _PAGE_SPECIAL */
-#ifdef _PAGE_SPECIAL
#define __HAVE_ARCH_PTE_SPECIAL
-#endif
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index c0d3b8af931..d9ea8d39c34 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -146,7 +146,7 @@ extern void smp_generic_take_timebase(void);
extern struct smp_ops_t *smp_ops;
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/* Definitions relative to the secondary CPU spin loop
* and entry point. Not all of them exist on both 32 and
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ed24bd92fe4..c7d671a7d9a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
SYSCALL_SPU(dup3)
SYSCALL_SPU(pipe2)
SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 394edcbcce7..22f738d12ad 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -17,11 +17,6 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
-static inline cpumask_t node_to_cpumask(int node)
-{
- return numa_cpumask_lookup_table[node];
-}
-
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
int of_node_to_nid(struct device_node *device);
@@ -36,11 +31,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
}
#endif
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
- CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
-
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
@@ -104,8 +94,6 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64
#include <asm/smp.h>
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cef080bfc60..f6ca7617676 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -341,7 +341,7 @@
#define __NR_dup3 316
#define __NR_pipe2 317
#define __NR_inotify_init1 318
-#define __NR_perf_counter_open 319
+#define __NR_perf_event_open 319
#define __NR_preadv 320
#define __NR_pwritev 321
#define __NR_rt_tgsigqueueinfo 322
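Syscall 319 is renamed along with everything else; glibc provides no wrapper for it, so userspace invokes it by number. A hedged sketch that counts CPU cycles for the calling task, assuming kernel headers matching this patch are installed for <linux/perf_event.h>:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#ifndef __NR_perf_event_open
#define __NR_perf_event_open 319	/* powerpc value from this patch */
#endif

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid 0 = calling task, cpu -1 = any CPU, no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles so far: %llu\n", count);
	close(fd);
	return 0;
}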
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 569f79ccd31..b23664a0b86 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f0df285f0f8..0812b0f414b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,7 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+ DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 87ddb3fb948..37771a51811 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -18,7 +18,7 @@
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+ return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, device_to_mask(dev), flag,
dev_to_node(dev));
}
@@ -26,7 +26,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+ iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -39,8 +39,8 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
- device_to_mask(dev), direction, attrs);
+ return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
+ size, device_to_mask(dev), direction, attrs);
}
@@ -48,7 +48,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+ iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
attrs);
}
@@ -57,7 +57,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+ return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
device_to_mask(dev), direction, attrs);
}
@@ -65,14 +65,14 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+ iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
attrs);
}
/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
- struct iommu_table *tbl = dev->archdata.dma_data;
+ struct iommu_table *tbl = get_iommu_table_base(dev);
if (!tbl || tbl->it_offset > mask) {
printk(KERN_INFO
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 21b784d7e7d..6215062caf8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -21,13 +21,6 @@
* default the offset is PCI_DRAM_OFFSET.
*/
-unsigned long get_dma_direct_offset(struct device *dev)
-{
- if (dev)
- return (unsigned long)dev->archdata.dma_data;
-
- return PCI_DRAM_OFFSET;
-}
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
@@ -37,7 +30,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
if (ret == NULL)
return NULL;
- *dma_handle += get_dma_direct_offset(dev);
+ *dma_handle += get_dma_offset(dev);
return ret;
#else
struct page *page;
@@ -51,7 +44,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
return NULL;
ret = page_address(page);
memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+ *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
return ret;
#endif
@@ -75,7 +68,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+ sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
sg->dma_length = sg->length;
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
@@ -110,7 +103,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
{
BUG_ON(dir == DMA_NONE);
__dma_sync_page(page, offset, size, dir);
- return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+ return page_to_phys(page) + offset + get_dma_offset(dev);
}
static inline void dma_direct_unmap_page(struct device *dev,
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 66bcda34a6b..900e0eea009 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
TRACE_AND_RESTORE_IRQ(r5);
-#ifdef CONFIG_PERF_COUNTERS
- /* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+ /* check paca->perf_event_pending if we're enabling ints */
lbz r3,PACAPERFPEND(r13)
and. r3,r3,r5
beq 27f
- bl .perf_counter_do_pending
+ bl .perf_event_do_pending
27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9048f96237f..24dcc0ecf24 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,7 +17,6 @@
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
-#include <asm/reg.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index ffc4253fef5..2375b7eb1c7 100644
--- a/arch/powerpc/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
@@ -16,9 +16,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f7f376ea7b1..e5d12117798 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,7 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_PPC_STD_MMU_64 */
- if (test_perf_counter_pending()) {
- clear_perf_counter_pending();
- perf_counter_do_pending();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
}
/*
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 49e705fcee6..040bd1de8d9 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -13,6 +13,7 @@
#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
+#include <linux/init_task.h>
#include <linux/errno.h>
#include <asm/page.h>
@@ -249,8 +250,8 @@ static void kexec_prepare_cpus(void)
* We could use a smaller stack if we don't care about anything using
* current, but that audit has not been performed.
*/
-static union thread_union kexec_stack
- __attribute__((__section__(".data.init_task"))) = { };
+static union thread_union kexec_stack __init_task_data =
+ { };
/* Our assembly helper, in kexec_stub.S */
extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index cc466d039af..09d72028f31 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9f4840096b..bb8209e3493 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1117,7 +1117,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
/* Hook up default DMA ops */
sd->dma_ops = pci_dma_ops;
- sd->dma_data = (void *)PCI_DRAM_OFFSET;
+ set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
/* Additional platform DMA/iommu setup */
if (ppc_md.pci_dma_dev_setup)
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index f74b62c6751..0a03cf70d24 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -10,7 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_event.c
index 5ccf9bca96c..bbcbae183e9 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1,5 +1,5 @@
/*
- * Performance counter support - powerpc architecture code
+ * Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
@@ -10,7 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
@@ -19,24 +19,24 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
-struct cpu_hw_counters {
- int n_counters;
+struct cpu_hw_events {
+ int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
- struct perf_counter *counter[MAX_HWCOUNTERS];
- u64 events[MAX_HWCOUNTERS];
- unsigned int flags[MAX_HWCOUNTERS];
+ struct perf_event *event[MAX_HWEVENTS];
+ u64 events[MAX_HWEVENTS];
+ unsigned int flags[MAX_HWEVENTS];
unsigned long mmcr[3];
- struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
+ struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
- u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+ unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+ unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
};
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
@@ -47,7 +47,7 @@ struct power_pmu *ppmu;
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
-static unsigned int freeze_counters_kernel = MMCR0_FCS;
+static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
@@ -122,14 +122,14 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
if (ppmu->flags & PPMU_ALT_SIPR) {
if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
+ return PERF_RECORD_MISC_HYPERVISOR;
return (mmcra & POWER6_MMCRA_SIPR) ?
- PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+ PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
}
if (mmcra & MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ return PERF_RECORD_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
+ PERF_RECORD_MISC_KERNEL;
}
/*
@@ -152,9 +152,9 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
#endif /* CONFIG_PPC64 */
-static void perf_counter_interrupt(struct pt_regs *regs);
+static void perf_event_interrupt(struct pt_regs *regs);
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
{
}
@@ -240,15 +240,15 @@ static void write_pmc(int idx, unsigned long val)
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
- * The feasible set is returned in event[].
+ * The feasible set is returned in event_id[].
*/
-static int power_check_constraints(struct cpu_hw_counters *cpuhw,
- u64 event[], unsigned int cflags[],
+static int power_check_constraints(struct cpu_hw_events *cpuhw,
+ u64 event_id[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
- unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
- int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+ unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
+ int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
@@ -259,12 +259,12 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
- && !ppmu->limited_pmc_event(event[i])) {
- ppmu->get_alternatives(event[i], cflags[i],
+ && !ppmu->limited_pmc_event(event_id[i])) {
+ ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
- event[i] = cpuhw->alternatives[i][0];
+ event_id[i] = cpuhw->alternatives[i][0];
}
- if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
+ if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0]))
return -1;
}
@@ -287,7 +287,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
- n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+ n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
@@ -307,7 +307,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
j = choice[i];
}
/*
- * See if any alternative k for event i,
+ * See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
@@ -321,16 +321,16 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
- * to event i-1 and continue enumerating its
+ * to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
- * Found a feasible alternative for event i,
- * remember where we got up to with this event,
- * go on to the next event, and start with
+ * Found a feasible alternative for event_id i,
+ * remember where we got up to with this event_id,
+ * go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
@@ -345,21 +345,21 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
- event[i] = cpuhw->alternatives[i][choice[i]];
+ event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
/*
- * Check if newly-added counters have consistent settings for
+ * Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
- * added counters.
+ * added events.
*/
-static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
- struct perf_counter *counter;
+ struct perf_event *event;
n = n_prev + n_new;
if (n <= 1)
@@ -371,15 +371,15 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
- counter = ctrs[i];
+ event = ctrs[i];
if (first) {
- eu = counter->attr.exclude_user;
- ek = counter->attr.exclude_kernel;
- eh = counter->attr.exclude_hv;
+ eu = event->attr.exclude_user;
+ ek = event->attr.exclude_kernel;
+ eh = event->attr.exclude_hv;
first = 0;
- } else if (counter->attr.exclude_user != eu ||
- counter->attr.exclude_kernel != ek ||
- counter->attr.exclude_hv != eh) {
+ } else if (event->attr.exclude_user != eu ||
+ event->attr.exclude_kernel != ek ||
+ event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
@@ -392,11 +392,11 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
return 0;
}
-static void power_pmu_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- if (!counter->hw.idx)
+ if (!event->hw.idx)
return;
/*
* Performance monitor interrupts come even when interrupts
@@ -404,21 +404,21 @@ static void power_pmu_read(struct perf_counter *counter)
* Therefore we treat them like NMIs.
*/
do {
- prev = atomic64_read(&counter->hw.prev_count);
+ prev = atomic64_read(&event->hw.prev_count);
barrier();
- val = read_pmc(counter->hw.idx);
- } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+ val = read_pmc(event->hw.idx);
+ } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &counter->count);
- atomic64_sub(delta, &counter->hw.period_left);
+ atomic64_add(delta, &event->count);
+ atomic64_sub(delta, &event->hw.period_left);
}
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
- * us if `counter' is using such a PMC.
+ * us if `event' is using such a PMC.
*/
static int is_limited_pmc(int pmcnum)
{
@@ -426,53 +426,53 @@ static int is_limited_pmc(int pmcnum)
&& (pmcnum == 5 || pmcnum == 6);
}
-static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
+static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
- struct perf_counter *counter;
+ struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
- counter = cpuhw->limited_counter[i];
- if (!counter->hw.idx)
+ event = cpuhw->limited_counter[i];
+ if (!event->hw.idx)
continue;
- val = (counter->hw.idx == 5) ? pmc5 : pmc6;
- prev = atomic64_read(&counter->hw.prev_count);
- counter->hw.idx = 0;
+ val = (event->hw.idx == 5) ? pmc5 : pmc6;
+ prev = atomic64_read(&event->hw.prev_count);
+ event->hw.idx = 0;
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
}
}
-static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
+static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
- struct perf_counter *counter;
+ struct perf_event *event;
u64 val;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
- counter = cpuhw->limited_counter[i];
- counter->hw.idx = cpuhw->limited_hwidx[i];
- val = (counter->hw.idx == 5) ? pmc5 : pmc6;
- atomic64_set(&counter->hw.prev_count, val);
- perf_counter_update_userpage(counter);
+ event = cpuhw->limited_counter[i];
+ event->hw.idx = cpuhw->limited_hwidx[i];
+ val = (event->hw.idx == 5) ? pmc5 : pmc6;
+ atomic64_set(&event->hw.prev_count, val);
+ perf_event_update_userpage(event);
}
}
/*
- * Since limited counters don't respect the freeze conditions, we
+ * Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
- * other counters. We try to keep the values from the limited
- * counters as consistent as possible by keeping the delay (in
+ * other events. We try to keep the values from the limited
+ * events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
- * the limited counters as small and consistent as possible.
- * Therefore, if any limited counters are in use, we read them
+ * the limited events as small and consistent as possible.
+ * Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
-static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
@@ -485,7 +485,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
- * counters, we first write MMCR0 with the counter overflow
+ * events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
@@ -500,7 +500,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
thaw_limited_counters(cpuhw, pmc5, pmc6);
/*
- * Write the full MMCR0 including the counter overflow interrupt
+ * Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -508,18 +508,18 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
}
/*
- * Disable all counters to prevent PMU interrupts and to allow
- * counters to be added or removed.
+ * Disable all events to prevent PMU interrupts and to allow
+ * events to be added or removed.
*/
void hw_perf_disable(void)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
unsigned long flags;
if (!ppmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_counters);
+ cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
@@ -545,7 +545,7 @@ void hw_perf_disable(void)
/*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
- * executed and the PMU has frozen the counters
+ * executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
@@ -555,26 +555,26 @@ void hw_perf_disable(void)
}
/*
- * Re-enable all counters if disable == 0.
- * If we were previously disabled and counters were added, then
+ * Re-enable all events if disable == 0.
+ * If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
void hw_perf_enable(void)
{
- struct perf_counter *counter;
- struct cpu_hw_counters *cpuhw;
+ struct perf_event *event;
+ struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val;
s64 left;
- unsigned int hwc_index[MAX_HWCOUNTERS];
+ unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
if (!ppmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_counters);
+ cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
@@ -582,23 +582,23 @@ void hw_perf_enable(void)
cpuhw->disabled = 0;
/*
- * If we didn't change anything, or only removed counters,
+ * If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
- * (possibly updated for removal of counters).
+ * (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
- if (cpuhw->n_counters == 0)
+ if (cpuhw->n_events == 0)
ppc_set_pmu_inuse(0);
goto out_enable;
}
/*
- * Compute MMCR* values for the new set of counters
+ * Compute MMCR* values for the new set of events
*/
- if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
cpuhw->mmcr)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
@@ -607,22 +607,22 @@ void hw_perf_enable(void)
/*
* Add in MMCR0 freeze bits corresponding to the
- * attr.exclude_* bits for the first counter.
- * We have already checked that all counters have the
- * same values for these bits as the first counter.
+ * attr.exclude_* bits for the first event.
+ * We have already checked that all events have the
+ * same values for these bits as the first event.
*/
- counter = cpuhw->counter[0];
- if (counter->attr.exclude_user)
+ event = cpuhw->event[0];
+ if (event->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
- if (counter->attr.exclude_kernel)
- cpuhw->mmcr[0] |= freeze_counters_kernel;
- if (counter->attr.exclude_hv)
+ if (event->attr.exclude_kernel)
+ cpuhw->mmcr[0] |= freeze_events_kernel;
+ if (event->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
* Write the new configuration to MMCR* with the freeze
- * bit set and set the hardware counters to their initial values.
- * Then unfreeze the counters.
+ * bit set and set the hardware events to their initial values.
+ * Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
@@ -631,43 +631,43 @@ void hw_perf_enable(void)
| MMCR0_FC);
/*
- * Read off any pre-existing counters that need to move
+ * Read off any pre-existing events that need to move
* to another PMC.
*/
- for (i = 0; i < cpuhw->n_counters; ++i) {
- counter = cpuhw->counter[i];
- if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
- power_pmu_read(counter);
- write_pmc(counter->hw.idx, 0);
- counter->hw.idx = 0;
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
+ power_pmu_read(event);
+ write_pmc(event->hw.idx, 0);
+ event->hw.idx = 0;
}
}
/*
- * Initialize the PMCs for all the new and moved counters.
+ * Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
- for (i = 0; i < cpuhw->n_counters; ++i) {
- counter = cpuhw->counter[i];
- if (counter->hw.idx)
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
- cpuhw->limited_counter[n_lim] = counter;
+ cpuhw->limited_counter[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
val = 0;
- if (counter->hw.sample_period) {
- left = atomic64_read(&counter->hw.period_left);
+ if (event->hw.sample_period) {
+ left = atomic64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
- atomic64_set(&counter->hw.prev_count, val);
- counter->hw.idx = idx;
+ atomic64_set(&event->hw.prev_count, val);
+ event->hw.idx = idx;
write_pmc(idx, val);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
@@ -688,85 +688,85 @@ void hw_perf_enable(void)
local_irq_restore(flags);
}
-static int collect_events(struct perf_counter *group, int max_count,
- struct perf_counter *ctrs[], u64 *events,
+static int collect_events(struct perf_event *group, int max_count,
+ struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
- struct perf_counter *counter;
+ struct perf_event *event;
- if (!is_software_counter(group)) {
+ if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
- flags[n] = group->hw.counter_base;
+ flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
- list_for_each_entry(counter, &group->sibling_list, list_entry) {
- if (!is_software_counter(counter) &&
- counter->state != PERF_COUNTER_STATE_OFF) {
+ list_for_each_entry(event, &group->sibling_list, group_entry) {
+ if (!is_software_event(event) &&
+ event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
- ctrs[n] = counter;
- flags[n] = counter->hw.counter_base;
- events[n++] = counter->hw.config;
+ ctrs[n] = event;
+ flags[n] = event->hw.event_base;
+ events[n++] = event->hw.config;
}
}
return n;
}
-static void counter_sched_in(struct perf_counter *counter, int cpu)
+static void event_sched_in(struct perf_event *event, int cpu)
{
- counter->state = PERF_COUNTER_STATE_ACTIVE;
- counter->oncpu = cpu;
- counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
- if (is_software_counter(counter))
- counter->pmu->enable(counter);
+ event->state = PERF_EVENT_STATE_ACTIVE;
+ event->oncpu = cpu;
+ event->tstamp_running += event->ctx->time - event->tstamp_stopped;
+ if (is_software_event(event))
+ event->pmu->enable(event);
}
/*
- * Called to enable a whole group of counters.
+ * Called to enable a whole group of events.
* Returns 1 if the group was enabled, or -EAGAIN if it could not be.
* Assumes the caller has disabled interrupts and has
* frozen the PMU with hw_perf_save_disable.
*/
-int hw_perf_group_sched_in(struct perf_counter *group_leader,
+int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx, int cpu)
+ struct perf_event_context *ctx, int cpu)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
long i, n, n0;
- struct perf_counter *sub;
+ struct perf_event *sub;
if (!ppmu)
return 0;
- cpuhw = &__get_cpu_var(cpu_hw_counters);
- n0 = cpuhw->n_counters;
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ n0 = cpuhw->n_events;
n = collect_events(group_leader, ppmu->n_counter - n0,
- &cpuhw->counter[n0], &cpuhw->events[n0],
+ &cpuhw->event[n0], &cpuhw->events[n0],
&cpuhw->flags[n0]);
if (n < 0)
return -EAGAIN;
- if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+ if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
if (i < 0)
return -EAGAIN;
- cpuhw->n_counters = n0 + n;
+ cpuhw->n_events = n0 + n;
cpuhw->n_added += n;
/*
- * OK, this group can go on; update counter states etc.,
- * and enable any software counters
+ * OK, this group can go on; update event states etc.,
+ * and enable any software events
*/
for (i = n0; i < n0 + n; ++i)
- cpuhw->counter[i]->hw.config = cpuhw->events[i];
+ cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuctx->active_oncpu += n;
n = 1;
- counter_sched_in(group_leader, cpu);
- list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
- if (sub->state != PERF_COUNTER_STATE_OFF) {
- counter_sched_in(sub, cpu);
+ event_sched_in(group_leader, cpu);
+ list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
+ if (sub->state != PERF_EVENT_STATE_OFF) {
+ event_sched_in(sub, cpu);
++n;
}
}
@@ -776,14 +776,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
}
/*
- * Add a counter to the PMU.
- * If all counters are not already frozen, then we disable and
+ * Add an event to the PMU.
+ * If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
-static int power_pmu_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
@@ -792,23 +792,23 @@ static int power_pmu_enable(struct perf_counter *counter)
perf_disable();
/*
- * Add the counter to the list (if there is room)
+ * Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
- cpuhw = &__get_cpu_var(cpu_hw_counters);
- n0 = cpuhw->n_counters;
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
- cpuhw->counter[n0] = counter;
- cpuhw->events[n0] = counter->hw.config;
- cpuhw->flags[n0] = counter->hw.counter_base;
- if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+ cpuhw->event[n0] = event;
+ cpuhw->events[n0] = event->hw.config;
+ cpuhw->flags[n0] = event->hw.event_base;
+ if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
- counter->hw.config = cpuhw->events[n0];
- ++cpuhw->n_counters;
+ event->hw.config = cpuhw->events[n0];
+ ++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
@@ -819,36 +819,36 @@ static int power_pmu_enable(struct perf_counter *counter)
}
/*
- * Remove a counter from the PMU.
+ * Remove an event from the PMU.
*/
-static void power_pmu_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_disable();
- power_pmu_read(counter);
-
- cpuhw = &__get_cpu_var(cpu_hw_counters);
- for (i = 0; i < cpuhw->n_counters; ++i) {
- if (counter == cpuhw->counter[i]) {
- while (++i < cpuhw->n_counters)
- cpuhw->counter[i-1] = cpuhw->counter[i];
- --cpuhw->n_counters;
- ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
- if (counter->hw.idx) {
- write_pmc(counter->hw.idx, 0);
- counter->hw.idx = 0;
+ power_pmu_read(event);
+
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ if (event == cpuhw->event[i]) {
+ while (++i < cpuhw->n_events)
+ cpuhw->event[i-1] = cpuhw->event[i];
+ --cpuhw->n_events;
+ ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
+ if (event->hw.idx) {
+ write_pmc(event->hw.idx, 0);
+ event->hw.idx = 0;
}
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
- if (counter == cpuhw->limited_counter[i])
+ if (event == cpuhw->limited_counter[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
@@ -857,8 +857,8 @@ static void power_pmu_disable(struct perf_counter *counter)
}
--cpuhw->n_limited;
}
- if (cpuhw->n_counters == 0) {
- /* disable exceptions if no counters are running */
+ if (cpuhw->n_events == 0) {
+ /* disable exceptions if no events are running */
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
@@ -867,28 +867,28 @@ static void power_pmu_disable(struct perf_counter *counter)
}
/*
- * Re-enable interrupts on a counter after they were throttled
+ * Re-enable interrupts on an event after they were throttled
* because they were coming too fast.
*/
-static void power_pmu_unthrottle(struct perf_counter *counter)
+static void power_pmu_unthrottle(struct perf_event *event)
{
s64 val, left;
unsigned long flags;
- if (!counter->hw.idx || !counter->hw.sample_period)
+ if (!event->hw.idx || !event->hw.sample_period)
return;
local_irq_save(flags);
perf_disable();
- power_pmu_read(counter);
- left = counter->hw.sample_period;
- counter->hw.last_period = left;
+ power_pmu_read(event);
+ left = event->hw.sample_period;
+ event->hw.last_period = left;
val = 0;
if (left < 0x80000000L)
val = 0x80000000L - left;
- write_pmc(counter->hw.idx, val);
- atomic64_set(&counter->hw.prev_count, val);
- atomic64_set(&counter->hw.period_left, left);
- perf_counter_update_userpage(counter);
+ write_pmc(event->hw.idx, val);
+ atomic64_set(&event->hw.prev_count, val);
+ atomic64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
perf_enable();
local_irq_restore(flags);
}
@@ -901,29 +901,29 @@ struct pmu power_pmu = {
};
/*
- * Return 1 if we might be able to put counter on a limited PMC,
+ * Return 1 if we might be able to put the event on a limited PMC,
* or 0 if not.
- * A counter can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
-static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
+static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
- if (counter->attr.exclude_user
- || counter->attr.exclude_kernel
- || counter->attr.exclude_hv
- || counter->attr.sample_period)
+ if (event->attr.exclude_user
+ || event->attr.exclude_kernel
+ || event->attr.exclude_hv
+ || event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
- * The requested event isn't on a limited PMC already;
+ * The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
@@ -936,9 +936,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
}
/*
- * Find an alternative event that goes on a normal PMC, if possible,
- * and return the event code, or 0 if there is no such alternative.
- * (Note: event code 0 is "don't count" on all machines.)
+ * Find an alternative event_id that goes on a normal PMC, if possible,
+ * and return the event_id code, or 0 if there is no such alternative.
+ * (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
@@ -952,26 +952,26 @@ static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
return alt[0];
}
-/* Number of perf_counters counting hardware events */
-static atomic_t num_counters;
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
- * Release the PMU if this is the last perf_counter.
+ * Release the PMU if this is the last perf_event.
*/
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
{
- if (!atomic_add_unless(&num_counters, -1, 1)) {
+ if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_dec_return(&num_counters) == 0)
+ if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
- * Translate a generic cache event config to a raw event code.
+ * Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
@@ -1000,39 +1000,39 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
- struct perf_counter *ctrs[MAX_HWCOUNTERS];
- u64 events[MAX_HWCOUNTERS];
- unsigned int cflags[MAX_HWCOUNTERS];
+ struct perf_event *ctrs[MAX_HWEVENTS];
+ u64 events[MAX_HWEVENTS];
+ unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
if (!ppmu)
return ERR_PTR(-ENXIO);
- switch (counter->attr.type) {
+ switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
- ev = counter->attr.config;
+ ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
- err = hw_perf_cache_event(counter->attr.config, &ev);
+ err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return ERR_PTR(err);
break;
case PERF_TYPE_RAW:
- ev = counter->attr.config;
+ ev = event->attr.config;
break;
default:
return ERR_PTR(-EINVAL);
}
- counter->hw.config_base = ev;
- counter->hw.idx = 0;
+ event->hw.config_base = ev;
+ event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
@@ -1040,28 +1040,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
- counter->attr.exclude_hv = 0;
+ event->attr.exclude_hv = 0;
/*
- * If this is a per-task counter, then we can use
+ * If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
- if (counter->ctx->task)
+ if (event->ctx->task)
flags |= PPMU_ONLY_COUNT_RUN;
/*
- * If this machine has limited counters, check whether this
- * event could go on a limited counter.
+ * If this machine has limited PMCs, check whether this
+ * event_id could go on a limited PMC.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
- if (can_go_on_limited_pmc(counter, ev, flags)) {
+ if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
- * The requested event is on a limited PMC,
+ * The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
@@ -1073,50 +1073,50 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
/*
* If this is in a group, check if it can go on with all the
- * other hardware counters in the group. We assume the counter
+ * other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
- if (counter->group_leader != counter) {
- n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+ if (event->group_leader != event) {
+ n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return ERR_PTR(-EINVAL);
}
events[n] = ev;
- ctrs[n] = counter;
+ ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return ERR_PTR(-EINVAL);
- cpuhw = &get_cpu_var(cpu_hw_counters);
+ cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
- put_cpu_var(cpu_hw_counters);
+ put_cpu_var(cpu_hw_events);
if (err)
return ERR_PTR(-EINVAL);
- counter->hw.config = events[n];
- counter->hw.counter_base = cflags[n];
- counter->hw.last_period = counter->hw.sample_period;
- atomic64_set(&counter->hw.period_left, counter->hw.last_period);
+ event->hw.config = events[n];
+ event->hw.event_base = cflags[n];
+ event->hw.last_period = event->hw.sample_period;
+ atomic64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
- * If no counters are currently in use, then we have to take a
+ * If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
- if (!atomic_inc_not_zero(&num_counters)) {
+ if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_counters) == 0 &&
- reserve_pmc_hardware(perf_counter_interrupt))
+ if (atomic_read(&num_events) == 0 &&
+ reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
- atomic_inc(&num_counters);
+ atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
- counter->destroy = hw_perf_counter_destroy;
+ event->destroy = hw_perf_event_destroy;
if (err)
return ERR_PTR(err);
@@ -1128,24 +1128,24 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
-static void record_and_restart(struct perf_counter *counter, unsigned long val,
+static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
- u64 period = counter->hw.sample_period;
+ u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
/* we don't have to worry about interrupts here */
- prev = atomic64_read(&counter->hw.prev_count);
+ prev = atomic64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
/*
- * See if the total period for this counter has expired,
+ * See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
- left = atomic64_read(&counter->hw.period_left) - delta;
+ left = atomic64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
@@ -1163,18 +1163,18 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
if (record) {
struct perf_sample_data data = {
.addr = 0,
- .period = counter->hw.last_period,
+ .period = event->hw.last_period,
};
- if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+ if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_counter_overflow(counter, nmi, &data, regs)) {
+ if (perf_event_overflow(event, nmi, &data, regs)) {
/*
* Interrupts are coming too fast - throttle them
- * by setting the counter to 0, so it will be
+ * by setting the event to 0, so it will be
* at least 2^30 cycles until the next interrupt
- * (assuming each counter counts at most 2 counts
+ * (assuming each event counts at most 2 counts
* per cycle).
*/
val = 0;
@@ -1182,15 +1182,15 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
}
}
- write_pmc(counter->hw.idx, val);
- atomic64_set(&counter->hw.prev_count, val);
- atomic64_set(&counter->hw.period_left, left);
- perf_counter_update_userpage(counter);
+ write_pmc(event->hw.idx, val);
+ atomic64_set(&event->hw.prev_count, val);
+ atomic64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
}
/*
* Called from generic code to get the misc flags (i.e. processor mode)
- * for an event.
+ * for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
@@ -1198,13 +1198,13 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
if (flags)
return flags;
- return user_mode(regs) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ return user_mode(regs) ? PERF_RECORD_MISC_USER :
+ PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
- * for an event.
+ * for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
@@ -1220,11 +1220,11 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
/*
* Performance monitor interrupt stuff
*/
-static void perf_counter_interrupt(struct pt_regs *regs)
+static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
- struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
- struct perf_counter *counter;
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
@@ -1241,21 +1241,21 @@ static void perf_counter_interrupt(struct pt_regs *regs)
else
irq_enter();
- for (i = 0; i < cpuhw->n_counters; ++i) {
- counter = cpuhw->counter[i];
- if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- val = read_pmc(counter->hw.idx);
+ val = read_pmc(event->hw.idx);
if ((int)val < 0) {
- /* counter has overflowed */
+ /* event has overflowed */
found = 1;
- record_and_restart(counter, val, regs, nmi);
+ record_and_restart(event, val, regs, nmi);
}
}
/*
- * In case we didn't find and reset the counter that caused
- * the interrupt, scan all counters and reset any that are
+ * In case we didn't find and reset the event that caused
+ * the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
@@ -1273,7 +1273,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
- * XXX might want to use MSR.PM to keep the counters frozen until
+ * XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
@@ -1284,9 +1284,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
irq_exit();
}
-void hw_perf_counter_setup(int cpu)
+void hw_perf_event_setup(int cpu)
{
- struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+ struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
@@ -1308,7 +1308,7 @@ int register_power_pmu(struct power_pmu *pmu)
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
- freeze_counters_kernel = MMCR0_FCHV;
+ freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */
return 0;
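
The reservation logic in hw_perf_event_init() above (atomic_inc_not_zero() on the fast path, then a mutex-guarded re-check for the first user) is a generic "first caller grabs the hardware" idiom. A minimal sketch of that idiom, assuming a hypothetical reserve_hw() helper in place of reserve_pmc_hardware() and with the interrupt-handler argument dropped:

    #include <linux/mutex.h>
    #include <linux/errno.h>
    #include <asm/atomic.h>

    int reserve_hw(void);   /* hypothetical stand-in for reserve_pmc_hardware() */

    static atomic_t num_users = ATOMIC_INIT(0);
    static DEFINE_MUTEX(reserve_mutex);

    static int hw_get(void)
    {
            int err = 0;

            /* Fast path: hardware already reserved, just take a reference. */
            if (atomic_inc_not_zero(&num_users))
                    return 0;

            /* Slow path: serialise the first reservation against other
             * first users and against the final release. */
            mutex_lock(&reserve_mutex);
            if (atomic_read(&num_users) == 0 && reserve_hw())
                    err = -EBUSY;
            else
                    atomic_inc(&num_users);
            mutex_unlock(&reserve_mutex);

            return err;
    }

The matching release path decrements the count under the same mutex and frees the hardware when it reaches zero, which is what hw_perf_event_destroy() (not shown in this hunk) is expected to do.
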
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 3c90a3d9173..2a361cdda63 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 31918af3e35..0f4c1c73a6a 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 867f6f66396..c351b3a57fb 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index fa21890531d..ca399ba5034 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 018d094d92f..28a4daacdc0 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 75dccb71a04..479574413a9 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0a321643305..1168c5f440a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+ unsigned long base = mm->brk;
+ unsigned long ret;
+
+#ifdef CONFIG_PPC64
+ /*
+ * If we are using 1TB segments and we are allowed to randomise
+ * the heap, we can put it above 1TB so it is backed by a 1TB
+ * segment. Otherwise the heap will be in the bottom 1TB
+ * which always uses 256MB segments and this may result in a
+ * performance penalty.
+ */
+ if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+ base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+ ret = PAGE_ALIGN(base + brk_rnd());
if (ret < mm->brk)
return mm->brk;
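
The clamp above is easiest to read with the shift spelled out. Assuming SID_SHIFT_1T is 40 (a 1 TB segment spans 2^40 bytes), the computation amounts to the fragment below; a 64-bit task whose heap would otherwise sit in the bottom 1 TB (256 MB segments) gets its brk randomised from the 1 TB boundary upwards instead:

    /* Illustrative fragment only, assuming SID_SHIFT_1T == 40. */
    unsigned long base = max_t(unsigned long, mm->brk, 1UL << 40);
    unsigned long ret  = PAGE_ALIGN(base + brk_rnd());
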
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 864334b337a..bafac2e41ae 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -800,7 +800,7 @@ static void __init prom_send_capabilities(void)
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
/* try calling the ibm,client-architecture-support method */
- prom_printf("Calling ibm,client-architecture...");
+ prom_printf("Calling ibm,client-architecture-support...");
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
root,
@@ -814,6 +814,7 @@ static void __init prom_send_capabilities(void)
return;
}
call_prom("close", 1, 0, root);
+ prom_printf(" not implemented\n");
}
/* no ibm,client-architecture-support call, try the old way */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 02fed27af7f..4271f7a655a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -24,7 +24,6 @@
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
-#include <linux/utsname.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
@@ -328,7 +327,7 @@ static void c_stop(struct seq_file *m, void *v)
{
}
-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
.start =c_start,
.next = c_next,
.stop = c_stop,
@@ -432,9 +431,9 @@ void __init smp_setup_cpu_maps(void)
for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, intserv[j]);
- cpu_set(cpu, cpu_present_map);
+ set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, intserv[j]);
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
cpu++;
}
}
@@ -480,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
maxcpus);
for (cpu = 0; cpu < maxcpus; cpu++)
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
out:
of_node_put(dn);
}
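
smp_setup_cpu_maps() above is one of several places in this patch where direct cpu_set()/cpu_clear() writes to the global maps are replaced by the set_cpu_present()/set_cpu_possible()/set_cpu_online() accessors. A minimal sketch of the accessor style (the two functions here are hypothetical; the accessors are the standard <linux/cpumask.h> ones):

    #include <linux/cpumask.h>

    static void register_cpu_thread(unsigned int cpu)
    {
            /* Mark the logical CPU as possible and present at boot time... */
            set_cpu_possible(cpu, true);
            set_cpu_present(cpu, true);
    }

    static void retire_cpu(unsigned int cpu)
    {
            /* ...and drop it from the online mask on hot-unplug. */
            set_cpu_online(cpu, false);
    }

Going through the accessors rather than writing cpu_possible_map and friends directly is presumably what lets the masks become private to the core cpumask code.
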
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d387b3937cc..9b86a74d281 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -189,11 +189,11 @@ void arch_send_call_function_single_ipi(int cpu)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
unsigned int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
@@ -287,7 +287,7 @@ void __devinit smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != boot_cpuid);
- cpu_set(boot_cpuid, cpu_online_map);
+ set_cpu_online(boot_cpuid, true);
cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
@@ -307,7 +307,7 @@ int generic_cpu_disable(void)
if (cpu == boot_cpuid)
return -EBUSY;
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
fixup_irqs(cpu_online_map);
@@ -361,7 +361,7 @@ void generic_mach_cpu_die(void)
smp_wmb();
while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
local_irq_enable();
}
#endif
@@ -508,7 +508,7 @@ int __devinit start_secondary(void *unused)
ipi_call_lock();
notify_cpu_starting(cpu);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) {
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 1cc5e9e5da9..b97c2d67f4a 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -22,7 +22,6 @@
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
-#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 465e498bcb3..92dc844299b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
+cputime_t cputime_one_jiffy;
+
static void calc_cputime_factors(void)
{
struct div_result res;
@@ -501,6 +503,7 @@ static int __init iSeries_tb_recal(void)
tb_to_xs = divres.result_low;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
vdso_data->tb_to_xs = tb_to_xs;
+ setup_cputime_one_jiffy();
}
else {
printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -527,25 +530,25 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
-#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_counter_pending);
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_event_pending);
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
- get_cpu_var(perf_counter_pending) = 1;
+ get_cpu_var(perf_event_pending) = 1;
set_dec(1);
- put_cpu_var(perf_counter_pending);
+ put_cpu_var(perf_event_pending);
}
-#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
-#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
-#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
-#define test_perf_counter_pending() 0
-#define clear_perf_counter_pending()
+#define test_perf_event_pending() 0
+#define clear_perf_event_pending()
-#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -573,9 +576,9 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
- if (test_perf_counter_pending()) {
- clear_perf_counter_pending();
- perf_counter_do_pending();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
}
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
@@ -960,6 +963,7 @@ void __init time_init(void)
tb_ticks_per_usec = ppc_tb_freq / 1000000;
tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
calc_cputime_factors();
+ setup_cputime_one_jiffy();
/*
* Calculate the length of each tick in ns. It will not be
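
On PPC32 there is no interrupt reserved for pending perf work, so the hunk above keeps a per-CPU flag and uses set_dec(1) to make the decrementer fire almost immediately; timer_interrupt() then notices the flag, clears it and calls perf_event_do_pending(). Gathered into one place, the pattern reads roughly as below (a sketch of the same code; the wrapper function is hypothetical, in the patch the check sits inline in timer_interrupt()):

    #include <linux/percpu.h>
    #include <linux/perf_event.h>
    #include <asm/time.h>

    DEFINE_PER_CPU(u8, perf_event_pending);

    void set_perf_event_pending(void)
    {
            get_cpu_var(perf_event_pending) = 1;  /* flag work on this CPU */
            set_dec(1);                           /* decrementer fires almost at once */
            put_cpu_var(perf_event_pending);
    }

    static void run_pending_perf_work(void)      /* called from timer_interrupt() */
    {
            if (__get_cpu_var(perf_event_pending)) {
                    __get_cpu_var(perf_event_pending) = 0;
                    perf_event_do_pending();
            }
    }
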
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index acb74a17bbb..b4b167b3364 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -1,5 +1,5 @@
/*
- * udbg for for NS16550 compatable serial ports
+ * udbg for NS16550 compatible serial ports
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index a0abce251d0..94e2df3cae0 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
@@ -74,7 +75,7 @@ static int vdso_ready;
static union {
struct vdso_data data;
u8 page[PAGE_SIZE];
-} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
/* Format of the patch table */
@@ -240,6 +241,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
}
/*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ current->mm->context.vdso_base = vdso_base;
+
+ /*
* our vma flags don't have VM_WRITE so by default, the process isn't
* allowed to write those pages.
* gdb can break that with ptrace interface, and thus trigger COW on
@@ -259,11 +267,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
VM_ALWAYSDUMP,
vdso_pagelist);
- if (rc)
+ if (rc) {
+ current->mm->context.vdso_base = 0;
goto fail_mmapsem;
-
- /* Put vDSO base into mm struct */
- current->mm->context.vdso_base = vdso_base;
+ }
up_write(&mm->mmap_sem);
return 0;
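
The reordering above matters because install_special_mapping() is what generates the perf mmap event, and perf names the region via arch_vma_name(); if context.vdso_base is still zero at that point the mapping is not recognised as the vDSO. The powerpc arch_vma_name() is not part of this diff, but the check it performs is presumably of this shape (a hedged sketch, the real code may differ in detail):

    #include <linux/mm.h>

    /* Sketch only: report a mapping as "[vdso]" when it starts at the
     * per-mm vdso_base recorded in the MMU context. */
    const char *arch_vma_name(struct vm_area_struct *vma)
    {
            if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                    return "[vdso]";
            return NULL;
    }

That is also why the hunk clears vdso_base again when install_special_mapping() fails.
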
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index b54b8168813..51ead52141b 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -16,7 +16,7 @@ GCOV_PROFILE := n
EXTRA_CFLAGS := -shared -fno-common -fno-builtin
EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
EXTRA_AFLAGS := -D__VDSO32__ -s
obj-y += vdso32_wrapper.o
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 556f0caa5d8..6e8f507ed32 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -1,7 +1,8 @@
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/page.h>
- .section ".data.page_aligned"
+ __PAGE_ALIGNED_DATA
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index dd0c8e93677..79da65d44a2 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -11,7 +11,7 @@ GCOV_PROFILE := n
EXTRA_CFLAGS := -shared -fno-common -fno-builtin
EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
EXTRA_AFLAGS := -D__VDSO64__ -s
obj-y += vdso64_wrapper.o
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 0529cb9e3b9..b8553d62b79 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,7 +1,8 @@
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/page.h>
- .section ".data.page_aligned"
+ __PAGE_ALIGNED_DATA
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index bc7b41edbdf..77f64218abf 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1054,6 +1054,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
return NULL;
tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ if (tbl == NULL)
+ return NULL;
of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
&tbl->it_index, &offset, &size);
@@ -1233,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
vio_cmo_set_dma_ops(viodev);
else
viodev->dev.archdata.dma_ops = &dma_iommu_ops;
- viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+ set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
set_dev_node(&viodev->dev, of_node_to_nid(of_node));
/* init generic 'struct device' fields: */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 58da4070723..f56429362a1 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
+#include <asm/thread_info.h>
ENTRY(_stext)
@@ -71,12 +72,7 @@ SECTIONS
/* Read-only data */
RODATA
- /* Exception & bug tables */
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(0)
NOTES :kernel :notes
@@ -93,12 +89,7 @@ SECTIONS
*/
. = ALIGN(PAGE_SIZE);
__init_begin = .;
-
- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- } :kernel
+ INIT_TEXT_SECTION(PAGE_SIZE) :kernel
/* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
@@ -122,23 +113,16 @@ SECTIONS
#endif
}
- . = ALIGN(16);
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
+ INIT_SETUP(16)
}
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
+ INIT_CALLS
+ }
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
+ CON_INITCALL
}
SECURITY_INIT
@@ -169,14 +153,10 @@ SECTIONS
__stop___fw_ftr_fixup = .;
}
#endif
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
+ INIT_RAM_FS
}
-#endif
+
PERCPU(PAGE_SIZE)
. = ALIGN(8);
@@ -240,36 +220,24 @@ SECTIONS
#endif
/* The initial task and kernel stack */
-#ifdef CONFIG_PPC32
- . = ALIGN(8192);
-#else
- . = ALIGN(16384);
-#endif
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
- *(.data.init_task)
+ INIT_TASK_DATA(THREAD_SIZE)
}
- . = ALIGN(PAGE_SIZE);
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
- *(.data.page_aligned)
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
}
- . = ALIGN(L1_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
- *(.data.cacheline_aligned)
+ CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
}
- . = ALIGN(L1_CACHE_BYTES);
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
- *(.data.read_mostly)
+ READ_MOSTLY_DATA(L1_CACHE_BYTES)
}
- . = ALIGN(PAGE_SIZE);
.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
- __nosave_begin = .;
- *(.data.nosave)
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
+ NOSAVE_DATA
}
. = ALIGN(PAGE_SIZE);
@@ -280,14 +248,7 @@ SECTIONS
* And finally the bss
*/
- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
- __bss_start = .;
- *(.sbss) *(.scommon)
- *(.dynbss)
- *(.bss)
- *(COMMON)
- __bss_stop = .;
- }
+ BSS_SECTION(0, 0, 0)
. = ALIGN(PAGE_SIZE);
_end = . ;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 830bef0a113..e7dae82c128 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -29,7 +29,7 @@
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/page.h>
@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -312,7 +312,7 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -323,7 +323,7 @@ good_area:
#endif
} else {
current->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 3ef5084b90c..9ddcfb4dc13 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -242,39 +242,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-#ifdef CONFIG_PROC_KCORE
-static struct kcore_list kcore_vmem;
-
-static int __init setup_kcore(void)
-{
- int i;
-
- for (i = 0; i < lmb.memory.cnt; i++) {
- unsigned long base;
- unsigned long size;
- struct kcore_list *kcore_mem;
-
- base = lmb.memory.region[i].base;
- size = lmb.memory.region[i].size;
-
- kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
- if (!kcore_mem)
- panic("%s: kmalloc failed\n", __func__);
-
- /* must stay under 32 bits */
- if ( 0xfffffffful - (unsigned long)__va(base) < size) {
- size = 0xfffffffful - (unsigned long)(__va(base));
- printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
- size);
- }
-
- kclist_add(kcore_mem, __va(base), size);
- }
-
- kclist_add(&kcore_vmem, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
-
- return 0;
-}
-module_init(setup_kcore);
-#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 31582329cd6..335c578b9cc 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -109,35 +109,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-#ifdef CONFIG_PROC_KCORE
-static struct kcore_list kcore_vmem;
-
-static int __init setup_kcore(void)
-{
- int i;
-
- for (i=0; i < lmb.memory.cnt; i++) {
- unsigned long base, size;
- struct kcore_list *kcore_mem;
-
- base = lmb.memory.region[i].base;
- size = lmb.memory.region[i].size;
-
- /* GFP_ATOMIC to avoid might_sleep warnings during boot */
- kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
- if (!kcore_mem)
- panic("%s: kmalloc failed\n", __func__);
-
- kclist_add(kcore_mem, __va(base), size);
- }
-
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
-
- return 0;
-}
-module_init(setup_kcore);
-#endif
-
static void pgd_ctor(void *addr)
{
memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 579382c163a..59736317bf0 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -143,8 +143,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
* memory regions, find holes and callback for contiguous regions.
*/
int
-walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
- int (*func)(unsigned long, unsigned long, void *))
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *))
{
struct lmb_property res;
unsigned long pfn, len;
@@ -166,7 +166,7 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
}
return ret;
}
-EXPORT_SYMBOL_GPL(walk_memory_resource);
+EXPORT_SYMBOL_GPL(walk_system_ram_range);
/*
* Initialize the bootmem system and give it all the memory we
@@ -372,7 +372,7 @@ void __init mem_init(void)
printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
- (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
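
The renamed walk_system_ram_range() keeps the same contract as the function it replaces: walk the LMB memory regions and call func(start_pfn, nr_pages, arg) for every contiguous chunk inside the requested range. A minimal caller sketch using only the signature visible in the hunk (the callback and wrapper are made up for illustration):

    /* Prototype as exported by the hunk above. */
    extern int walk_system_ram_range(unsigned long start_pfn,
                    unsigned long nr_pages, void *arg,
                    int (*func)(unsigned long, unsigned long, void *));

    /* Count how many RAM pages fall inside a pfn range. */
    static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
    {
            *(unsigned long *)arg += nr_pages;
            return 0;       /* a non-zero return presumably stops the walk */
    }

    static unsigned long ram_pages_in(unsigned long start_pfn, unsigned long nr_pages)
    {
            unsigned long total = 0;

            walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
            return total;
    }
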
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 83f1551ec2c..53040931de3 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,8 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
+#include "mmu_decl.h"
+
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
#ifdef CONFIG_SMP
@@ -166,7 +168,7 @@ struct page * maybe_pte_to_page(pte_t pte)
* support falls into the same category.
*/
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -175,6 +177,17 @@ static pte_t set_pte_filter(pte_t pte)
if (!pg)
return pte;
if (!test_bit(PG_arch_1, &pg->flags)) {
+#ifdef CONFIG_8xx
+ /* On 8xx, cache control instructions (particularly
+ * "dcbst" from flush_dcache_icache) fault as write
+ * operation if there is an unpopulated TLB entry
+ * for the address in question. To workaround that,
+ * we invalidate the TLB here, thus avoiding dcbst
+ * misbehaviour.
+ */
+ /* 8xx doesn't care about PID, size or ind args */
+ _tlbil_va(addr, 0, 0, 0);
+#endif /* CONFIG_8xx */
flush_dcache_icache_page(pg);
set_bit(PG_arch_1, &pg->flags);
}
@@ -194,7 +207,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
*/
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
struct page *pg;
@@ -276,7 +289,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
* this context might not have been activated yet when this
* is called.
*/
- pte = set_pte_filter(pte);
+ pte = set_pte_filter(pte, addr);
/* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index ef1cccf7117..f288279e679 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -18,7 +18,6 @@
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
-#include <asm/reg.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 9efc8bda01b..e382cae678b 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT
config PPC_PERF_CTRS
def_bool y
- depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+ depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
help
- This enables the powerpc-specific perf_counter back-end.
+ This enables the powerpc-specific perf_event back-end.
config SMP
depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index 93b0efddd65..39d361c5c6d 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -77,7 +77,7 @@ static void __init celleb_init_direct_mapping(void)
static void celleb_dma_dev_setup(struct device *dev)
{
dev->archdata.dma_ops = get_pci_dma_ops();
- dev->archdata.dma_data = (void *)celleb_dma_direct_offset;
+ set_dma_offset(dev, celleb_dma_direct_offset);
}
static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 416db17eb18..ca5bfdfe47f 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -657,15 +657,13 @@ static void cell_dma_dev_setup_fixed(struct device *dev);
static void cell_dma_dev_setup(struct device *dev)
{
- struct dev_archdata *archdata = &dev->archdata;
-
/* Order is important here, these are not mutually exclusive */
if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
cell_dma_dev_setup_fixed(dev);
else if (get_pci_dma_ops() == &dma_iommu_ops)
- archdata->dma_data = cell_get_iommu_table(dev);
+ set_iommu_table_base(dev, cell_get_iommu_table(dev));
else if (get_pci_dma_ops() == &dma_direct_ops)
- archdata->dma_data = (void *)cell_dma_direct_offset;
+ set_dma_offset(dev, cell_dma_direct_offset);
else
BUG();
}
@@ -973,11 +971,10 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
static void cell_dma_dev_setup_fixed(struct device *dev)
{
- struct dev_archdata *archdata = &dev->archdata;
u64 addr;
addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
- archdata->dma_data = (void *)addr;
+ set_dma_offset(dev, addr);
dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 24b30b6909c..fc1b1c42b1d 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -119,7 +119,7 @@ spufs_new_file(struct super_block *sb, struct dentry *dentry,
const struct file_operations *fops, int mode,
size_t size, struct spu_context *ctx)
{
- static struct inode_operations spufs_file_iops = {
+ static const struct inode_operations spufs_file_iops = {
.setattr = spufs_setattr,
};
struct inode *inode;
@@ -773,7 +773,7 @@ static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct spufs_sb_info *info;
- static struct super_operations s_ops = {
+ static const struct super_operations s_ops = {
.alloc_inode = spufs_alloc_inode,
.destroy_inode = spufs_destroy_inode,
.statfs = simple_statfs,
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 6c1e1011959..9d53cb481a7 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -193,7 +193,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
pdn->iommu_table = iommu_init_table(tbl, -1);
else
kfree(tbl);
- pdev->dev.archdata.dma_data = pdn->iommu_table;
+ set_iommu_table_base(&pdev->dev, pdn->iommu_table);
}
#else
#define pci_dma_dev_setup_iseries NULL
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index a0ff03a3d8d..7b1d608ea3c 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -189,7 +189,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
}
#endif
- dev->dev.archdata.dma_data = &iommu_table_iobmap;
+ set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}
static void pci_dma_bus_setup_null(struct pci_bus *b) { }
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 937a38e7317..b40c22d697f 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -320,7 +320,7 @@ static int __init smp_psurge_probe(void)
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
for (i = 1; i < ncpus ; ++i)
- cpu_set(i, cpu_present_map);
+ set_cpu_present(i, true);
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@@ -867,7 +867,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
int smp_core99_cpu_disable(void)
{
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
/* XXX reset cpu affinity here */
mpic_cpu_set_priority(0xf);
@@ -952,7 +952,7 @@ void __init pmac_setup_smp(void)
int cpu;
for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_possible(cpu, true);
smp_ops = &psurge_smp_ops;
}
#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 572771fd846..9490157da62 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -1,5 +1,5 @@
/*
- * udbg for for zilog scc ports as found on Apple PowerMacs
+ * udbg for zilog scc ports as found on Apple PowerMacs
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a20ead87153..ebff6d9a4e3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -94,7 +94,7 @@ static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
vdso_data->processorCount--;
/*fix boot_cpuid here*/
@@ -185,7 +185,7 @@ static int pseries_add_processor(struct device_node *np)
for_each_cpu_mask(cpu, tmp) {
BUG_ON(cpu_isset(cpu, cpu_present_map));
- cpu_set(cpu, cpu_present_map);
+ set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
@@ -217,7 +217,7 @@ static void pseries_remove_processor(struct device_node *np)
if (get_hard_smp_processor_id(cpu) != intserv[i])
continue;
BUG_ON(cpu_online(cpu));
- cpu_clear(cpu, cpu_present_map);
+ set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
break;
}
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index eae51ef9af2..3631a4f277e 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -71,7 +71,7 @@ static int hc_show(struct seq_file *m, void *p)
return 0;
}
-static struct seq_operations hcall_inst_seq_ops = {
+static const struct seq_operations hcall_inst_seq_ops = {
.start = hc_start,
.next = hc_next,
.stop = hc_stop,
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 661c8e02bcb..1a0000a4b6d 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -482,7 +482,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
phb->node);
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
- dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+ set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
return;
}
@@ -494,7 +494,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
dn = dn->parent;
if (dn && PCI_DN(dn))
- dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+ set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
@@ -538,7 +538,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
*/
if (dma_window == NULL || pdn->parent == NULL) {
pr_debug(" no dma window for device, linking to parent\n");
- dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
+ set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
return;
}
@@ -554,7 +554,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
}
- dev->dev.archdata.dma_data = pci->iommu_table;
+ set_iommu_table_base(&dev->dev, pci->iommu_table);
}
#else /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries NULL
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
new file mode 100755
index 00000000000..d2571096c3e
--- /dev/null
+++ b/arch/powerpc/relocs_check.pl
@@ -0,0 +1,56 @@
+#!/usr/bin/perl
+
+# Copyright © 2009 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the relocations of a vmlinux for "suspicious"
+# relocations.
+
+use strict;
+use warnings;
+
+if ($#ARGV != 1) {
+ die "$0 [path to objdump] [path to vmlinux]\n";
+}
+
+# Have Kbuild supply the path to objdump so we handle cross compilation.
+my $objdump = shift;
+my $vmlinux = shift;
+my $bad_relocs_count = 0;
+my $bad_relocs = "";
+my $old_binutils = 0;
+
+open(FD, "$objdump -R $vmlinux|") or die;
+while (<FD>) {
+ study $_;
+
+ # Only look at relocation lines.
+ next if (!/\s+R_/);
+
+ # These relocations are okay
+ next if (/R_PPC64_RELATIVE/ or /R_PPC64_NONE/ or
+ /R_PPC64_ADDR64\s+mach_/);
+
+ # If we see this type of relocation it's an indication that
+ # we /may/ be using an old version of binutils.
+ if (/R_PPC64_UADDR64/) {
+ $old_binutils++;
+ }
+
+ $bad_relocs_count++;
+ $bad_relocs .= $_;
+}
+
+if ($bad_relocs_count) {
+ print "WARNING: $bad_relocs_count bad relocations\n";
+ print $bad_relocs;
+}
+
+if ($old_binutils) {
+ print "WARNING: You need at binutils >= 2.19 to build a ".
+ "CONFIG_RELCOATABLE kernel\n";
+}
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index a4779912a5c..88f4ae78783 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -165,7 +165,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
return 0;
}
-static struct block_device_operations axon_ram_devops = {
+static const struct block_device_operations axon_ram_devops = {
.owner = THIS_MODULE,
.direct_access = axon_ram_direct_access
};
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 89639ecbf38..ae3c4db86fe 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -297,7 +297,7 @@ static void pci_dma_dev_setup_dart(struct pci_dev *dev)
/* We only have one iommu table on the mac for now, which makes
* things simple. Setup all PCI devices to point to this table
*/
- dev->dev.archdata.dma_data = &iommu_table_dart;
+ set_iommu_table_base(&dev->dev, &iommu_table_dart);
}
static void pci_dma_bus_setup_dart(struct pci_bus *bus)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0e09a45ac79..c6f0a71b405 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -335,6 +335,16 @@ int cpus_are_in_xmon(void)
}
#endif
+static inline int unrecoverable_excp(struct pt_regs *regs)
+{
+#ifdef CONFIG_4xx
+ /* We have no MSR_RI bit on 4xx, so we simply return false */
+ return 0;
+#else
+ return ((regs->msr & MSR_RI) == 0);
+#endif
+}
+
static int xmon_core(struct pt_regs *regs, int fromipi)
{
int cmd = 0;
@@ -388,7 +398,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
bp = NULL;
if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
bp = at_breakpoint(regs->nip);
- if (bp || (regs->msr & MSR_RI) == 0)
+ if (bp || unrecoverable_excp(regs))
fromipi = 0;
if (!fromipi) {
@@ -399,7 +409,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
cpu, BP_NUM(bp));
xmon_print_symbol(regs->nip, " ", ")\n");
}
- if ((regs->msr & MSR_RI) == 0)
+ if (unrecoverable_excp(regs))
printf("WARNING: exception is not recoverable, "
"can't continue\n");
release_output_lock();
@@ -490,7 +500,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
printf("Stopped at breakpoint %x (", BP_NUM(bp));
xmon_print_symbol(regs->nip, " ", ")\n");
}
- if ((regs->msr & MSR_RI) == 0)
+ if (unrecoverable_excp(regs))
printf("WARNING: exception is not recoverable, "
"can't continue\n");
remove_bpts();
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1c866efd217..43c0acad716 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -94,7 +94,7 @@ config S390
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config SCHED_OMIT_FRAME_POINTER
bool
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 264528e4f58..b55fd7ed1c3 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -50,10 +50,9 @@ static struct platform_device *appldata_pdev;
* /proc entries (sysctl)
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
-static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
+static int appldata_timer_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
- struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos);
@@ -247,7 +246,7 @@ __appldata_vtimer_setup(int cmd)
* Start/Stop timer, show status of timer (0 = not active, 1 = active)
*/
static int
-appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
+appldata_timer_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int len;
@@ -289,7 +288,7 @@ out:
* current timer interval.
*/
static int
-appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
+appldata_interval_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int len, interval;
@@ -335,7 +334,7 @@ out:
* monitoring (0 = not in process, 1 = in process)
*/
static int
-appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
+appldata_generic_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
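
All three appldata handlers above lose their struct file * argument; this kernel series drops the filp parameter from the sysctl ->proc_handler signature tree-wide, so a handler now receives only the table entry, the write flag, the user buffer and the length/position pointers. A minimal handler in the new style (the handler itself is hypothetical; proc_dointvec is assumed to have been converted the same way):

    #include <linux/sysctl.h>

    static int my_handler(ctl_table *ctl, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            /* Delegate to the generic integer handler, which takes the
             * same filp-less argument list after this conversion. */
            return proc_dointvec(ctl, write, buffer, lenp, ppos);
    }
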
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index d4026f62cb0..aed3069699b 100644
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 4e91a2573cc..ab4464486b7 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Mon Jun 22 11:08:16 2009
+# Linux kernel version: 2.6.31
+# Tue Sep 22 17:43:13 2009
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
@@ -24,6 +24,7 @@ CONFIG_PGSTE=y
CONFIG_VIRT_CPU_ACCOUNTING=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_S390=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,11 +49,12 @@ CONFIG_AUDIT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
@@ -103,11 +105,12 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_STRIP_ASM_SYMS is not set
@@ -116,7 +119,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_SYSCALL_WRAPPERS=y
@@ -176,6 +178,7 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_64BIT=y
+# CONFIG_KTIME_SCALAR is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
@@ -257,7 +260,6 @@ CONFIG_FORCE_MAX_ZONEORDER=9
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
# CONFIG_CMM is not set
-# CONFIG_PAGE_STATES is not set
# CONFIG_APPLDATA_BASE is not set
CONFIG_HZ_100=y
# CONFIG_HZ_250 is not set
@@ -280,6 +282,7 @@ CONFIG_PM_SLEEP_SMP=y
CONFIG_PM_SLEEP=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
+# CONFIG_PM_RUNTIME is not set
CONFIG_NET=y
#
@@ -394,6 +397,7 @@ CONFIG_IP_SCTP=m
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -487,6 +491,7 @@ CONFIG_CCW=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -501,6 +506,7 @@ CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_LOOP=m
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_OSD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -594,8 +600,11 @@ CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_MIRROR=y
+# CONFIG_DM_LOG_USERSPACE is not set
CONFIG_DM_ZERO=y
CONFIG_DM_MULTIPATH=m
+# CONFIG_DM_MULTIPATH_QL is not set
+# CONFIG_DM_MULTIPATH_ST is not set
# CONFIG_DM_DELAY is not set
# CONFIG_DM_UEVENT is not set
CONFIG_NETDEVICES=y
@@ -615,7 +624,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
-# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
@@ -678,6 +686,7 @@ CONFIG_SCLP_CONSOLE=y
CONFIG_SCLP_VT220_TTY=y
CONFIG_SCLP_VT220_CONSOLE=y
CONFIG_SCLP_CPI=m
+CONFIG_SCLP_ASYNC=m
CONFIG_S390_TAPE=m
#
@@ -737,6 +746,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -798,7 +808,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
# CONFIG_EXOFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -885,11 +894,13 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
@@ -979,11 +990,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+CONFIG_CRYPTO_VMAC=m
#
# Digest
#
CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_GHASH=m
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index bd9914b8948..341aff2687a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -41,7 +41,7 @@ struct hypfs_sb_info {
static const struct file_operations hypfs_file_ops;
static struct file_system_type hypfs_type;
-static struct super_operations hypfs_s_ops;
+static const struct super_operations hypfs_s_ops;
/* start of list of all dentries, which have to be deleted on update */
static struct dentry *hypfs_last_dentry;
@@ -472,7 +472,7 @@ static struct file_system_type hypfs_type = {
.kill_sb = hypfs_kill_super
};
-static struct super_operations hypfs_s_ops = {
+static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
.drop_inode = hypfs_drop_inode,
.show_options = hypfs_show_options,
@@ -496,7 +496,7 @@ static int __init hypfs_init(void)
}
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
if (!s390_kobj) {
- rc = -ENOMEM;;
+ rc = -ENOMEM;
goto fail_sysfs;
}
rc = register_filesystem(&hypfs_type);
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 7a3817a656d..24b1244aadb 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base)
#endif /* __s390x__ */
#define cputime_zero (0ULL)
+#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~0UL >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6bc9426a6fb..f2ef4b619ce 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -86,6 +86,7 @@
#define __LC_PGM_OLD_PSW 0x0150
#define __LC_MCK_OLD_PSW 0x0160
#define __LC_IO_OLD_PSW 0x0170
+#define __LC_RESTART_PSW 0x01a0
#define __LC_EXT_NEW_PSW 0x01b0
#define __LC_SVC_NEW_PSW 0x01c0
#define __LC_PGM_NEW_PSW 0x01d0
@@ -189,6 +190,14 @@ union save_area {
#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
#endif
+#ifndef __s390x__
+#define LC_ORDER 0
+#else
+#define LC_ORDER 1
+#endif
+
+#define LC_PAGES (1UL << LC_ORDER)
+
struct _lowcore
{
#ifndef __s390x__
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index f63fe7b431e..4e9c8ae0a63 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -9,18 +9,7 @@
#ifndef __S390_MMAN_H__
#define __S390_MMAN_H__
-#include <asm-generic/mman-common.h>
-
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
+#include <asm-generic/mman.h>
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
int s390_mmap_check(unsigned long addr, unsigned long len);
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
deleted file mode 100644
index 7015188c2cc..00000000000
--- a/arch/s390/include/asm/perf_counter.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Performance counter support - s390 specific definitions.
- *
- * Copyright 2009 Martin Schwidefsky, IBM Corporation.
- */
-
-static inline void set_perf_counter_pending(void) {}
-static inline void clear_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
new file mode 100644
index 00000000000..3840cbe7763
--- /dev/null
+++ b/arch/s390/include/asm/perf_event.h
@@ -0,0 +1,10 @@
+/*
+ * Performance event support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_event_pending(void) {}
+static inline void clear_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index cf8eed3fa77..b4271545831 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -295,7 +295,7 @@ static inline void ATTRIB_NORET disabled_wait(unsigned long code)
" oi 0x384(1),0x10\n"/* fake protection bit */
" lpswe 0(%1)"
: "=m" (ctl_buf)
- : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
+ : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
#endif /* __s390x__ */
while (1);
}
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index c991fe6473c..a868b272c25 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -62,7 +62,7 @@ extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#endif
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 5e0ad618dc4..6e7211abd95 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,7 +9,6 @@ const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
extern cpumask_t cpu_core_map[NR_CPUS];
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index c80602d7c88..cb5232df151 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -268,7 +268,7 @@
#define __NR_preadv 328
#define __NR_pwritev 329
#define __NR_rt_tgsigqueueinfo 330
-#define __NR_perf_counter_open 331
+#define __NR_perf_event_open 331
#define NR_syscalls 332
/*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fa9905ce7d0..63e46433e81 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/kbuild.h>
#include <asm/vdso.h>
+#include <asm/sigp.h>
int main(void)
{
@@ -59,6 +60,10 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
-
+ /* constants for SIGP */
+ DEFINE(__SIGP_STOP, sigp_stop);
+ DEFINE(__SIGP_RESTART, sigp_restart);
+ DEFINE(__SIGP_SENSE, sigp_sense);
+ DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
return 0;
}
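The new DEFINE() lines export the sigp order codes as assembler-visible constants (__SIGP_STOP and friends), which the reworked swsusp_asm64.S later in this patch relies on. The mechanism behind DEFINE() is the usual asm-offsets trick; paraphrased from include/linux/kbuild.h, it is roughly:

/* Each DEFINE() plants a "->NAME value" marker in the generated assembly;
 * the build post-processes those markers into "#define NAME value" lines
 * in asm-offsets.h, which .S files can then include.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

With that in place the __SIGP_* names appear to assembly sources as plain numeric constants.
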
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 9ab188d67a3..0debcec23a3 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -24,7 +24,6 @@
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
-#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
@@ -443,66 +442,28 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
* sys32_execve() executes a new program after the asm stub has set
* things up for us. This should basically do what I want it to.
*/
-asmlinkage long sys32_execve(void)
+asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp)
{
struct pt_regs *regs = task_pt_regs(current);
char *filename;
- unsigned long result;
- int rc;
-
- filename = getname(compat_ptr(regs->orig_gpr2));
- if (IS_ERR(filename)) {
- result = PTR_ERR(filename);
- goto out;
- }
- rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]),
- compat_ptr(regs->gprs[4]), regs);
- if (rc) {
- result = rc;
- goto out_putname;
- }
+ long rc;
+
+ filename = getname(name);
+ rc = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ return rc;
+ rc = compat_do_execve(filename, argv, envp, regs);
+ if (rc)
+ goto out;
current->thread.fp_regs.fpc=0;
asm volatile("sfpc %0,0" : : "d" (0));
- result = regs->gprs[2];
-out_putname:
- putname(filename);
+ rc = regs->gprs[2];
out:
- return result;
-}
-
-
-#ifdef CONFIG_MODULES
-
-asmlinkage long
-sys32_init_module(void __user *umod, unsigned long len,
- const char __user *uargs)
-{
- return sys_init_module(umod, len, uargs);
-}
-
-asmlinkage long
-sys32_delete_module(const char __user *name_user, unsigned int flags)
-{
- return sys_delete_module(name_user, flags);
-}
-
-#else /* CONFIG_MODULES */
-
-asmlinkage long
-sys32_init_module(void __user *umod, unsigned long len,
- const char __user *uargs)
-{
- return -ENOSYS;
+ putname(filename);
+ return rc;
}
-asmlinkage long
-sys32_delete_module(const char __user *name_user, unsigned int flags)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_MODULES */
-
asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
size_t count, u32 poshi, u32 poslo)
{
@@ -801,23 +762,6 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
return sys_write(fd, buf, count);
}
-asmlinkage long sys32_clone(void)
-{
- struct pt_regs *regs = task_pt_regs(current);
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs->gprs[3] & 0xffffffffUL;
- newsp = regs->orig_gpr2 & 0x7fffffffUL;
- parent_tidptr = compat_ptr(regs->gprs[4]);
- child_tidptr = compat_ptr(regs->gprs[5]);
- if (!newsp)
- newsp = regs->gprs[15];
- return do_fork(clone_flags, newsp, regs, 0,
- parent_tidptr, child_tidptr);
-}
-
/*
* 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
* These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 836a2884290..c07f9ca05ad 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -198,7 +198,8 @@ long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
compat_sigset_t __user *oset, size_t sigsetsize);
long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
-long sys32_execve(void);
+long sys32_execve(char __user *name, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp);
long sys32_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
long sys32_delete_module(const char __user *name_user, unsigned int flags);
@@ -222,7 +223,6 @@ unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
long sys32_read(unsigned int fd, char __user * buf, size_t count);
long sys32_write(unsigned int fd, char __user * buf, size_t count);
-long sys32_clone(void);
long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 88a83366819..682fb69dba2 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -568,18 +568,18 @@ compat_sys_sigprocmask_wrapper:
llgtr %r4,%r4 # compat_old_sigset_t *
jg compat_sys_sigprocmask # branch to system call
- .globl sys32_init_module_wrapper
-sys32_init_module_wrapper:
+ .globl sys_init_module_wrapper
+sys_init_module_wrapper:
llgtr %r2,%r2 # void *
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # char *
- jg sys32_init_module # branch to system call
+ jg sys_init_module # branch to system call
- .globl sys32_delete_module_wrapper
-sys32_delete_module_wrapper:
+ .globl sys_delete_module_wrapper
+sys_delete_module_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned int
- jg sys32_delete_module # branch to system call
+ jg sys_delete_module # branch to system call
.globl sys32_quotactl_wrapper
sys32_quotactl_wrapper:
@@ -1832,11 +1832,26 @@ compat_sys_rt_tgsigqueueinfo_wrapper:
llgtr %r5,%r5 # struct compat_siginfo *
jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
- .globl sys_perf_counter_open_wrapper
-sys_perf_counter_open_wrapper:
- llgtr %r2,%r2 # const struct perf_counter_attr *
+ .globl sys_perf_event_open_wrapper
+sys_perf_event_open_wrapper:
+ llgtr %r2,%r2 # const struct perf_event_attr *
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
lgfr %r5,%r5 # int
llgfr %r6,%r6 # unsigned long
- jg sys_perf_counter_open # branch to system call
+ jg sys_perf_event_open # branch to system call
+
+ .globl sys_clone_wrapper
+sys_clone_wrapper:
+ llgfr %r2,%r2 # unsigned long
+ llgfr %r3,%r3 # unsigned long
+ llgtr %r4,%r4 # int *
+ llgtr %r5,%r5 # int *
+ jg sys_clone # branch to system call
+
+ .globl sys32_execve_wrapper
+sys32_execve_wrapper:
+ llgtr %r2,%r2 # char *
+ llgtr %r3,%r3 # compat_uptr_t *
+ llgtr %r4,%r4 # compat_uptr_t *
+ jg sys32_execve # branch to system call
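The wrappers above all follow one pattern: widen each 31-bit argument before the tail jump (jg) into the 64-bit syscall. llgtr clears the 31-bit address-mode bit and zero-extends user pointers, llgfr zero-extends unsigned values, and lgfr sign-extends signed ones. Expressed in C, the fix-ups amount to roughly the following sketch; the helper names are made up for illustration, though the pointer case matches what compat_ptr() does on s390:

#include <linux/types.h>

static inline void __user *widen_user_ptr(u32 uptr)	/* llgtr */
{
	/* drop the 31-bit address-mode bit, then zero-extend */
	return (void __user *)(unsigned long)(uptr & 0x7fffffffU);
}

static inline unsigned long widen_unsigned(u32 val)	/* llgfr */
{
	return (unsigned long)val;	/* zero-extend */
}

static inline long widen_signed(s32 val)		/* lgfr */
{
	return (long)val;		/* sign-extend */
}
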
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 4c512561687..20f282c911c 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -881,11 +881,11 @@ static int debug_active=1;
* if debug_active is already off
*/
static int
-s390dbf_procactive(ctl_table *table, int write, struct file *filp,
+s390dbf_procactive(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
- return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ return proc_dointvec(table, write, buffer, lenp, ppos);
else
return 0;
}
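This hunk is part of the interface change that removed the unused struct file * argument from sysctl ->proc_handler callbacks, so both the handler prototype and its proc_dointvec() call drop one parameter. A minimal handler written against the new prototype looks like the sketch below; example_flag and example_proc_handler are made-up names, not from the patch:

#include <linux/sysctl.h>

static int example_flag;

static int example_proc_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	/* delegate parsing and copy-out to the generic integer helper */
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
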
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 950c59c6688..e1e5e767ab5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -42,10 +42,12 @@ long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
u32 len_low);
long sys_fork(void);
-long sys_clone(void);
+long sys_clone(unsigned long newsp, unsigned long clone_flags,
+ int __user *parent_tidptr, int __user *child_tidptr);
long sys_vfork(void);
void execve_tail(void);
-long sys_execve(void);
+long sys_execve(char __user *name, char __user * __user *argv,
+ char __user * __user *envp);
long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact);
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
index fe787f9e5f3..4d1c9fb0b54 100644
--- a/arch/s390/kernel/init_task.c
+++ b/arch/s390/kernel/init_task.c
@@ -25,9 +25,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 5a43f27eec1..5417eb57271 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -27,11 +27,11 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/utsname.h>
#include <linux/tick.h>
#include <linux/elfcore.h>
#include <linux/kernel_stat.h>
#include <linux/syscalls.h>
+#include <linux/compat.h>
#include <asm/compat.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -230,17 +230,11 @@ SYSCALL_DEFINE0(fork)
return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
}
-SYSCALL_DEFINE0(clone)
+SYSCALL_DEFINE4(clone, unsigned long, newsp, unsigned long, clone_flags,
+ int __user *, parent_tidptr, int __user *, child_tidptr)
{
struct pt_regs *regs = task_pt_regs(current);
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
- clone_flags = regs->gprs[3];
- newsp = regs->orig_gpr2;
- parent_tidptr = (int __user *) regs->gprs[4];
- child_tidptr = (int __user *) regs->gprs[5];
if (!newsp)
newsp = regs->gprs[15];
return do_fork(clone_flags, newsp, regs, 0,
@@ -274,30 +268,25 @@ asmlinkage void execve_tail(void)
/*
* sys_execve() executes a new program.
*/
-SYSCALL_DEFINE0(execve)
+SYSCALL_DEFINE3(execve, char __user *, name, char __user * __user *, argv,
+ char __user * __user *, envp)
{
struct pt_regs *regs = task_pt_regs(current);
char *filename;
- unsigned long result;
- int rc;
+ long rc;
- filename = getname((char __user *) regs->orig_gpr2);
- if (IS_ERR(filename)) {
- result = PTR_ERR(filename);
+ filename = getname(name);
+ rc = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ return rc;
+ rc = do_execve(filename, argv, envp, regs);
+ if (rc)
goto out;
- }
- rc = do_execve(filename, (char __user * __user *) regs->gprs[3],
- (char __user * __user *) regs->gprs[4], regs);
- if (rc) {
- result = rc;
- goto out_putname;
- }
execve_tail();
- result = regs->gprs[2];
-out_putname:
- putname(filename);
+ rc = regs->gprs[2];
out:
- return result;
+ putname(filename);
+ return rc;
}
/*
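The reworked sys_execve() above (and its compat twin earlier in this patch) takes its arguments explicitly via SYSCALL_DEFINE3 instead of fishing them out of pt_regs, and it adopts the common getname()/PTR_ERR()/putname() flow. Reduced to its skeleton, the idiom looks like this; do_something() is a stand-in for do_execve(), not a real function:

#include <linux/fs.h>
#include <linux/err.h>

static long do_something(const char *filename)
{
	return 0;			/* stand-in for the real work */
}

static long example_path_op(char __user *name)
{
	char *filename;
	long rc;

	filename = getname(name);
	rc = PTR_ERR(filename);		/* harmless if filename is valid... */
	if (IS_ERR(filename))
		return rc;		/* ...and already the right errno if not */
	rc = do_something(filename);
	putname(filename);
	return rc;
}
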
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index f3ddd7ac06c..a8738676b26 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -339,24 +339,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int copied, ret;
switch (request) {
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- /* Remove high order bit from address (only for 31 bit). */
- addr &= PSW_ADDR_INSN;
- /* read word at location addr. */
- return generic_ptrace_peekdata(child, addr, data);
-
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
return peek_user(child, addr, data);
- case PTRACE_POKETEXT:
- case PTRACE_POKEDATA:
- /* Remove high order bit from address (only for 31 bit). */
- addr &= PSW_ADDR_INSN;
- /* write the word at location addr. */
- return generic_ptrace_pokedata(child, addr, data);
-
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
return poke_user(child, addr, data);
@@ -386,8 +372,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
copied += sizeof(unsigned long);
}
return 0;
+ default:
+ /* Removing high order bit from addr (only for 31 bit). */
+ addr &= PSW_ADDR_INSN;
+ return ptrace_request(child, request, addr, data);
}
- return ptrace_request(child, request, addr, data);
}
#ifdef CONFIG_COMPAT
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 20639dfe0c4..e27ca63076d 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -24,8 +24,6 @@ LC_EXT_INT_CODE = 0x86 # addr of ext int code
# R3 = external interruption parameter if R2=0
#
-.section ".init.text","ax"
-
_sclp_wait_int:
stm %r6,%r15,24(%r15) # save registers
basr %r13,0 # get base register
@@ -318,9 +316,8 @@ _sclp_print_early:
.long _sclp_work_area
.Lascebc:
.long _ascebc
-.previous
-.section ".init.data","a"
+.section .data,"aw",@progbits
.balign 4096
_sclp_work_area:
.fill 4096
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 56c16876b91..c932caa5e85 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -147,11 +147,11 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
udelay(10);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
- for_each_cpu_mask(cpu, mask)
+ for_each_cpu(cpu, mask)
smp_ext_bitcall(cpu, ec_call_function);
}
@@ -475,10 +475,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
{
unsigned long async_stack, panic_stack;
struct _lowcore *lowcore;
- int lc_order;
- lc_order = sizeof(long) == 8 ? 1 : 0;
- lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+ lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
if (!lowcore)
return -ENOMEM;
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
@@ -509,16 +507,14 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
out:
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
- free_pages((unsigned long) lowcore, lc_order);
+ free_pages((unsigned long) lowcore, LC_ORDER);
return -ENOMEM;
}
static void smp_free_lowcore(int cpu)
{
struct _lowcore *lowcore;
- int lc_order;
- lc_order = sizeof(long) == 8 ? 1 : 0;
lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
@@ -528,7 +524,7 @@ static void smp_free_lowcore(int cpu)
#endif
free_page(lowcore->panic_stack - PAGE_SIZE);
free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
- free_pages((unsigned long) lowcore, lc_order);
+ free_pages((unsigned long) lowcore, LC_ORDER);
lowcore_ptr[cpu] = NULL;
}
@@ -664,7 +660,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
unsigned long async_stack, panic_stack;
struct _lowcore *lowcore;
unsigned int cpu;
- int lc_order;
smp_detect_cpus();
@@ -674,8 +669,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
print_cpu_info();
/* Reallocate current lowcore, but keep its contents. */
- lc_order = sizeof(long) == 8 ? 1 : 0;
- lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+ lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
panic_stack = __get_free_page(GFP_KERNEL);
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
BUG_ON(!lowcore || !panic_stack || !async_stack);
@@ -1047,42 +1041,6 @@ out:
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
-/*
- * If the resume kernel runs on another cpu than the suspended kernel,
- * we have to switch the cpu IDs in the logical map.
- */
-void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id,
- struct _lowcore *suspend_lowcore)
-{
- int cpu, suspend_cpu_id, resume_cpu_id;
- u32 suspend_phys_cpu_id;
-
- suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr];
- suspend_cpu_id = suspend_lowcore->cpu_nr;
-
- for_each_present_cpu(cpu) {
- if (__cpu_logical_map[cpu] == resume_phys_cpu_id) {
- resume_cpu_id = cpu;
- goto found;
- }
- }
- panic("Could not find resume cpu in logical map.\n");
-
-found:
- printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id);
- printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id);
-
- __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id;
- __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id;
-
- lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id;
-}
-
-u32 smp_get_phys_cpu_id(void)
-{
- return __cpu_logical_map[smp_processor_id()];
-}
-
static int __init topology_init(void)
{
int cpu;
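Two independent cleanups meet in this file: the call-function IPI hook now takes a const struct cpumask * and walks it with for_each_cpu() (the by-value cpumask_t variant is being retired tree-wide), and the open-coded lc_order computation gives way to the LC_ORDER constant introduced in lowcore.h. The new-style mask iteration, shown in isolation with send_ipi_to() as a placeholder for smp_ext_bitcall():

#include <linux/cpumask.h>

static void send_ipi_to(int cpu)
{
	/* arch-specific signalling would go here */
}

static void example_send_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* masks are passed and walked by pointer, never copied by value */
	for_each_cpu(cpu, mask)
		send_ipi_to(cpu);
}
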
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 086bee970ca..cf9e5c6d552 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -6,36 +6,26 @@
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*/
-#include <linux/suspend.h>
-#include <linux/reboot.h>
#include <linux/pfn.h>
-#include <linux/mm.h>
-#include <asm/sections.h>
#include <asm/system.h>
-#include <asm/ipl.h>
/*
* References to section boundaries
*/
extern const void __nosave_begin, __nosave_end;
-/*
- * check if given pfn is in the 'nosave' or in the read only NSS section
- */
int pfn_is_nosave(unsigned long pfn)
{
- unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
- >> PAGE_SHIFT;
- unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
- unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
+ /* Always save lowcore pages (LC protection might be enabled). */
+ if (pfn <= LC_PAGES)
+ return 0;
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
- if (pfn >= stext_pfn && pfn <= eshared_pfn) {
- if (ipl_info.type == IPL_TYPE_NSS)
- return 1;
- } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
+ if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
}
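pfn_is_nosave() now leans on the generic frame-number helpers and always reports lowcore frames (pfn <= LC_PAGES) as save-worthy, since low-address protection may be active when the image is written back. The helpers it uses are plain shifts by PAGE_SHIFT; paraphrased from <linux/pfn.h> for reference:

#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)		 /* address -> frame number */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT) /* frame number -> address */

So tprot(PFN_PHYS(pfn)) probes the first byte of the frame under consideration, and the old PAGE_ALIGN()-and-shift arithmetic collapses into the named macros.
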
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 7cd6b096f0d..fe927d0bc20 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
+#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
/*
@@ -41,6 +42,9 @@ swsusp_arch_suspend:
/* Get pointer to save area */
lghi %r1,0x1000
+ /* Save CPU address */
+ stap __LC_CPU_ADDRESS(%r1)
+
/* Store registers */
mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
stfpc 0x31c(%r1) /* store fpu control */
@@ -102,11 +106,10 @@ swsusp_arch_resume:
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
-#ifdef CONFIG_SMP
- /* Save boot cpu number */
- brasl %r14,smp_get_phys_cpu_id
- lgr %r10,%r2
-#endif
+ /* Make all free pages stable */
+ lghi %r2,1
+ brasl %r14,arch_set_page_states
+
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
@@ -133,6 +136,69 @@ swsusp_arch_resume:
2:
ptlb /* flush tlb */
+ /* Reset System */
+ larl %r1,restart_entry
+ larl %r2,.Lrestart_diag308_psw
+ og %r1,0(%r2)
+ stg %r1,0(%r0)
+ larl %r1,.Lnew_pgm_check_psw
+ epsw %r2,%r3
+ stm %r2,%r3,0(%r1)
+ mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1)
+ lghi %r0,0
+ diag %r0,%r0,0x308
+restart_entry:
+ lhi %r1,1
+ sigp %r1,%r0,0x12
+ sam64
+ larl %r1,.Lnew_pgm_check_psw
+ lpswe 0(%r1)
+pgm_check_entry:
+
+ /* Switch to original suspend CPU */
+ larl %r1,.Lresume_cpu /* Resume CPU address: r2 */
+ stap 0(%r1)
+ llgh %r2,0(%r1)
+ lghi %r3,0x1000
+ llgh %r1,__LC_CPU_ADDRESS(%r3) /* Suspend CPU address: r1 */
+ cgr %r1,%r2
+ je restore_registers /* r1 = r2 -> nothing to do */
+ larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
+ mvc __LC_RESTART_PSW(16,%r0),0(%r4)
+3:
+ sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET
+ brc 8,4f /* accepted */
+ brc 2,3b /* busy, try again */
+
+ /* Suspend CPU not available -> panic */
+ larl %r15,init_thread_union
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+ larl %r2,.Lpanic_string
+ larl %r3,_sclp_print_early
+ lghi %r1,0
+ sam31
+ sigp %r1,%r0,0x12
+ basr %r14,%r3
+ larl %r3,.Ldisabled_wait_31
+ lpsw 0(%r3)
+4:
+ /* Switch to suspend CPU */
+ sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */
+ brc 2,4b /* busy, try again */
+5:
+ sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */
+6: j 6b
+
+restart_suspend:
+ larl %r1,.Lresume_cpu
+ llgh %r2,0(%r1)
+7:
+ sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */
+ brc 2,7b /* busy, try again */
+ tmll %r9,0x40 /* Test if resume CPU is stopped */
+ jz 7b
+
+restore_registers:
/* Restore registers */
lghi %r13,0x1000 /* %r1 = pointer to save arae */
@@ -166,19 +232,33 @@ swsusp_arch_resume:
/* Pointer to save area */
lghi %r13,0x1000
-#ifdef CONFIG_SMP
- /* Switch CPUs */
- lgr %r2,%r10 /* get cpu id */
- llgf %r3,0x318(%r13)
- brasl %r14,smp_switch_boot_cpu_in_resume
-#endif
/* Restore prefix register */
spx 0x318(%r13)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
+ /* Make all free pages unstable */
+ lghi %r2,0
+ brasl %r14,arch_set_page_states
+
/* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
br %r14
+
+ .section .data.nosave,"aw",@progbits
+ .align 8
+.Ldisabled_wait_31:
+ .long 0x000a0000,0x00000000
+.Lpanic_string:
+ .asciz "Resume not possible because suspend CPU is no longer available"
+ .align 8
+.Lrestart_diag308_psw:
+ .long 0x00080000,0x80000000
+.Lrestart_suspend_psw:
+ .quad 0x0000000180000000,restart_suspend
+.Lnew_pgm_check_psw:
+ .quad 0,pgm_check_entry
+.Lresume_cpu:
+ .byte 0,0
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index ad1acd20038..30eca070d42 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
SYSCALL(sys_link,sys_link,sys32_link_wrapper)
SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
-SYSCALL(sys_execve,sys_execve,sys32_execve)
+SYSCALL(sys_execve,sys_execve,sys32_execve_wrapper)
SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
@@ -128,7 +128,7 @@ SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
-SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */
+SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */
SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
SYSCALL(sys_newuname,sys_s390_newuname,sys32_newuname_wrapper)
NI_SYSCALL /* modify_ldt for i386 */
@@ -136,8 +136,8 @@ SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,sys32_init_module_wrapper)
-SYSCALL(sys_delete_module,sys_delete_module,sys32_delete_module_wrapper)
+SYSCALL(sys_init_module,sys_init_module,sys_init_module_wrapper)
+SYSCALL(sys_delete_module,sys_delete_module,sys_delete_module_wrapper)
NI_SYSCALL /* 130: old get_kernel_syms */
SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
@@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
-SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
+SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 45e1708b70f..45a3e9a7ae2 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -75,7 +75,7 @@ __setup("vdso=", vdso_setup);
static union {
struct vdso_data data;
u8 page[PAGE_SIZE];
-} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
/*
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index ca78ad60ba2..d13e8755a8c 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -13,7 +13,7 @@ KBUILD_AFLAGS_31 += -m31 -s
KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S
index 61639a89e70..ae42f8ce350 100644
--- a/arch/s390/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S
@@ -1,7 +1,8 @@
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/page.h>
- .section ".data.page_aligned"
+ __PAGE_ALIGNED_DATA
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 6fc8e829258..449352dda9c 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -13,7 +13,7 @@ KBUILD_AFLAGS_64 += -m64 -s
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S
index d8e2ac14d56..c245842b516 100644
--- a/arch/s390/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S
@@ -1,7 +1,8 @@
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/page.h>
- .section ".data.page_aligned"
+ __PAGE_ALIGNED_DATA
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2c2f9835341..43486c2408e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -478,7 +478,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
if (!inti)
return -ENOMEM;
- inti->type = KVM_S390_PROGRAM_INT;;
+ inti->type = KVM_S390_PROGRAM_INT;
inti->pgm.code = code;
VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 413c240cbca..b201135cc18 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -262,7 +262,7 @@ cmm_skip_blanks(char *cp, char **endp)
static struct ctl_table cmm_table[];
static int
-cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
+cmm_pages_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
@@ -303,7 +303,7 @@ cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
}
static int
-cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
+cmm_timeout_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1abbadd497e..6d507462967 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,7 +10,7 @@
* Copyright (C) 1995 Linus Torvalds
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
* interrupts again and then search the VMAs
*/
local_irq_enable();
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
si_code = SEGV_MAPERR;
@@ -366,11 +366,11 @@ good_area:
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c634dfbe92e..76564795222 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,7 @@ void __init mem_init(void)
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index f92ec203ad9..098923ae458 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -50,28 +50,64 @@ void __init cmma_init(void)
cmma_flag = 0;
}
-void arch_free_page(struct page *page, int order)
+static inline void set_page_unstable(struct page *page, int order)
{
int i, rc;
- if (!cmma_flag)
- return;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
- : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+ : "a" (page_to_phys(page + i)),
"i" (ESSA_SET_UNUSED));
}
-void arch_alloc_page(struct page *page, int order)
+void arch_free_page(struct page *page, int order)
{
- int i, rc;
-
if (!cmma_flag)
return;
+ set_page_unstable(page, order);
+}
+
+static inline void set_page_stable(struct page *page, int order)
+{
+ int i, rc;
+
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
- : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+ : "a" (page_to_phys(page + i)),
"i" (ESSA_SET_STABLE));
}
+
+void arch_alloc_page(struct page *page, int order)
+{
+ if (!cmma_flag)
+ return;
+ set_page_stable(page, order);
+}
+
+void arch_set_page_states(int make_stable)
+{
+ unsigned long flags, order, t;
+ struct list_head *l;
+ struct page *page;
+ struct zone *zone;
+
+ if (!cmma_flag)
+ return;
+ if (make_stable)
+ drain_local_pages(NULL);
+ for_each_populated_zone(zone) {
+ spin_lock_irqsave(&zone->lock, flags);
+ for_each_migratetype_order(order, t) {
+ list_for_each(l, &zone->free_area[order].free_list[t]) {
+ page = list_entry(l, struct page, lru);
+ if (make_stable)
+ set_page_stable(page, order);
+ else
+ set_page_unstable(page, order);
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c7021524707..c60bfb309ce 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -314,21 +314,18 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-#ifdef CONFIG_DEBUG_PAGEALLOC
-#ifdef CONFIG_HIBERNATION
+#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
unsigned long addr;
int cc;
addr = page_to_phys(page);
- asm("lra %1,0(%1)\n"
- "ipm %0\n"
- "srl %0,28"
- :"=d"(cc),"+a"(addr)::"cc");
+ asm volatile(
+ " lra %1,0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "+a" (addr) : : "cc");
return cc == 0;
}
-
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
+#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/score/include/asm/page.h b/arch/score/include/asm/page.h
index ee5821042fc..d92a5a2d36d 100644
--- a/arch/score/include/asm/page.h
+++ b/arch/score/include/asm/page.h
@@ -2,10 +2,11 @@
#define _ASM_SCORE_PAGE_H
#include <linux/pfn.h>
+#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index 3a112288552..55939992c27 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -7,6 +7,15 @@
#define KU_USER 0x08
#define KU_KERN 0x00
+#include <asm/page.h>
+#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER (1)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_MASK (THREAD_SIZE - _AC(1,UL))
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
#ifndef __ASSEMBLY__
#include <asm/processor.h>
@@ -62,12 +71,6 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
-/* thread information allocation */
-#define THREAD_SIZE_ORDER (1)
-#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define THREAD_MASK (THREAD_SIZE - 1UL)
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
#define free_thread_info(info) kfree(info)
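Moving the THREAD_SIZE block above the #ifndef __ASSEMBLY__ guard and switching to _AC() is what allows vmlinux.lds.S (further below) to include this header and use THREAD_SIZE in RW_DATA_SECTION(): _AC() keeps the UL suffix when compiling C but drops it when the header is read by the assembler or linker-script preprocessor. Paraphrased from include/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* "1" when assembling or linking */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* "1UL" when compiling C */
#endif
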
diff --git a/arch/score/kernel/init_task.c b/arch/score/kernel/init_task.c
index ff952f6c63f..baa03ee217d 100644
--- a/arch/score/kernel/init_task.c
+++ b/arch/score/kernel/init_task.c
@@ -34,9 +34,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"), __aligned__(THREAD_SIZE))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S
index f85569831d5..eebcbaa4e97 100644
--- a/arch/score/kernel/vmlinux.lds.S
+++ b/arch/score/kernel/vmlinux.lds.S
@@ -24,6 +24,8 @@
*/
#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
OUTPUT_ARCH(score)
ENTRY(_stext)
@@ -49,21 +51,9 @@ SECTIONS
. = ALIGN(16);
RODATA
- /* Exception table */
- . = ALIGN(16);
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
- /* writeable */
- .data ALIGN (4096): {
- *(.data.init_task)
-
- DATA_DATA
- CONSTRUCTORS
- }
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
@@ -72,45 +62,14 @@ SECTIONS
.sdata : {
*(.sdata)
}
-
- . = ALIGN(32);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
_edata = .; /* End of data section */
/* will be freed after init */
- . = ALIGN(4096); /* Init code and data */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- . = ALIGN(4096);
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(16);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
-
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
/* .exit.text is discarded at runtime, not link time, to deal with
* references from .rodata
@@ -121,28 +80,10 @@ SECTIONS
.exit.data : {
EXIT_DATA
}
-#if defined(CONFIG_BLK_DEV_INITRD)
- .init.ramfs ALIGN(4096): {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- . = ALIGN(4);
- LONG(0);
- }
-#endif
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .sbss : {
- *(.sbss)
- *(.scommon)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
+ BSS_SECTION(0, 0, 0)
_end = .;
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 4df3570fe51..b940424f8cc 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,7 +16,7 @@ config SUPERH
select HAVE_IOREMAP_PROT if MMU
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 96bc1698310..5f9881e16e2 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -121,6 +121,7 @@ static struct resource sh_eth_resources[] = {
struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8700 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .ether_link_active_low = 1
};
static struct platform_device sh_eth_device = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 00973e0f8c6..e78c3be8ad2 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -22,6 +22,7 @@
#include <linux/usb/r8a66597.h>
#include <video/sh_mobile_lcdc.h>
#include <media/sh_mobile_ceu.h>
+#include <sound/sh_fsi.h>
#include <asm/io.h>
#include <asm/heartbeat.h>
#include <asm/sh_eth.h>
@@ -255,6 +256,65 @@ static struct platform_device ceu1_device = {
},
};
+/* FSI */
+/*
+ * FSI-A use external clock which came from ak464x.
+ * So, we should change parent of fsi
+ */
+#define FCLKACR 0xa4150008
+static void fsimck_init(struct clk *clk)
+{
+ u32 status = ctrl_inl(clk->enable_reg);
+
+ /* use external clock */
+ status &= ~0x000000ff;
+ status |= 0x00000080;
+ ctrl_outl(status, clk->enable_reg);
+}
+
+static struct clk_ops fsimck_clk_ops = {
+ .init = fsimck_init,
+};
+
+static struct clk fsimcka_clk = {
+ .name = "fsimcka_clk",
+ .id = -1,
+ .ops = &fsimck_clk_ops,
+ .enable_reg = (void __iomem *)FCLKACR,
+ .rate = 0, /* unknown */
+};
+
+struct sh_fsi_platform_info fsi_info = {
+ .porta_flags = SH_FSI_BRS_INV |
+ SH_FSI_OUT_SLAVE_MODE |
+ SH_FSI_IN_SLAVE_MODE |
+ SH_FSI_OFMT(PCM) |
+ SH_FSI_IFMT(PCM),
+};
+
+static struct resource fsi_resources[] = {
+ [0] = {
+ .name = "FSI",
+ .start = 0xFE3C0000,
+ .end = 0xFE3C021d,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 108,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device fsi_device = {
+ .name = "sh_fsi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fsi_resources),
+ .resource = fsi_resources,
+ .dev = {
+ .platform_data = &fsi_info,
+ },
+};
+
/* KEYSC in SoC (Needs SW33-2 set to ON) */
static struct sh_keysc_info keysc_info = {
.mode = SH_KEYSC_MODE_1,
@@ -399,6 +459,7 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&sh_eth_device,
&sh7724_usb0_host_device,
&sh7724_usb1_gadget_device,
+ &fsi_device,
};
#define EEPROM_OP 0xBA206000
@@ -466,11 +527,13 @@ static void __init sh_eth_init(void)
static int __init devices_setup(void)
{
u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
+ struct clk *fsia_clk;
/* Reset Release */
ctrl_outw(ctrl_inw(FPGA_OUT) &
~((1 << 1) | /* LAN */
(1 << 6) | /* VIDEO DAC */
+ (1 << 7) | /* AK4643 */
(1 << 12) | /* USB0 */
(1 << 14)), /* RMII */
FPGA_OUT);
@@ -609,6 +672,32 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_KEYOUT1, NULL);
gpio_request(GPIO_FN_KEYOUT0, NULL);
+ /* enable FSI */
+ gpio_request(GPIO_FN_FSIMCKB, NULL);
+ gpio_request(GPIO_FN_FSIMCKA, NULL);
+ gpio_request(GPIO_FN_FSIOASD, NULL);
+ gpio_request(GPIO_FN_FSIIABCK, NULL);
+ gpio_request(GPIO_FN_FSIIALRCK, NULL);
+ gpio_request(GPIO_FN_FSIOABCK, NULL);
+ gpio_request(GPIO_FN_FSIOALRCK, NULL);
+ gpio_request(GPIO_FN_CLKAUDIOAO, NULL);
+ gpio_request(GPIO_FN_FSIIBSD, NULL);
+ gpio_request(GPIO_FN_FSIOBSD, NULL);
+ gpio_request(GPIO_FN_FSIIBBCK, NULL);
+ gpio_request(GPIO_FN_FSIIBLRCK, NULL);
+ gpio_request(GPIO_FN_FSIOBBCK, NULL);
+ gpio_request(GPIO_FN_FSIOBLRCK, NULL);
+ gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
+ gpio_request(GPIO_FN_FSIIASD, NULL);
+
+ /* change parent of FSI A */
+ fsia_clk = clk_get(NULL, "fsia_clk");
+ clk_register(&fsimcka_clk);
+ clk_set_parent(fsia_clk, &fsimcka_clk);
+ clk_set_rate(fsia_clk, 11000);
+ clk_set_rate(&fsimcka_clk, 11000);
+ clk_put(fsia_clk);
+
/*
* enable SH-Eth
*
diff --git a/arch/sh/boot/compressed/install.sh b/arch/sh/boot/compressed/install.sh
index 90589f0fec1..f9f41818b17 100644
--- a/arch/sh/boot/compressed/install.sh
+++ b/arch/sh/boot/compressed/install.sh
@@ -23,8 +23,8 @@
# User may have a custom install script
-if [ -x /sbin/installkernel ]; then
- exec /sbin/installkernel "$@"
+if [ -x /sbin/${INSTALLKERNEL} ]; then
+ exec /sbin/${INSTALLKERNEL} "$@"
fi
if [ "$2" = "zImage" ]; then
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 6c38a43594f..2f78d01cc6c 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 16:04:11 2009
+# Linux kernel version: 2.6.31
+# Fri Sep 25 11:22:50 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +42,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -51,11 +60,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -88,18 +98,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -107,6 +118,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -119,7 +134,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -135,7 +150,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System type
@@ -169,6 +184,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
CONFIG_CPU_SUBTYPE_SH7723=y
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -215,6 +231,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -296,7 +313,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty1 console=ttySC5,38400 root=/dev/nfs ip=dhcp"
#
@@ -316,7 +334,13 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -364,6 +388,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -393,6 +418,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -400,7 +426,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -413,6 +438,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -422,9 +448,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -480,6 +506,7 @@ CONFIG_MTD_PHYSMAP=y
#
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -583,7 +610,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -637,12 +663,10 @@ CONFIG_SMSC911X=y
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -726,6 +750,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -736,6 +761,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
@@ -757,9 +783,6 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -780,6 +803,11 @@ CONFIG_SPI_GPIO=y
#
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_GPIO_SYSFS is not set
@@ -804,11 +832,15 @@ CONFIG_GPIOLIB=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -828,8 +860,12 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -957,6 +993,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=y
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -992,6 +1030,7 @@ CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1003,6 +1042,7 @@ CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1056,8 +1096,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1122,7 +1164,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1202,6 +1243,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1212,8 +1254,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -1221,6 +1266,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1234,7 +1280,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1275,11 +1320,13 @@ CONFIG_CRYPTO_CBC=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1341,5 +1388,6 @@ CONFIG_CRC7=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig
index 95717a041ed..aedbd4f1304 100644
--- a/arch/sh/configs/dreamcast_defconfig
+++ b/arch/sh/configs/dreamcast_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:24:48 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:56:07 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,7 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -40,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -86,19 +96,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -108,6 +119,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -120,7 +135,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -167,6 +182,7 @@ CONFIG_CPU_SUBTYPE_SH7091=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -218,6 +234,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -322,7 +339,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,115200 panic=3"
#
@@ -395,6 +413,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -424,6 +443,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -431,7 +451,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -444,6 +463,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
# CONFIG_STANDALONE is not set
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -496,7 +516,11 @@ CONFIG_HAVE_IDE=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -561,10 +585,7 @@ CONFIG_8139TOO=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -605,13 +626,14 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
#
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_KEYBOARD_MAPLE=y
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_MAPLE=y
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
@@ -675,11 +697,15 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -716,6 +742,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -807,7 +834,6 @@ CONFIG_LOGO_SUPERH_CLUT224=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -861,8 +887,10 @@ CONFIG_RTC_LIB=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -922,7 +950,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -949,6 +976,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -958,8 +986,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -967,6 +998,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -980,7 +1012,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1012,11 +1043,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1079,5 +1112,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 9a22c64775b..0774924623c 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc7
-# Tue Sep 8 13:56:18 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:56:41 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -59,11 +59,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -79,15 +80,9 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
-CONFIG_INITRAMFS_ROOT_UID=0
-CONFIG_INITRAMFS_ROOT_GID=0
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
-CONFIG_INITRAMFS_COMPRESSION_NONE=y
-# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
-# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
-# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -107,20 +102,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -237,6 +231,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -319,7 +314,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200"
#
@@ -382,6 +378,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -422,6 +419,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -530,10 +528,7 @@ CONFIG_SH_ETH=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -625,6 +620,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
@@ -657,9 +653,6 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -693,11 +686,14 @@ CONFIG_GPIO_SYSFS=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -717,6 +713,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
@@ -771,6 +768,7 @@ CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_HWA_HCD is not set
@@ -875,6 +873,7 @@ CONFIG_EXT2_FS=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
# CONFIG_FSNOTIFY is not set
# CONFIG_DNOTIFY is not set
@@ -975,6 +974,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -989,7 +989,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index 2050a76683c..ac6469718a2 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc7
-# Wed Aug 26 09:09:07 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:45:39 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -60,11 +60,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -97,20 +98,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -232,6 +232,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -314,7 +315,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=120M memchunk.vpu=4m"
#
@@ -388,6 +390,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -417,6 +420,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -424,7 +428,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -437,6 +440,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -446,9 +450,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -504,6 +508,7 @@ CONFIG_MTD_PHYSMAP=y
#
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -666,10 +671,7 @@ CONFIG_SH_ETH=y
# CONFIG_KS8851 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -713,11 +715,15 @@ CONFIG_INPUT_EVDEV=y
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_GPIO is not set
# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_SUNKBD is not set
CONFIG_KEYBOARD_SH_KEYSC=y
@@ -771,6 +777,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -804,9 +811,6 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -856,11 +860,15 @@ CONFIG_GPIOLIB=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -880,8 +888,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
@@ -936,8 +946,10 @@ CONFIG_SOC_CAMERA=y
CONFIG_VIDEO_SH_MOBILE_CEU=y
# CONFIG_V4L_USB_DRIVERS is not set
CONFIG_RADIO_ADAPTERS=y
+# CONFIG_I2C_SI4713 is not set
+# CONFIG_RADIO_SI4713 is not set
# CONFIG_USB_DSBR is not set
-# CONFIG_USB_SI470X is not set
+# CONFIG_RADIO_SI470X is not set
# CONFIG_USB_MR800 is not set
# CONFIG_RADIO_TEA5764 is not set
# CONFIG_DAB is not set
@@ -1003,7 +1015,6 @@ CONFIG_LOGO_SUPERH_CLUT224=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1026,6 +1037,7 @@ CONFIG_USB_HID=y
# CONFIG_HID_EZKEY is not set
# CONFIG_HID_KYE is not set
# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
@@ -1070,6 +1082,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_HWA_HCD is not set
@@ -1161,6 +1174,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=y
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -1208,6 +1223,7 @@ CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1265,6 +1281,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1329,7 +1346,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1409,6 +1425,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1423,7 +1440,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1446,7 +1463,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1487,11 +1503,13 @@ CONFIG_CRYPTO_CBC=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig
index 497414c439f..86c9bc05062 100644
--- a/arch/sh/configs/edosk7705_defconfig
+++ b/arch/sh/configs/edosk7705_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:25:35 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:57:13 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,6 +13,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -26,7 +27,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -36,17 +39,24 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
@@ -70,18 +80,19 @@ CONFIG_EMBEDDED=y
# CONFIG_EVENTFD is not set
CONFIG_SHMEM=y
# CONFIG_AIO is not set
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -89,6 +100,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_BASE_SMALL=1
@@ -125,6 +140,7 @@ CONFIG_CPU_SUBTYPE_SH7705=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -165,7 +181,6 @@ CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -173,6 +188,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -250,7 +266,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -321,11 +338,14 @@ CONFIG_HAVE_IDE=y
# CONFIG_R3964 is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -409,6 +429,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
@@ -416,8 +437,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -425,6 +449,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -449,4 +474,5 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
index 77684ed9127..4c0f82b7def 100644
--- a/arch/sh/configs/edosk7760_defconfig
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:25:55 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:57:30 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION="_edosk7760"
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -51,11 +60,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
@@ -91,19 +101,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -112,6 +123,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -124,7 +139,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -171,6 +186,7 @@ CONFIG_CPU_SUBTYPE_SH7760=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -216,6 +232,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -304,7 +321,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x02000000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="mem=64M console=ttySC2,115200 root=/dev/nfs rw nfsroot=192.168.0.3:/scripts/filesys ip=192.168.0.4"
#
@@ -367,6 +385,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -396,6 +415,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -403,7 +423,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -416,6 +435,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -426,9 +446,9 @@ CONFIG_DEBUG_DEVRES=y
CONFIG_MTD=y
CONFIG_MTD_DEBUG=y
CONFIG_MTD_DEBUG_VERBOSE=0
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -563,10 +583,7 @@ CONFIG_SMC91X=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -649,6 +666,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -659,6 +677,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH7760=y
# CONFIG_I2C_SH_MOBILE is not set
@@ -680,20 +699,21 @@ CONFIG_I2C_SH7760=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
CONFIG_I2C_DEBUG_CORE=y
CONFIG_I2C_DEBUG_ALGO=y
CONFIG_I2C_DEBUG_BUS=y
CONFIG_I2C_DEBUG_CHIP=y
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -712,8 +732,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -837,8 +859,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -900,7 +924,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -974,6 +997,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_UNUSED_SYMBOLS=y
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1010,18 +1034,23 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1029,6 +1058,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1045,11 +1075,11 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1064,7 +1094,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_HASH=y
@@ -1100,11 +1129,13 @@ CONFIG_CRYPTO_HASH2=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1168,5 +1199,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index 881128eeab3..9b785517abc 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:27:21 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:58:18 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -48,11 +57,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -92,19 +102,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -114,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -125,7 +142,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -173,6 +190,7 @@ CONFIG_CPU_SH4A=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
CONFIG_CPU_SUBTYPE_SH7763=y
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -214,7 +232,6 @@ CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -222,6 +239,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -303,7 +321,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/nfs ip=bootp"
#
@@ -371,6 +390,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -412,6 +432,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -421,9 +442,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -477,6 +498,7 @@ CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_GPIO_ADDR is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -554,7 +576,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -610,10 +631,7 @@ CONFIG_SH_ETH=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -705,11 +723,15 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -819,6 +841,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -888,6 +911,7 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -930,8 +954,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -997,7 +1023,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -1071,6 +1096,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1082,11 +1108,15 @@ CONFIG_STACKTRACE=y
# CONFIG_LATENCYTOP is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -1096,6 +1126,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1109,7 +1140,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1141,11 +1171,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1208,5 +1240,6 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 3249d46fdc1..f59be446f82 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:28:12 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 17:59:45 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
CONFIG_BSD_PROCESS_ACCT=y
@@ -46,11 +55,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -81,18 +91,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -100,6 +111,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -107,7 +122,7 @@ CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -154,6 +169,7 @@ CONFIG_CPU_SUBTYPE_SH7709=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -199,6 +215,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -288,7 +305,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -322,6 +340,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_APM_EMULATION=y
+# CONFIG_PM_RUNTIME is not set
# CONFIG_CPU_IDLE is not set
# CONFIG_NET is not set
@@ -390,7 +409,6 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -399,6 +417,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -428,13 +447,14 @@ CONFIG_INPUT_EVDEV=y
#
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_KEYBOARD_HP6XX=y
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_HP6XX=y
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -452,6 +472,7 @@ CONFIG_TOUCHSCREEN_HP600=y
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@@ -507,11 +528,15 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -663,7 +688,9 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -725,7 +752,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
#
# Partition Types
@@ -782,6 +808,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -791,8 +818,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -800,6 +830,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -813,7 +844,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -854,11 +884,13 @@ CONFIG_CRYPTO_PCBC=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -920,4 +952,5 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig
index c0f9263e138..02590e127f7 100644
--- a/arch/sh/configs/kfr2r09-romimage_defconfig
+++ b/arch/sh/configs/kfr2r09-romimage_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc6
-# Thu Aug 20 15:09:16 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:01:48 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -58,11 +58,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -78,15 +79,9 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
-CONFIG_INITRAMFS_ROOT_UID=0
-CONFIG_INITRAMFS_ROOT_GID=0
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
-# CONFIG_INITRAMFS_COMPRESSION_NONE is not set
-CONFIG_INITRAMFS_COMPRESSION_GZIP=y
-# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
-# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -106,20 +101,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -173,6 +167,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
CONFIG_CPU_SUBTYPE_SH7724=y
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -219,6 +214,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -301,7 +297,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,115200 quiet"
#
@@ -324,6 +321,7 @@ CONFIG_BINFMT_ELF=y
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
# CONFIG_SUSPEND is not set
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -362,6 +360,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -402,6 +401,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -489,6 +489,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
@@ -520,9 +521,6 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -556,11 +554,14 @@ CONFIG_GPIO_SYSFS=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -580,6 +581,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
@@ -718,6 +720,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -732,7 +735,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig
index cef61319d2f..8ae65d294b1 100644
--- a/arch/sh/configs/kfr2r09_defconfig
+++ b/arch/sh/configs/kfr2r09_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc6
-# Thu Aug 20 21:58:52 2009
+# Linux kernel version: 2.6.31
+# Fri Sep 25 11:54:22 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -60,11 +60,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -102,20 +103,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -190,6 +190,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
CONFIG_CPU_SUBTYPE_SH7724=y
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -236,6 +237,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -319,7 +321,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty0 console=ttySC1,115200"
#
@@ -343,6 +346,7 @@ CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
# CONFIG_SUSPEND is not set
# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -383,6 +387,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -423,6 +428,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -432,9 +438,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -500,7 +506,12 @@ CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_DOC2001 is not set
# CONFIG_MTD_DOC2001PLUS is not set
# CONFIG_MTD_NAND is not set
-# CONFIG_MTD_ONENAND is not set
+CONFIG_MTD_ONENAND=y
+# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
+CONFIG_MTD_ONENAND_GENERIC=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
#
# LPDDR flash memory drivers
@@ -564,11 +575,15 @@ CONFIG_INPUT_EVDEV=y
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_GPIO is not set
# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_SUNKBD is not set
CONFIG_KEYBOARD_SH_KEYSC=y
@@ -621,6 +636,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
@@ -653,9 +669,6 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -689,11 +702,14 @@ CONFIG_GPIO_SYSFS=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -713,6 +729,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
@@ -856,6 +873,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -941,6 +960,7 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1002,6 +1022,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1016,7 +1037,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index ba05739fda2..c2a9a399638 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:28:45 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:05:49 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -49,11 +58,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -83,19 +93,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -104,6 +115,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -116,7 +131,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -163,6 +178,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -208,6 +224,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -295,7 +312,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -411,6 +429,7 @@ CONFIG_IP_NF_QUEUE=m
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -442,6 +461,7 @@ CONFIG_ATALK=m
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -449,7 +469,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -462,6 +481,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -631,6 +651,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
# CONFIG_SCSI_DH is not set
@@ -653,7 +674,11 @@ CONFIG_MD_RAID1=m
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -729,6 +754,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -754,10 +780,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -868,10 +891,20 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
@@ -886,9 +919,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -910,6 +941,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -928,11 +960,11 @@ CONFIG_DUMMY_CONSOLE=y
CONFIG_FONT_8x16=y
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
# CONFIG_SND is not set
CONFIG_SOUND_PRIME=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -961,6 +993,7 @@ CONFIG_HID_CYPRESS=m
CONFIG_HID_EZKEY=m
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=m
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=m
# CONFIG_LOGITECH_FF is not set
@@ -1011,6 +1044,7 @@ CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1177,8 +1211,10 @@ CONFIG_REISERFS_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1251,7 +1287,6 @@ CONFIG_ROMFS_ON_BLOCK=y
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
# CONFIG_UFS_DEBUG is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
@@ -1331,6 +1366,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1340,8 +1376,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -1350,6 +1389,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
# CONFIG_EARLY_PRINTK is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1363,7 +1403,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1395,11 +1434,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1462,5 +1503,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index c0bc2fd033b..ec0c0b432c7 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:29:50 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:09:59 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -49,11 +58,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -83,19 +93,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -104,6 +115,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -116,7 +131,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -163,6 +178,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -208,6 +224,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -295,7 +312,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,115200 root=/dev/sda1"
#
@@ -411,6 +429,7 @@ CONFIG_NETFILTER_ADVANCED=y
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -440,6 +459,7 @@ CONFIG_NETFILTER_ADVANCED=y
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -447,7 +467,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -460,6 +479,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -572,12 +592,14 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -599,6 +621,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -627,6 +650,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_PCMCIA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -645,7 +669,11 @@ CONFIG_PATA_PLATFORM=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -725,6 +753,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -750,10 +779,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -863,10 +889,20 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
@@ -881,9 +917,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -905,6 +939,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -923,7 +958,6 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -1021,8 +1055,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1089,7 +1125,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -1155,6 +1190,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1164,8 +1200,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -1174,6 +1213,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
# CONFIG_EARLY_PRINTK is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1187,7 +1227,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1219,11 +1258,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1286,5 +1327,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index c5859e82d91..79091e3e32c 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:30:31 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:10:49 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_AUDIT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_GROUP_SCHED is not set
@@ -91,18 +101,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -111,6 +122,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -123,7 +138,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -170,6 +185,7 @@ CONFIG_CPU_SUBTYPE_SH7720=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -215,6 +231,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -306,7 +323,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -368,6 +386,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -397,6 +416,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -404,7 +424,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -417,6 +436,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_FW_LOADER=y
@@ -428,9 +448,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
@@ -589,10 +609,7 @@ CONFIG_SMSC911X=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -631,13 +648,15 @@ CONFIG_INPUT_EVDEV=y
#
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
@@ -701,6 +720,11 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -721,11 +745,14 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -829,8 +856,10 @@ CONFIG_JBD=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -900,7 +929,6 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -976,6 +1004,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1006,24 +1035,30 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1036,16 +1071,15 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-# CONFIG_KMEMCHECK is not set
# CONFIG_SH_STANDARD_BIOS is not set
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xa4430000
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1077,5 +1111,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig
index e5a21e1d625..6bb5976aff2 100644
--- a/arch/sh/configs/microdev_defconfig
+++ b/arch/sh/configs/microdev_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:31:56 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:14:35 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -49,11 +58,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -87,18 +97,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -106,6 +117,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -113,7 +128,7 @@ CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -160,6 +175,7 @@ CONFIG_CPU_SH4=y
CONFIG_CPU_SUBTYPE_SH4_202=y
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -211,6 +227,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -296,7 +313,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/hda1"
#
@@ -364,6 +382,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -393,6 +412,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -400,7 +420,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -413,6 +432,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -499,10 +519,7 @@ CONFIG_SMC91X=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -561,10 +578,20 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -575,9 +602,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -662,8 +687,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -726,12 +753,12 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
@@ -803,6 +830,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -812,8 +840,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -821,6 +852,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -834,7 +866,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -875,11 +906,13 @@ CONFIG_CRYPTO_ECB=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -943,5 +976,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index b18cfd39cac..65018283c3a 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 16:06:48 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:17:41 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,7 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +42,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -50,11 +59,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -89,19 +99,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -111,6 +123,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -122,7 +139,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -138,7 +155,7 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System type
@@ -173,6 +190,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -224,6 +242,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -308,7 +327,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp"
#
@@ -328,7 +348,13 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -376,6 +402,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -406,6 +433,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -414,7 +442,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -427,6 +454,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -436,9 +464,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -585,7 +613,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -621,10 +648,7 @@ CONFIG_SMC91X=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -659,14 +683,19 @@ CONFIG_INPUT_EVDEV=y
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
CONFIG_KEYBOARD_SH_KEYSC=y
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -715,6 +744,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
@@ -725,6 +755,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
@@ -746,15 +777,17 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_GPIO_SYSFS is not set
@@ -777,11 +810,14 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -801,8 +837,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -922,7 +960,6 @@ CONFIG_LOGO_SUPERH_VGA16=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -952,13 +989,13 @@ CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
# CONFIG_USB_GADGET_S3C_HSOTG is not set
-# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
CONFIG_USB_GADGET_M66592=y
CONFIG_USB_M66592=y
-CONFIG_SUPERH_BUILT_IN_M66592=y
# CONFIG_USB_GADGET_AMD5536UDC is not set
# CONFIG_USB_GADGET_FSL_QE is not set
# CONFIG_USB_GADGET_CI13XXX is not set
@@ -1017,6 +1054,7 @@ CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1064,8 +1102,10 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -1126,7 +1166,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -1161,6 +1200,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1172,11 +1212,15 @@ CONFIG_STACKTRACE=y
# CONFIG_LATENCYTOP is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -1188,6 +1232,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe00000
CONFIG_EARLY_PRINTK=y
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1201,7 +1246,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER2=y
@@ -1240,11 +1284,13 @@ CONFIG_CRYPTO_WORKQUEUE=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1308,5 +1354,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig
index 67edd3f3f9b..7fc1952419a 100644
--- a/arch/sh/configs/polaris_defconfig
+++ b/arch/sh/configs/polaris_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:33:28 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:20:53 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_AUDIT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -91,18 +101,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -111,6 +122,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -123,7 +138,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -170,6 +185,7 @@ CONFIG_CPU_SUBTYPE_SH7709=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -215,6 +231,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -302,7 +319,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,115200 root=/dev/mtdblock2 rootfstype=jffs2 mem=63M mtdparts=physmap-flash.0:0x00100000(bootloader)ro,0x00500000(Kernel)ro,0x00A00000(Filesystem)"
#
@@ -363,6 +381,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -403,6 +422,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -414,9 +434,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -577,10 +597,7 @@ CONFIG_SMSC911X=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -666,11 +683,15 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -770,8 +791,10 @@ CONFIG_RTC_DRV_SH=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -841,7 +864,6 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -877,6 +899,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -916,18 +939,23 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
CONFIG_DEBUG_SG=y
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -935,6 +963,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -947,16 +976,15 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-# CONFIG_KMEMCHECK is not set
# CONFIG_SH_STANDARD_BIOS is not set
CONFIG_EARLY_SCIF_CONSOLE=y
-CONFIG_EARLY_SCIF_CONSOLE_PORT=0x00000000
+CONFIG_EARLY_SCIF_CONSOLE_PORT=0xa4000150
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -987,5 +1015,6 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 107a8e337ec..903b021e8d9 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:34:44 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:24:31 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,8 +29,10 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -40,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -92,20 +102,22 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -115,6 +127,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -126,7 +143,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -174,6 +191,7 @@ CONFIG_CPU_SH4A=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
CONFIG_CPU_SUBTYPE_SH7780=y
@@ -229,6 +247,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -315,7 +334,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
#
@@ -396,6 +416,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
@@ -428,6 +449,7 @@ CONFIG_LLC=m
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -436,7 +458,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -449,6 +470,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -460,9 +482,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -513,6 +535,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PCI is not set
+# CONFIG_MTD_GPIO_ADDR is not set
# CONFIG_MTD_INTEL_VR_NOR is not set
# CONFIG_MTD_PLATRAM is not set
@@ -651,11 +674,13 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -677,6 +702,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -704,6 +730,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -722,7 +749,11 @@ CONFIG_PATA_PLATFORM=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -804,6 +835,7 @@ CONFIG_R8169=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -829,10 +861,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -873,13 +902,17 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -933,6 +966,7 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -961,6 +995,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_HIGHLANDER=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
@@ -987,19 +1022,26 @@ CONFIG_I2C_HIGHLANDER=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADM1021 is not set
@@ -1049,6 +1091,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADS7828 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_VT8231 is not set
@@ -1060,7 +1103,6 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=y
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
@@ -1081,14 +1123,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1101,11 +1146,11 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_DISPLAY_SUPPORT is not set
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
# CONFIG_SND is not set
CONFIG_SOUND_PRIME=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -1169,6 +1214,7 @@ CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1221,8 +1267,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1289,12 +1337,12 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
@@ -1370,6 +1418,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1404,21 +1453,27 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
@@ -1427,6 +1482,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1445,11 +1501,11 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe00000
# CONFIG_EARLY_PRINTK is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1464,7 +1520,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1506,11 +1561,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1573,5 +1630,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index 8a3dc300db4..27ff46c13a8 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:37:20 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:29:23 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,8 +30,10 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -41,6 +44,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -56,12 +65,12 @@ CONFIG_AUDIT_TREE=y
#
# RCU Subsystem
#
-# CONFIG_CLASSIC_RCU is not set
-# CONFIG_TREE_RCU is not set
-CONFIG_PREEMPT_RCU=y
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
CONFIG_RCU_TRACE=y
-# CONFIG_TREE_RCU_TRACE is not set
-CONFIG_PREEMPT_RCU_TRACE=y
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_TREE_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -93,20 +102,22 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
@@ -117,6 +128,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -129,7 +145,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -178,6 +194,7 @@ CONFIG_CPU_SHX2=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -231,7 +248,6 @@ CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -239,6 +255,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -338,7 +355,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
#
@@ -419,6 +437,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
@@ -452,6 +471,7 @@ CONFIG_LLC=m
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -460,7 +480,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -473,6 +492,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -591,11 +611,13 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -617,6 +639,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -644,6 +667,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -662,7 +686,11 @@ CONFIG_PATA_PLATFORM=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -721,6 +749,7 @@ CONFIG_R8169=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -746,10 +775,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -790,14 +816,19 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -851,6 +882,7 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -879,6 +911,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
CONFIG_I2C_HIGHLANDER=y
# CONFIG_I2C_OCORES is not set
@@ -906,15 +939,17 @@ CONFIG_I2C_HIGHLANDER=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -935,14 +970,24 @@ CONFIG_GPIOLIB=y
# PCI GPIO expanders:
#
# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADM1021 is not set
@@ -993,6 +1038,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADS7828 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_VT8231 is not set
@@ -1004,9 +1050,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -1026,14 +1070,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1099,11 +1146,11 @@ CONFIG_FB_SH_MOBILE_LCDC=m
# CONFIG_LOGO is not set
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
# CONFIG_SND is not set
CONFIG_SOUND_PRIME=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -1167,6 +1214,7 @@ CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1219,8 +1267,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1286,12 +1336,12 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
@@ -1367,6 +1417,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1401,22 +1452,29 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
@@ -1425,6 +1483,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1442,11 +1501,11 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_4KSTACKS=y
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1461,7 +1520,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1503,11 +1561,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1571,5 +1631,6 @@ CONFIG_AUDIT_GENERIC=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig
index 55c3656a75c..c40db12e9ad 100644
--- a/arch/sh/configs/rsk7201_defconfig
+++ b/arch/sh/configs/rsk7201_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:39:54 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:34:29 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -45,11 +54,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
# CONFIG_IKCONFIG_PROC is not set
CONFIG_LOG_BUF_SHIFT=14
@@ -86,19 +96,21 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_AIO is not set
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -107,6 +119,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
@@ -117,7 +134,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -165,6 +182,7 @@ CONFIG_CPU_SUBTYPE_SH7201=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -285,7 +303,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ignore_loglevel"
#
@@ -309,6 +328,7 @@ CONFIG_BINFMT_SHARED_FLAT=y
#
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
+# CONFIG_PM_RUNTIME is not set
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
# CONFIG_NET is not set
@@ -327,9 +347,9 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
@@ -498,6 +518,11 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -597,7 +622,9 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
# CONFIG_FILE_LOCKING is not set
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -632,7 +659,6 @@ CONFIG_INOTIFY_USER=y
CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y
@@ -668,7 +694,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
#
# Partition Types
@@ -686,6 +711,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -698,11 +724,15 @@ CONFIG_STACKTRACE=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -712,6 +742,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -741,4 +772,5 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 69e619967b7..5cabdb3a84f 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:40:44 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:35:04 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
@@ -50,11 +59,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
# CONFIG_IKCONFIG_PROC is not set
CONFIG_LOG_BUF_SHIFT=14
@@ -93,19 +103,21 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -114,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
@@ -124,7 +141,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -172,6 +189,7 @@ CONFIG_CPU_SUBTYPE_SH7203=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -308,7 +326,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ignore_loglevel"
#
@@ -332,6 +351,7 @@ CONFIG_BINFMT_SHARED_FLAT=y
#
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
+# CONFIG_PM_RUNTIME is not set
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_NET=y
@@ -373,6 +393,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -403,6 +424,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -410,7 +432,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -432,9 +453,9 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
@@ -591,10 +612,7 @@ CONFIG_SMSC911X=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -679,6 +697,11 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -699,6 +722,10 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -741,7 +768,6 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -764,6 +790,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -811,6 +838,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_HWA_HCD is not set
@@ -954,8 +982,10 @@ CONFIG_RTC_DRV_SH=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -990,7 +1020,6 @@ CONFIG_INOTIFY_USER=y
CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y
@@ -1016,7 +1045,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -1090,6 +1118,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1129,22 +1158,28 @@ CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
@@ -1152,6 +1187,7 @@ CONFIG_FTRACE=y
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1170,10 +1206,10 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xfffe8000
CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_BOOTMEM=y
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1203,5 +1239,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index c6e9b1c0fa3..f521e82cc19 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:42:26 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:36:25 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,8 +29,10 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +42,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -50,11 +59,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -84,20 +94,22 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -107,6 +119,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -118,7 +135,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -165,6 +182,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -210,6 +228,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -303,7 +322,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00010000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
#
@@ -378,6 +398,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -408,6 +429,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -416,7 +438,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -429,6 +450,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -542,11 +564,13 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -568,6 +592,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -595,6 +620,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -613,7 +639,11 @@ CONFIG_PATA_PLATFORM=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -673,6 +703,7 @@ CONFIG_8139TOO=y
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
@@ -694,6 +725,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -719,10 +751,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -840,10 +869,20 @@ CONFIG_SPI_SH_SCI=y
#
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
@@ -862,9 +901,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_LIS3_SPI is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -880,12 +917,15 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_MFD_SM501=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -969,6 +1009,7 @@ CONFIG_LOGO=y
CONFIG_LOGO_SUPERH_CLUT224=y
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -1071,7 +1112,6 @@ CONFIG_SOUND_PRIME=m
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1094,6 +1134,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -1142,6 +1183,7 @@ CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1213,6 +1255,7 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -1252,6 +1295,7 @@ CONFIG_RTC_INTF_DEV=y
CONFIG_RTC_DRV_R9701=y
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1293,8 +1337,10 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1357,7 +1403,6 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -1423,6 +1468,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1434,11 +1480,15 @@ CONFIG_STACKTRACE=y
# CONFIG_LATENCYTOP is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -1450,6 +1500,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
CONFIG_EARLY_PRINTK=y
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1463,7 +1514,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1495,11 +1545,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1562,5 +1614,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index bc10469d31f..a156cd1e061 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:43:19 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:39:48 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,8 +29,10 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +42,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -50,11 +59,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -84,20 +94,22 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -107,6 +119,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -118,7 +135,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -165,6 +182,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -210,6 +228,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -303,7 +322,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00010000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
#
@@ -378,6 +398,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -408,6 +429,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -416,7 +438,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -429,6 +450,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -438,9 +460,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -499,6 +521,7 @@ CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_PMC551 is not set
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -627,11 +650,13 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -653,6 +678,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -680,6 +706,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -698,7 +725,11 @@ CONFIG_PATA_PLATFORM=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -758,6 +789,7 @@ CONFIG_8139TOO=y
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
@@ -779,6 +811,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -804,10 +837,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -925,10 +955,20 @@ CONFIG_SPI_SH_SCI=y
#
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
@@ -947,9 +987,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_LIS3_SPI is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -965,12 +1003,15 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_MFD_SM501=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1054,6 +1095,7 @@ CONFIG_LOGO=y
CONFIG_LOGO_SUPERH_CLUT224=y
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -1156,7 +1198,6 @@ CONFIG_SOUND_PRIME=m
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1179,6 +1220,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -1227,6 +1269,7 @@ CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1298,6 +1341,7 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -1337,6 +1381,7 @@ CONFIG_RTC_INTF_DEV=y
CONFIG_RTC_DRV_R9701=y
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1378,8 +1423,10 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1443,7 +1490,6 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -1509,6 +1555,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1520,11 +1567,15 @@ CONFIG_STACKTRACE=y
# CONFIG_LATENCYTOP is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -1536,6 +1587,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
CONFIG_EARLY_PRINTK=y
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1549,7 +1601,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1581,11 +1632,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1648,5 +1701,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index 753fb276e9f..055536b5c5c 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:43:54 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:40:25 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +42,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION="_SDK7780"
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
@@ -88,20 +98,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -110,6 +121,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -122,7 +137,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-CONFIG_LBD=y
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -170,6 +185,7 @@ CONFIG_CPU_SH4A=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
CONFIG_CPU_SUBTYPE_SH7780=y
@@ -225,6 +241,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -314,7 +331,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x01800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="mem=128M console=tty0 console=ttySC0,115200 ip=bootp root=/dev/nfs nfsroot=192.168.0.1:/home/rootfs"
#
@@ -435,6 +453,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -496,6 +515,7 @@ CONFIG_NET_SCH_FIFO=y
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -503,7 +523,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -516,6 +535,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -677,12 +697,14 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -704,6 +726,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -732,6 +755,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_PCMCIA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -760,7 +784,11 @@ CONFIG_BLK_DEV_DM=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -804,10 +832,7 @@ CONFIG_SMC91X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -862,12 +887,13 @@ CONFIG_INPUT_EVDEV=y
#
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
@@ -875,6 +901,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_APPLETOUCH is not set
@@ -947,6 +974,11 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
@@ -954,7 +986,6 @@ CONFIG_POWER_SUPPLY=y
# CONFIG_BATTERY_DS2760 is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -986,6 +1017,7 @@ CONFIG_SSB_DRIVER_PCICORE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1072,11 +1104,11 @@ CONFIG_LOGO_SUPERH_VGA16=y
CONFIG_LOGO_SUPERH_CLUT224=y
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
# CONFIG_SND is not set
CONFIG_SOUND_PRIME=y
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1099,6 +1131,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -1149,6 +1182,7 @@ CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_OHCI_HCD is not set
# CONFIG_USB_UHCI_HCD is not set
# CONFIG_USB_SL811_HCD is not set
@@ -1278,6 +1312,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1345,7 +1380,6 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1425,6 +1459,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_UNUSED_SYMBOLS=y
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1461,18 +1496,23 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1480,6 +1520,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1495,11 +1536,11 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
# CONFIG_EARLY_PRINTK is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1514,7 +1555,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_HASH=y
@@ -1550,11 +1590,13 @@ CONFIG_CRYPTO_HASH2=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1617,5 +1659,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 8dd2f130e49..1cd1777aa43 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:46:15 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:45:28 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +42,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
@@ -53,12 +62,12 @@ CONFIG_AUDIT_TREE=y
#
# RCU Subsystem
#
-# CONFIG_CLASSIC_RCU is not set
-# CONFIG_TREE_RCU is not set
-CONFIG_PREEMPT_RCU=y
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
CONFIG_RCU_TRACE=y
-# CONFIG_TREE_RCU_TRACE is not set
-CONFIG_PREEMPT_RCU_TRACE=y
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_TREE_RCU_TRACE=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -105,19 +114,21 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -126,6 +137,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
@@ -137,7 +153,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -185,6 +201,7 @@ CONFIG_CPU_SUBTYPE_SH7206=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -319,7 +336,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC3,115200 ignore_loglevel earlyprintk=serial"
#
@@ -389,6 +407,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -419,6 +438,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -426,7 +446,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -448,9 +467,9 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -588,10 +607,7 @@ CONFIG_SMC91X=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -647,11 +663,15 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -746,8 +766,10 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -782,8 +804,6 @@ CONFIG_INOTIFY_USER=y
CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
CONFIG_MISC_FILESYSTEMS=y
@@ -809,7 +829,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -846,6 +865,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -882,20 +902,27 @@ CONFIG_DEBUG_VM=y
CONFIG_DEBUG_LIST=y
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
@@ -904,6 +931,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -920,10 +948,10 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -938,7 +966,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_HASH=y
@@ -974,11 +1001,13 @@ CONFIG_CRYPTO_HASH2=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1046,5 +1075,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig
index 18f46debf92..5531444b808 100644
--- a/arch/sh/configs/se7343_defconfig
+++ b/arch/sh/configs/se7343_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:47:07 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:46:55 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -50,11 +59,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -88,18 +98,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -108,6 +119,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -119,7 +134,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -135,7 +150,7 @@ CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System type
@@ -169,6 +184,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -214,6 +230,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -296,7 +313,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200"
#
@@ -316,7 +334,12 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -360,6 +383,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -389,6 +413,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -396,7 +421,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -418,9 +442,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -560,10 +584,7 @@ CONFIG_NETDEVICES=y
CONFIG_MII=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -671,6 +692,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
@@ -681,6 +703,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
@@ -702,20 +725,21 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -734,8 +758,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -788,6 +814,7 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_LOGO is not set
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
@@ -822,7 +849,6 @@ CONFIG_SND_USB=y
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -845,6 +871,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -877,6 +904,7 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -891,6 +919,7 @@ CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_OXU210HP_HCD is not set
CONFIG_USB_ISP116X_HCD=y
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_HWA_HCD is not set
@@ -990,8 +1019,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -1028,8 +1059,6 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
@@ -1062,7 +1091,6 @@ CONFIG_CRAMFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1140,6 +1168,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1149,8 +1178,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -1160,6 +1192,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe00000
CONFIG_EARLY_PRINTK=y
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1173,7 +1206,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1205,11 +1237,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1273,5 +1307,6 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig
index 724bb77c9dc..6921b199b1d 100644
--- a/arch/sh/configs/se7619_defconfig
+++ b/arch/sh/configs/se7619_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:47:56 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:50:05 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,17 +40,24 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -73,31 +83,36 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_BASE_SMALL=1
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -144,6 +159,7 @@ CONFIG_CPU_SUBTYPE_SH7619=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -262,7 +278,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -469,11 +486,15 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -507,7 +528,6 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -559,7 +579,9 @@ CONFIG_RTC_LIB=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -594,7 +616,6 @@ CONFIG_INOTIFY_USER=y
CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
# CONFIG_SYSFS is not set
-# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
@@ -619,7 +640,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
#
# Partition Types
@@ -637,6 +657,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
@@ -645,8 +666,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -654,6 +678,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -680,4 +705,5 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index 6ca6a2fc06e..3abb06879f0 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:48:18 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:50:52 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -48,11 +57,12 @@ CONFIG_LOCALVERSION_AUTO=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -83,18 +93,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -102,6 +113,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -113,7 +128,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -160,6 +175,7 @@ CONFIG_CPU_SUBTYPE_SH7705=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -205,6 +221,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -289,7 +306,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -355,6 +373,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -384,6 +403,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -391,7 +411,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -409,9 +428,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -547,10 +566,7 @@ CONFIG_STNIC=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -642,10 +658,20 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -656,9 +682,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -692,7 +716,6 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -746,7 +769,9 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -815,7 +840,6 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -849,6 +873,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
@@ -857,8 +882,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -866,6 +894,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -878,7 +907,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -910,11 +938,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -979,5 +1009,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index b8aae11bc8f..1a43cfecb39 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:49:00 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:53:32 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,6 +13,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -26,7 +27,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -36,6 +39,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -49,11 +58,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -84,18 +94,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -104,6 +115,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -115,7 +130,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -162,6 +177,7 @@ CONFIG_CPU_SUBTYPE_SH7712=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -207,6 +223,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -291,7 +308,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
#
@@ -368,6 +386,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -433,6 +452,7 @@ CONFIG_NET_SCH_FIFO=y
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -440,7 +460,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -464,9 +483,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -597,7 +616,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -605,6 +623,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -658,10 +677,7 @@ CONFIG_SH_ETH=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -719,11 +735,15 @@ CONFIG_HW_RANDOM=m
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -828,8 +848,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -866,8 +888,6 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
@@ -900,7 +920,6 @@ CONFIG_CRAMFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -935,6 +954,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -964,24 +984,30 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -994,14 +1020,13 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-# CONFIG_KMEMCHECK is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
# CONFIG_DUMP_CODE is not set
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1016,7 +1041,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
@@ -1059,11 +1083,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1127,5 +1153,6 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index 306e21c4253..b8a3c8c4bac 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:50:49 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:57:11 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,6 +13,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -50,11 +59,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -89,18 +99,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -109,6 +120,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -120,7 +135,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -167,6 +182,7 @@ CONFIG_CPU_SUBTYPE_SH7721=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -212,6 +228,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -296,7 +313,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
#
@@ -373,6 +391,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -438,6 +457,7 @@ CONFIG_NET_SCH_FIFO=y
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -445,7 +465,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -469,9 +488,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -606,6 +625,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -621,10 +641,7 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_ETHERNET is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -672,12 +689,13 @@ CONFIG_INPUT_EVDEV=y
#
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
@@ -725,6 +743,11 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -762,7 +785,6 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -785,6 +807,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -831,6 +854,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -900,6 +924,7 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -961,8 +986,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -1002,8 +1029,6 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
@@ -1036,7 +1061,6 @@ CONFIG_CRAMFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
#
@@ -1095,6 +1119,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1124,24 +1149,30 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1154,14 +1185,13 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-# CONFIG_KMEMCHECK is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
# CONFIG_DUMP_CODE is not set
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1176,7 +1206,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
@@ -1219,11 +1248,13 @@ CONFIG_CRYPTO_CBC=y
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1287,5 +1318,6 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 61943829984..d709b7f35ac 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 12:54:24 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:57:41 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,7 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -40,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -91,19 +101,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -113,6 +124,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -125,7 +141,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -141,7 +157,7 @@ CONFIG_IOSCHED_NOOP=y
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System type
@@ -176,6 +192,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -233,6 +250,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -317,7 +335,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -336,7 +355,13 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -381,6 +406,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -410,6 +436,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -417,7 +444,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -430,6 +456,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -494,7 +521,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -502,6 +528,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -535,10 +562,7 @@ CONFIG_SMC91X=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -577,12 +601,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
#
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -631,10 +656,20 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -645,9 +680,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -681,7 +714,6 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -778,8 +810,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -839,7 +873,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -866,6 +899,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -878,8 +912,11 @@ CONFIG_DEBUG_FS=y
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -889,6 +926,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
# CONFIG_EARLY_PRINTK is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -902,7 +940,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -934,11 +971,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1002,5 +1041,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 3ee783a0a07..56b0b9ff9e0 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Mon Jun 29 16:28:43 2009
+# Linux kernel version: 2.6.31
+# Fri Sep 25 11:50:59 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -42,6 +42,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -54,11 +60,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -91,20 +98,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -144,7 +150,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System type
@@ -178,6 +184,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
CONFIG_CPU_SUBTYPE_SH7724=y
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -224,6 +231,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -249,6 +257,8 @@ CONFIG_CPU_HAS_FPU=y
#
CONFIG_SOLUTION_ENGINE=y
CONFIG_SH_7724_SOLUTION_ENGINE=y
+# CONFIG_SH_KFR2R09 is not set
+# CONFIG_SH_ECOVEC is not set
#
# Timer and clock configuration
@@ -269,7 +279,10 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
# DMA support
#
-# CONFIG_SH_DMA is not set
+CONFIG_SH_DMA_API=y
+CONFIG_SH_DMA=y
+CONFIG_NR_ONCHIP_DMA_CHANNELS=12
+# CONFIG_NR_DMA_CHANNELS_BOOL is not set
#
# Companion Chips
@@ -305,7 +318,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty1 console=ttySC3,115200 root=/dev/nfs ip=dhcp memchunk.vpu=4m"
#
@@ -325,7 +339,13 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -373,6 +393,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -402,6 +423,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -409,7 +431,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -422,6 +443,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -431,9 +453,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -489,6 +511,7 @@ CONFIG_MTD_PHYSMAP=y
#
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -648,12 +671,10 @@ CONFIG_SMC91X=y
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -697,14 +718,19 @@ CONFIG_INPUT_EVDEV=y
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
CONFIG_KEYBOARD_SH_KEYSC=y
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -754,6 +780,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -764,6 +791,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
@@ -786,9 +814,6 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -838,11 +863,15 @@ CONFIG_GPIOLIB=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -862,8 +891,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
@@ -876,24 +907,24 @@ CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_COMMON=y
# CONFIG_VIDEO_ALLOW_V4L1 is not set
CONFIG_VIDEO_V4L1_COMPAT=y
-# CONFIG_DVB_CORE is not set
-CONFIG_VIDEO_MEDIA=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
#
# Multimedia drivers
#
# CONFIG_MEDIA_ATTACH is not set
-CONFIG_MEDIA_TUNER=y
+CONFIG_MEDIA_TUNER=m
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_MEDIA_TUNER_SIMPLE=y
-CONFIG_MEDIA_TUNER_TDA8290=y
-CONFIG_MEDIA_TUNER_TDA9887=y
-CONFIG_MEDIA_TUNER_TEA5761=y
-CONFIG_MEDIA_TUNER_TEA5767=y
-CONFIG_MEDIA_TUNER_MT20XX=y
-CONFIG_MEDIA_TUNER_XC2028=y
-CONFIG_MEDIA_TUNER_XC5000=y
-CONFIG_MEDIA_TUNER_MC44S803=y
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MC44S803=m
CONFIG_VIDEO_V4L2=y
CONFIG_VIDEOBUF_GEN=y
CONFIG_VIDEOBUF_DMA_CONTIG=y
@@ -904,6 +935,7 @@ CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
# CONFIG_VIDEO_VIVI is not set
# CONFIG_VIDEO_SAA5246A is not set
# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_AU0828 is not set
CONFIG_SOC_CAMERA=y
# CONFIG_SOC_CAMERA_MT9M001 is not set
# CONFIG_SOC_CAMERA_MT9M111 is not set
@@ -919,15 +951,18 @@ CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
CONFIG_USB_GSPCA=m
# CONFIG_USB_M5602 is not set
# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GL860 is not set
# CONFIG_USB_GSPCA_CONEX is not set
# CONFIG_USB_GSPCA_ETOMS is not set
# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_JEILINJ is not set
# CONFIG_USB_GSPCA_MARS is not set
# CONFIG_USB_GSPCA_MR97310A is not set
# CONFIG_USB_GSPCA_OV519 is not set
# CONFIG_USB_GSPCA_OV534 is not set
# CONFIG_USB_GSPCA_PAC207 is not set
# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SN9C20X is not set
# CONFIG_USB_GSPCA_SONIXB is not set
# CONFIG_USB_GSPCA_SONIXJ is not set
# CONFIG_USB_GSPCA_SPCA500 is not set
@@ -957,6 +992,26 @@ CONFIG_USB_PWC_INPUT_EVDEV=y
# CONFIG_USB_STKWEBCAM is not set
# CONFIG_USB_S2255 is not set
# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_DVB_MAX_ADAPTERS=8
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+# CONFIG_DVB_USB is not set
+# CONFIG_SMS_SIANO_MDTV is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
# CONFIG_DAB is not set
#
@@ -1017,10 +1072,80 @@ CONFIG_LOGO=y
# CONFIG_LOGO_SUPERH_MONO is not set
# CONFIG_LOGO_SUPERH_VGA16 is not set
CONFIG_LOGO_SUPERH_CLUT224=y
-# CONFIG_SOUND is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_SUPERH is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+CONFIG_SND_SOC=m
+
+#
+# SoC Audio support for SuperH
+#
+CONFIG_SND_SOC_SH4_FSI=m
+CONFIG_SND_FSI_AK4642=y
+CONFIG_SND_SOC_I2C_AND_SPI=m
+CONFIG_SND_SOC_ALL_CODECS=m
+CONFIG_SND_SOC_WM_HUBS=m
+CONFIG_SND_SOC_AD1836=m
+CONFIG_SND_SOC_AD1938=m
+CONFIG_SND_SOC_AD73311=m
+CONFIG_SND_SOC_AK4104=m
+CONFIG_SND_SOC_AK4535=m
+CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_CS4270=m
+CONFIG_SND_SOC_L3=m
+CONFIG_SND_SOC_PCM3008=m
+CONFIG_SND_SOC_SPDIF=m
+CONFIG_SND_SOC_SSM2602=m
+CONFIG_SND_SOC_TLV320AIC23=m
+CONFIG_SND_SOC_TLV320AIC26=m
+CONFIG_SND_SOC_TLV320AIC3X=m
+CONFIG_SND_SOC_UDA134X=m
+CONFIG_SND_SOC_UDA1380=m
+CONFIG_SND_SOC_WM8510=m
+CONFIG_SND_SOC_WM8523=m
+CONFIG_SND_SOC_WM8580=m
+CONFIG_SND_SOC_WM8728=m
+CONFIG_SND_SOC_WM8731=m
+CONFIG_SND_SOC_WM8750=m
+CONFIG_SND_SOC_WM8753=m
+CONFIG_SND_SOC_WM8776=m
+CONFIG_SND_SOC_WM8900=m
+CONFIG_SND_SOC_WM8903=m
+CONFIG_SND_SOC_WM8940=m
+CONFIG_SND_SOC_WM8960=m
+CONFIG_SND_SOC_WM8961=m
+CONFIG_SND_SOC_WM8971=m
+CONFIG_SND_SOC_WM8974=m
+CONFIG_SND_SOC_WM8988=m
+CONFIG_SND_SOC_WM8990=m
+CONFIG_SND_SOC_WM8993=m
+CONFIG_SND_SOC_WM9081=m
+CONFIG_SND_SOC_MAX9877=m
+# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1043,6 +1168,7 @@ CONFIG_USB_HID=y
# CONFIG_HID_EZKEY is not set
# CONFIG_HID_KYE is not set
# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
@@ -1072,6 +1198,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICEFS is not set
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -1086,9 +1213,11 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
# USB Device Class drivers
@@ -1151,9 +1280,46 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
-# CONFIG_USB_GADGET is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+CONFIG_USB_GADGET_R8A66597=y
+CONFIG_USB_R8A66597=y
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
#
# OTG and related infrastructure
@@ -1176,6 +1342,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=y
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -1223,6 +1391,7 @@ CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1279,6 +1448,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1343,7 +1513,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1423,6 +1592,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1433,8 +1603,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -1442,6 +1615,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1455,7 +1629,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1496,11 +1669,13 @@ CONFIG_CRYPTO_CBC=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1562,5 +1737,6 @@ CONFIG_CRC7=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index 564bf7bdce6..7bc926c17b7 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:00:01 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 18:58:58 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -49,11 +58,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -84,18 +94,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -104,6 +115,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -115,7 +130,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -162,6 +177,7 @@ CONFIG_CPU_SUBTYPE_SH7750=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -207,6 +223,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -291,7 +308,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -358,6 +376,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -387,6 +406,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -394,7 +414,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -412,9 +431,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -563,7 +582,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -599,10 +617,7 @@ CONFIG_STNIC=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -665,10 +680,20 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -679,9 +704,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -766,8 +789,10 @@ CONFIG_RTC_LIB=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -838,7 +863,6 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -885,6 +909,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -894,8 +919,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -903,6 +931,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -916,7 +945,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -948,11 +976,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1016,5 +1046,6 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig
index eb431c43e99..c20ae5e35c8 100644
--- a/arch/sh/configs/se7751_defconfig
+++ b/arch/sh/configs/se7751_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:02:26 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:01:41 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -49,11 +58,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -87,18 +97,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -107,6 +118,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -118,7 +133,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -165,6 +180,7 @@ CONFIG_CPU_SUBTYPE_SH7751=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -210,6 +226,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -295,7 +312,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00010000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,38400"
#
@@ -382,6 +400,7 @@ CONFIG_IP_NF_QUEUE=y
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -411,6 +430,7 @@ CONFIG_IP_NF_QUEUE=y
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -418,7 +438,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -436,9 +455,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -574,10 +593,7 @@ CONFIG_MII=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -632,10 +648,20 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -646,9 +672,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -734,8 +758,10 @@ CONFIG_EXT2_FS=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -806,7 +832,6 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -833,6 +858,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -842,8 +868,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -851,6 +880,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -864,7 +894,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -896,11 +925,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -965,5 +996,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index 756beec5fb2..82baeef40a9 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:03:56 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:03:59 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -48,11 +57,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -81,19 +91,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -101,6 +112,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -112,7 +128,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
#
@@ -159,6 +176,7 @@ CONFIG_CPU_SH4A=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
CONFIG_CPU_SUBTYPE_SH7780=y
@@ -199,7 +217,6 @@ CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -207,6 +224,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -287,8 +305,9 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00810000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttySC0.115200 root=/dev/sda1"
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
+CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
#
# Bus options
@@ -382,6 +401,7 @@ CONFIG_IPV6=y
# CONFIG_BT is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -389,7 +409,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
@@ -406,9 +425,9 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -549,6 +568,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SPI_ATTRS is not set
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
@@ -587,11 +607,13 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -612,6 +634,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD64X is not set
# CONFIG_PATA_CS5520 is not set
@@ -630,6 +653,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_NS87410 is not set
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -648,8 +672,13 @@ CONFIG_SATA_SIL=y
#
#
-# A new alternative FireWire stack is available with EXPERIMENTAL=y
+# You can enable one or both FireWire driver stacks.
#
+
+#
+# See the help texts for more information.
+#
+# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
@@ -724,10 +753,7 @@ CONFIG_NET_PCI=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -821,10 +847,19 @@ CONFIG_UNIX98_PTYS=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_SIS5595 is not set
@@ -833,7 +868,6 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=y
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
@@ -857,6 +891,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -937,11 +972,11 @@ CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_LOGO_SUPERH_CLUT224=y
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
# CONFIG_SND is not set
CONFIG_SOUND_PRIME=y
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -964,6 +999,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -1009,6 +1045,7 @@ CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1243,6 +1280,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1253,8 +1291,11 @@ CONFIG_DEBUG_FS=y
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1263,6 +1304,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1276,7 +1318,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_NULL is not set
@@ -1309,6 +1350,7 @@ CONFIG_CRYPTO=y
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1371,5 +1413,6 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 7fedaaee861..dd0e8900afb 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:04:41 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:07:14 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,7 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -40,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -53,11 +62,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -91,20 +101,22 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -114,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -126,7 +143,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -173,6 +190,7 @@ CONFIG_CPU_SUBTYPE_SH7751=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -218,6 +236,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -302,7 +321,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00004000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
#
@@ -382,6 +402,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -412,6 +433,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -419,7 +441,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -432,6 +453,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -591,6 +613,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
@@ -603,7 +626,11 @@ CONFIG_SCSI_LOWLEVEL=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -679,6 +706,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -704,10 +732,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -804,10 +829,20 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
@@ -822,9 +857,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -861,6 +894,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -879,7 +913,6 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -944,8 +977,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1011,12 +1046,12 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
@@ -1108,6 +1143,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1119,11 +1155,15 @@ CONFIG_STACKTRACE=y
# CONFIG_LATENCYTOP is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -1134,6 +1174,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
# CONFIG_EARLY_PRINTK is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1147,7 +1188,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1189,11 +1229,13 @@ CONFIG_CRYPTO_ECB=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1259,5 +1301,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index c296ca5d95c..662156ec921 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:06:13 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:11:49 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -49,11 +58,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -87,18 +97,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -107,6 +118,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -118,7 +134,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -165,6 +181,7 @@ CONFIG_CPU_SUBTYPE_SH7710=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -210,6 +227,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -292,7 +310,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -374,6 +393,7 @@ CONFIG_NETFILTER_ADVANCED=y
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -440,6 +460,7 @@ CONFIG_NET_SCH_FIFO=y
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -447,7 +468,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -469,9 +489,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -605,10 +625,7 @@ CONFIG_NET_ETHERNET=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -686,6 +703,11 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -723,7 +745,6 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -775,8 +796,10 @@ CONFIG_RTC_LIB=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -813,8 +836,6 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
@@ -847,7 +868,6 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -874,6 +894,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -883,8 +904,11 @@ CONFIG_DEBUG_FS=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -893,6 +917,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -906,7 +931,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -938,11 +962,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1006,5 +1032,6 @@ CONFIG_ZLIB_DEFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index ba26be1b413..e06719a30ba 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:06:48 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:14:00 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -46,14 +55,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-# CONFIG_CLASSIC_RCU is not set
CONFIG_TREE_RCU=y
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
CONFIG_GROUP_SCHED=y
@@ -92,19 +99,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
@@ -113,13 +122,18 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -169,6 +183,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
CONFIG_CPU_SUBTYPE_SH7724=y
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -214,7 +229,6 @@ CONFIG_SPARSEMEM_STATIC=y
#
# Memory hotplug is currently incompatible with Software Suspend
#
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -222,6 +236,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -246,6 +261,8 @@ CONFIG_CPU_HAS_FPU=y
# Board support
#
# CONFIG_SH_7724_SOLUTION_ENGINE is not set
+# CONFIG_SH_KFR2R09 is not set
+# CONFIG_SH_ECOVEC is not set
#
# Timer and clock configuration
@@ -318,7 +335,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -345,6 +363,7 @@ CONFIG_SUSPEND_FREEZER=y
CONFIG_HIBERNATION_NVS=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
+CONFIG_PM_RUNTIME=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -427,6 +446,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -437,6 +457,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
@@ -456,20 +477,21 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -488,8 +510,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -540,6 +564,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -587,7 +612,9 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -642,6 +669,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -654,11 +682,15 @@ CONFIG_STACKTRACE=y
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -668,6 +700,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -692,4 +725,5 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index b1d9b2311e3..194ff703e23 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:07:15 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:15:37 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -48,11 +57,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -92,19 +102,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -114,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -125,7 +142,7 @@ CONFIG_MODULES=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -173,6 +190,7 @@ CONFIG_CPU_SH4A=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
CONFIG_CPU_SUBTYPE_SH7763=y
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -214,7 +232,6 @@ CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -222,6 +239,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -303,7 +321,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC2,115200 root=/dev/sda1 rootdelay=10"
#
@@ -371,6 +390,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -401,6 +421,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -409,7 +430,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -422,6 +442,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -431,9 +452,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
@@ -488,6 +509,7 @@ CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_GPIO_ADDR is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -565,7 +587,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -621,10 +642,7 @@ CONFIG_SH_ETH=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -716,11 +734,15 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -830,6 +852,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -899,6 +922,7 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -923,6 +947,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -956,8 +982,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1022,7 +1050,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -1096,6 +1123,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1107,11 +1135,15 @@ CONFIG_STACKTRACE=y
# CONFIG_LATENCYTOP is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -1121,6 +1153,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1134,7 +1167,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1166,11 +1198,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1232,5 +1266,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index 426a88f7a23..34bed5541f3 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:08:05 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:17:16 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -37,6 +40,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -45,14 +54,12 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-# CONFIG_CLASSIC_RCU is not set
CONFIG_TREE_RCU=y
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
CONFIG_GROUP_SCHED=y
@@ -91,19 +98,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
@@ -112,13 +121,18 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -166,6 +180,7 @@ CONFIG_CPU_SH4A=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
CONFIG_CPU_SUBTYPE_SH7770=y
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -210,7 +225,6 @@ CONFIG_SPARSEMEM_STATIC=y
#
# Memory hotplug is currently incompatible with Software Suspend
#
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -218,6 +232,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -313,7 +328,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -338,6 +354,7 @@ CONFIG_PM_SLEEP=y
CONFIG_HIBERNATION_NVS=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
+# CONFIG_PM_RUNTIME is not set
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -420,6 +437,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -430,6 +448,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
@@ -449,20 +468,21 @@ CONFIG_I2C_SH_MOBILE=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -481,8 +501,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -533,6 +555,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -580,7 +603,9 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -635,6 +660,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -647,11 +673,15 @@ CONFIG_STACKTRACE=y
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
@@ -661,6 +691,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -685,4 +716,5 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index ed316f602db..51cbaedf7a5 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:08:29 2009
+# Linux kernel version: 2.6.31
+# Fri Sep 25 11:39:20 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,8 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
-CONFIG_IO_TRAPPED=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -41,10 +43,17 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
# CONFIG_TASKSTATS is not set
@@ -53,11 +62,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -67,8 +77,7 @@ CONFIG_FAIR_GROUP_SCHED=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
@@ -79,7 +88,7 @@ CONFIG_EMBEDDED=y
CONFIG_UID16=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
@@ -93,28 +102,37 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+CONFIG_PERF_COUNTERS=y
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
-# CONFIG_OPROFILE is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
-# CONFIG_KPROBES is not set
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_GCOV_KERNEL=y
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -127,7 +145,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -176,6 +194,7 @@ CONFIG_CPU_SHX2=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -215,6 +234,12 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_HUGETLB_PAGE_SIZE_64K=y
+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -223,7 +248,6 @@ CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -231,6 +255,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -256,6 +281,7 @@ CONFIG_CPU_HAS_FPU=y
#
# CONFIG_SH_HIGHLANDER is not set
CONFIG_SH_SH7785LCR=y
+# CONFIG_SH_SH7785LCR_PT is not set
#
# Timer and clock configuration
@@ -264,14 +290,29 @@ CONFIG_SH_TIMER_TMU=y
CONFIG_SH_PCLK_FREQ=50000000
CONFIG_SH_CLK_CPG=y
CONFIG_TICK_ONESHOT=y
-# CONFIG_NO_HZ is not set
+CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
# CPU Frequency scaling
#
-# CONFIG_CPU_FREQ is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_SH_CPU_FREQ=y
#
# DMA support
@@ -299,12 +340,12 @@ CONFIG_HZ=250
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
-# CONFIG_SECCOMP is not set
+CONFIG_SECCOMP=y
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+CONFIG_SPARSE_IRQ=y
#
# Boot options
@@ -312,7 +353,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -321,8 +363,7 @@ CONFIG_PCI=y
CONFIG_SH_PCIDMA_NONCOHERENT=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
-# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -339,8 +380,13 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
-# CONFIG_CPU_IDLE is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+# CONFIG_HIBERNATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
CONFIG_NET=y
#
@@ -392,6 +438,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -414,6 +461,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -421,6 +470,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -429,7 +479,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -442,18 +491,17 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -538,7 +586,8 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
@@ -561,7 +610,7 @@ CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
# CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
+# CONFIG_SCSI_PROC_FS is not set
#
# SCSI support type (disk, tape, CD-ROM)
@@ -591,6 +640,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -612,6 +662,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -639,6 +690,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -657,7 +709,11 @@ CONFIG_SATA_SIL=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -690,6 +746,7 @@ CONFIG_R8169=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -697,12 +754,7 @@ CONFIG_R8169=y
# CONFIG_JME is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_WLAN is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -739,25 +791,42 @@ CONFIG_INPUT_FF_MEMLESS=m
# Userland interfaces
#
CONFIG_INPUT_MOUSEDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
-# CONFIG_INPUT_MOUSE is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -766,7 +835,12 @@ CONFIG_INPUT_KEYBOARD=y
#
# Hardware I/O ports
#
-# CONFIG_SERIO is not set
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
# CONFIG_GAMEPORT is not set
#
@@ -777,7 +851,7 @@ CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_DEVKMEM=y
+# CONFIG_DEVKMEM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -796,12 +870,10 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
-# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
-CONFIG_HW_RANDOM=y
-# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
@@ -809,6 +881,7 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOPCA=y
@@ -838,6 +911,7 @@ CONFIG_I2C_ALGOPCA=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
@@ -864,21 +938,42 @@ CONFIG_I2C_PCA_PLATFORM=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_SH_WDT=y
+# CONFIG_SH_WDT_MMAP is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
@@ -896,14 +991,17 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+# CONFIG_VGA_ARB is not set
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -915,12 +1013,11 @@ CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-CONFIG_FB_SYS_FILLRECT=m
-CONFIG_FB_SYS_COPYAREA=m
-CONFIG_FB_SYS_IMAGEBLIT=m
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
# CONFIG_FB_FOREIGN_ENDIAN is not set
-CONFIG_FB_SYS_FOPS=m
-CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SYS_FOPS is not set
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
@@ -955,7 +1052,7 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_ARK is not set
# CONFIG_FB_PM3 is not set
# CONFIG_FB_CARMINE is not set
-CONFIG_FB_SH_MOBILE_LCDC=m
+# CONFIG_FB_SH_MOBILE_LCDC is not set
CONFIG_FB_SM501=y
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
@@ -981,14 +1078,112 @@ CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_LOGO_LINUX_CLUT224=y
-# CONFIG_LOGO_SUPERH_MONO is not set
-# CONFIG_LOGO_SUPERH_VGA16 is not set
-# CONFIG_LOGO_SUPERH_CLUT224 is not set
-# CONFIG_SOUND is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_SUPERH_MONO=y
+CONFIG_LOGO_SUPERH_VGA16=y
+CONFIG_LOGO_SUPERH_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_RAWMIDI_SEQ=y
+CONFIG_SND_OPL3_LIB_SEQ=y
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_MPU401_UART=y
+CONFIG_SND_OPL3_LIB=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+CONFIG_SND_CMIPCI=y
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SND_SUPERH is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1001,34 +1196,39 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-# CONFIG_HID_DRAGONRISE is not set
-CONFIG_HID_EZKEY=y
-# CONFIG_HID_KYE is not set
-CONFIG_HID_GYRATION=y
-# CONFIG_HID_KENSINGTON is not set
-CONFIG_HID_LOGITECH=y
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_EZKEY=m
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LOGITECH=m
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-# CONFIG_HID_NTRIG is not set
-CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_PANTHERLORD=m
# CONFIG_PANTHERLORD_FF is not set
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
-# CONFIG_HID_GREENASIA is not set
-# CONFIG_HID_SMARTJOYPLUS is not set
-# CONFIG_HID_TOPSEED is not set
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TOPSEED=m
# CONFIG_HID_THRUSTMASTER is not set
-# CONFIG_HID_ZEROPLUS is not set
+CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1040,13 +1240,14 @@ CONFIG_USB=y
#
# Miscellaneous USB options
#
-CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICEFS is not set
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-CONFIG_USB_MON=y
+# CONFIG_USB_MON is not set
# CONFIG_USB_WUSB is not set
# CONFIG_USB_WUSB_CBAF is not set
@@ -1061,10 +1262,8 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
-CONFIG_USB_OHCI_HCD=m
-# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
-# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
# CONFIG_USB_UHCI_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
@@ -1133,7 +1332,7 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
-CONFIG_USB_TEST=m
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -1143,7 +1342,29 @@ CONFIG_USB_TEST=m
#
# CONFIG_NOP_USB_XCEIV is not set
# CONFIG_UWB is not set
-# CONFIG_MMC is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=m
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -1179,6 +1400,7 @@ CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1203,9 +1425,20 @@ CONFIG_RTC_DRV_RS5C372=y
#
# CONFIG_RTC_DRV_SH is not set
# CONFIG_RTC_DRV_GENERIC is not set
-# CONFIG_DMADEVICES is not set
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
# CONFIG_AUXDISPLAY is not set
-# CONFIG_UIO is not set
+CONFIG_UIO=m
+# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_SMX is not set
+# CONFIG_UIO_AEC is not set
+# CONFIG_UIO_SERCOS3 is not set
+# CONFIG_UIO_PCI_GENERIC is not set
#
# TI VLYNQ
@@ -1225,13 +1458,15 @@ CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_SECURITY is not set
# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
+# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1275,9 +1510,9 @@ CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_CONFIGFS_FS=y
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
@@ -1297,20 +1532,20 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
+CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
-CONFIG_NFSD_V4=y
+# CONFIG_NFSD_V4 is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=y
+CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
@@ -1373,85 +1608,69 @@ CONFIG_NLS_ISO8859_1=y
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-# CONFIG_PRINTK_TIME is not set
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_DEBUG_KERNEL is not set
CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_PREEMPT=y
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_DEBUG_NOTIFIERS is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_FRAME_POINTER=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
+CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
-# CONFIG_FUNCTION_TRACER is not set
-# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+CONFIG_SCHED_TRACER=y
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
-# CONFIG_STACK_TRACER is not set
-# CONFIG_KMEMTRACE is not set
-# CONFIG_WORKQUEUE_TRACER is not set
+CONFIG_STACK_TRACER=y
+CONFIG_KMEMTRACE=y
+CONFIG_WORKQUEUE_TRACER=y
# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_DYNAMIC_FTRACE=y
+# CONFIG_FUNCTION_PROFILER is not set
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_4KSTACKS is not set
-# CONFIG_DUMP_CODE is not set
-# CONFIG_SH_NO_BSS_INIT is not set
+CONFIG_DWARF_UNWINDER=y
+CONFIG_MCOUNT=y
#
# Security options
@@ -1465,7 +1684,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1507,11 +1725,13 @@ CONFIG_CRYPTO_CBC=y
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1556,7 +1776,7 @@ CONFIG_CRYPTO_DES=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-# CONFIG_BINARY_PRINTF is not set
+CONFIG_BINARY_PRINTF=y
#
# Library routines
@@ -1573,5 +1793,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig
index 004e6f5e8a6..8c2c47ed399 100644
--- a/arch/sh/configs/sh7785lcr_defconfig
+++ b/arch/sh/configs/sh7785lcr_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:09:34 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:23:18 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,8 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
-CONFIG_IO_TRAPPED=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -41,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -53,11 +61,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -93,19 +102,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -115,6 +125,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -127,7 +141,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -176,6 +190,7 @@ CONFIG_CPU_SHX2=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -224,6 +239,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -306,7 +322,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -386,6 +403,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -415,6 +433,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -423,7 +442,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -436,6 +454,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -445,9 +464,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -585,6 +604,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -606,6 +626,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -633,6 +654,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -651,7 +673,11 @@ CONFIG_SATA_SIL=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -684,6 +710,7 @@ CONFIG_R8169=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -691,10 +718,7 @@ CONFIG_R8169=y
# CONFIG_JME is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -744,13 +768,17 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -803,6 +831,7 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOPCA=y
@@ -832,6 +861,7 @@ CONFIG_I2C_ALGOPCA=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
@@ -858,20 +888,21 @@ CONFIG_I2C_PCA_PLATFORM=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -890,14 +921,17 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -982,7 +1016,6 @@ CONFIG_LOGO_LINUX_CLUT224=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1005,6 +1038,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -1055,6 +1089,7 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1173,6 +1208,7 @@ CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1224,8 +1260,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1291,12 +1329,12 @@ CONFIG_MINIX_FS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
@@ -1372,6 +1410,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1407,18 +1446,23 @@ CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1426,6 +1470,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1440,11 +1485,11 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
# CONFIG_DUMP_CODE is not set
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1459,7 +1504,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1501,11 +1545,13 @@ CONFIG_CRYPTO_CBC=y
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1567,5 +1613,6 @@ CONFIG_CRC32=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig
index ad7d60972fc..92115e61275 100644
--- a/arch/sh/configs/shmin_defconfig
+++ b/arch/sh/configs/shmin_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:10:33 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:27:17 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,6 +13,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -26,7 +27,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -36,6 +39,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
# CONFIG_SWAP is not set
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -46,11 +55,12 @@ CONFIG_LOCALVERSION_AUTO=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -77,18 +87,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_SHMEM is not set
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -96,12 +107,16 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_BASE_SMALL=1
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -148,6 +163,7 @@ CONFIG_CPU_SUBTYPE_SH7706=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -193,6 +209,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -274,7 +291,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00210000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,115200 root=1f01 mtdparts=phys_mapped_flash:64k(firm)ro,-(sys) netdev=34,0x300,eth0 "
#
@@ -339,6 +357,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -368,6 +387,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -375,7 +395,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -529,10 +548,7 @@ CONFIG_NET_ETHERNET=y
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -591,11 +607,15 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -672,7 +692,9 @@ CONFIG_RTC_LIB=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -709,8 +731,6 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
# CONFIG_SYSFS is not set
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_MISC_FILESYSTEMS=y
@@ -732,7 +752,6 @@ CONFIG_CRAMFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -768,6 +787,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
@@ -775,8 +795,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -785,6 +808,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
CONFIG_EARLY_PRINTK=y
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -797,7 +821,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -828,11 +851,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -895,5 +920,6 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index 207b0c9a8cd..e3858d757d5 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:11:03 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:29:26 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,7 +31,9 @@ CONFIG_LOCKDEP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -40,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -55,12 +64,12 @@ CONFIG_AUDIT_TREE=y
#
# RCU Subsystem
#
-# CONFIG_CLASSIC_RCU is not set
-# CONFIG_TREE_RCU is not set
-CONFIG_PREEMPT_RCU=y
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
CONFIG_RCU_TRACE=y
-# CONFIG_TREE_RCU_TRACE is not set
-CONFIG_PREEMPT_RCU_TRACE=y
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_TREE_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -111,19 +120,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
@@ -135,6 +146,11 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
@@ -147,7 +163,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -196,6 +212,7 @@ CONFIG_CPU_SHX3=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -256,6 +273,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -353,7 +371,8 @@ CONFIG_PREEMPT=y
CONFIG_ZERO_PAGE_OFFSET=0x00010000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00010000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=bios ignore_loglevel"
#
@@ -438,6 +457,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -489,6 +509,7 @@ CONFIG_CAN_VCAN=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -562,7 +583,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -570,6 +590,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -602,12 +623,10 @@ CONFIG_SMC91X=y
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -675,6 +694,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
@@ -685,6 +705,7 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
@@ -706,9 +727,6 @@ CONFIG_I2C_HELPER_AUTO=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -729,11 +747,15 @@ CONFIG_SPI_MASTER=y
#
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -762,8 +784,12 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -808,6 +834,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=m
# CONFIG_USB_HWA_HCD is not set
@@ -862,6 +889,7 @@ CONFIG_USB_R8A66597_HCD=m
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
@@ -876,10 +904,11 @@ CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
# CONFIG_USB_GADGET_S3C_HSOTG is not set
-# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
CONFIG_USB_GADGET_M66592=y
CONFIG_USB_M66592=y
# CONFIG_USB_GADGET_AMD5536UDC is not set
@@ -939,6 +968,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -950,6 +980,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1002,8 +1033,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1063,7 +1096,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -1129,6 +1161,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1164,22 +1197,29 @@ CONFIG_DEBUG_VM=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
@@ -1188,6 +1228,7 @@ CONFIG_FTRACE=y
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1205,10 +1246,10 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_SH_STANDARD_BIOS=y
# CONFIG_EARLY_SCIF_CONSOLE is not set
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_STACK_DEBUG is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1223,7 +1264,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
# CONFIG_CRYPTO_MANAGER is not set
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
@@ -1255,11 +1295,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1322,5 +1364,6 @@ CONFIG_AUDIT_GENERIC=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/snapgear_defconfig
index 2be2d75adbb..cb919a0de4b 100644
--- a/arch/sh/configs/snapgear_defconfig
+++ b/arch/sh/configs/snapgear_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc6
-# Thu Aug 20 15:03:04 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:33:00 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -57,11 +57,12 @@ CONFIG_KERNEL_GZIP=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -95,21 +96,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
@@ -175,6 +175,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -220,6 +221,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -312,7 +314,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -374,6 +377,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -403,6 +407,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
@@ -410,7 +415,6 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -618,10 +622,7 @@ CONFIG_8139TOO_PIO=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -714,7 +715,6 @@ CONFIG_DEVPORT=y
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -736,6 +736,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -818,6 +819,7 @@ CONFIG_EXT2_FS=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -882,7 +884,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -909,6 +910,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -922,7 +924,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
index 5d970263c09..b9fe960309f 100644
--- a/arch/sh/configs/systemh_defconfig
+++ b/arch/sh/configs/systemh_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:12:28 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:35:03 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -27,7 +28,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -45,11 +54,12 @@ CONFIG_SWAP=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -83,18 +93,19 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -103,6 +114,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -115,7 +130,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -162,6 +177,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -207,6 +223,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -294,7 +311,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -402,10 +420,20 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -416,9 +444,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -495,7 +521,9 @@ CONFIG_RTC_LIB=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -559,7 +587,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
#
# Partition Types
@@ -577,6 +604,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -586,8 +614,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -595,6 +626,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -623,4 +655,5 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 7ad080e820c..2ca79ed9fb6 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:12:54 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:36:36 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -38,6 +41,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -50,11 +59,12 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
@@ -90,19 +100,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_IOREMAP_PROT=y
@@ -111,6 +122,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -123,7 +138,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -170,6 +185,7 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -215,6 +231,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -307,7 +324,8 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x009e0000
CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
#
@@ -509,6 +527,7 @@ CONFIG_IP6_NF_RAW=m
# CONFIG_BRIDGE_NF_EBTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=y
@@ -595,6 +614,7 @@ CONFIG_NET_SCH_FIFO=y
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -603,7 +623,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -616,6 +635,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -628,9 +648,9 @@ CONFIG_CONNECTOR=m
CONFIG_MTD=m
CONFIG_MTD_DEBUG=y
CONFIG_MTD_DEBUG_VERBOSE=0
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
# CONFIG_MTD_PARTITIONS is not set
-# CONFIG_MTD_TESTS is not set
#
# User Modules And Translation Layers
@@ -820,6 +840,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
@@ -832,7 +853,11 @@ CONFIG_SCSI_LOWLEVEL=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -931,6 +956,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -956,10 +982,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -1087,10 +1110,20 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
@@ -1105,9 +1138,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -1149,6 +1180,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1167,7 +1199,6 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1217,6 +1248,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1416,8 +1448,10 @@ CONFIG_XFS_FS=m
# CONFIG_XFS_POSIX_ACL is not set
# CONFIG_XFS_RT is not set
# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1489,7 +1523,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1591,6 +1624,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1621,23 +1655,29 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1650,14 +1690,13 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-# CONFIG_KMEMCHECK is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
# CONFIG_DUMP_CODE is not set
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1672,7 +1711,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
@@ -1715,11 +1753,13 @@ CONFIG_CRYPTO_ECB=y
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=y
@@ -1790,5 +1830,6 @@ CONFIG_TEXTSEARCH_FSM=m
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig
index 608fe563614..b012ca77f02 100644
--- a/arch/sh/configs/ul2_defconfig
+++ b/arch/sh/configs/ul2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:14:36 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:42:33 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,7 +30,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -40,6 +43,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,11 +61,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -91,19 +101,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -113,6 +124,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -125,7 +140,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -141,7 +156,7 @@ CONFIG_IOSCHED_NOOP=y
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System type
@@ -176,6 +191,7 @@ CONFIG_ARCH_SHMOBILE=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -233,6 +249,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -314,7 +331,8 @@ CONFIG_GUSA=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE_OVERWRITE=y
+# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/nfs ip=dhcp"
#
@@ -334,7 +352,13 @@ CONFIG_BINFMT_ELF=y
#
# Power management options (EXPERIMENTAL)
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
+CONFIG_PM_RUNTIME=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -382,6 +406,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -411,19 +436,17 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEFAULT_PS_VALUE=1
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
CONFIG_LIB80211=m
# CONFIG_LIB80211_DEBUG is not set
CONFIG_MAC80211=y
-CONFIG_MAC80211_DEFAULT_PS=y
-CONFIG_MAC80211_DEFAULT_PS_VALUE=1
-
-#
-# Rate control algorithm selection
-#
CONFIG_MAC80211_RC_PID=y
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT_PID=y
@@ -444,6 +467,7 @@ CONFIG_MAC80211_RC_DEFAULT="pid"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -453,9 +477,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -590,7 +614,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -598,6 +621,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -631,10 +655,7 @@ CONFIG_MII=y
# CONFIG_KS8842 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
CONFIG_WLAN_80211=y
CONFIG_LIBERTAS=m
@@ -648,12 +669,13 @@ CONFIG_LIBERTAS_DEBUG=y
# CONFIG_RTL8187 is not set
# CONFIG_MAC80211_HWSIM is not set
# CONFIG_P54_COMMON is not set
-# CONFIG_AR9170_USB is not set
+# CONFIG_ATH_COMMON is not set
# CONFIG_HOSTAP is not set
# CONFIG_B43 is not set
# CONFIG_B43LEGACY is not set
# CONFIG_ZD1211RW is not set
# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
# CONFIG_IWM is not set
#
@@ -750,10 +772,20 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
@@ -764,9 +796,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -813,6 +843,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICEFS is not set
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -827,9 +858,9 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
-# CONFIG_SUPERH_ON_CHIP_R8A66597 is not set
# CONFIG_USB_HWA_HCD is not set
#
@@ -893,6 +924,7 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
@@ -917,6 +949,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -949,8 +983,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1014,7 +1050,6 @@ CONFIG_CRAMFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
@@ -1091,6 +1126,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -1103,8 +1139,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DMA_API_DEBUG is not set
@@ -1112,6 +1151,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_DWARF_UNWINDER is not set
#
# Security options
@@ -1125,7 +1165,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1167,11 +1206,13 @@ CONFIG_CRYPTO_ECB=y
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
CONFIG_CRYPTO_MICHAEL_MIC=y
@@ -1235,5 +1276,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index ee1987e6cc5..9f8aee5bc55 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Thu Jun 18 13:15:28 2009
+# Linux kernel version: 2.6.31
+# Thu Sep 24 19:46:13 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,11 +14,13 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
# CONFIG_ARCH_SUSPEND_POSSIBLE is not set
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_SYS_SUPPORTS_SMP=y
CONFIG_SYS_SUPPORTS_NUMA=y
CONFIG_SYS_SUPPORTS_PCI=y
CONFIG_SYS_SUPPORTS_TMU=y
@@ -29,7 +31,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,6 +43,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -54,14 +64,12 @@ CONFIG_AUDIT_TREE=y
#
# RCU Subsystem
#
-# CONFIG_CLASSIC_RCU is not set
CONFIG_TREE_RCU=y
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -111,20 +119,20 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -134,6 +142,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -146,7 +159,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -195,6 +208,7 @@ CONFIG_CPU_SHX3=y
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
# CONFIG_CPU_SUBTYPE_SH7723 is not set
# CONFIG_CPU_SUBTYPE_SH7724 is not set
+# CONFIG_CPU_SUBTYPE_SH7757 is not set
# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
@@ -244,7 +258,6 @@ CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
@@ -252,6 +265,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -322,6 +336,7 @@ CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
# CONFIG_CRASH_DUMP is not set
CONFIG_SECCOMP=y
+# CONFIG_SMP is not set
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
@@ -334,7 +349,8 @@ CONFIG_SPARSE_IRQ=y
CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
-# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE_OVERWRITE is not set
+# CONFIG_CMDLINE_EXTEND is not set
#
# Bus options
@@ -343,6 +359,8 @@ CONFIG_PCI=y
# CONFIG_SH_PCIDMA_NONCOHERENT is not set
CONFIG_PCIEPORTBUS=y
CONFIG_PCIEAER=y
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIEAER_INJECT is not set
CONFIG_PCIEASPM=y
CONFIG_PCIEASPM_DEBUG=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
@@ -367,6 +385,7 @@ CONFIG_BINFMT_MISC=y
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
# CONFIG_HIBERNATION is not set
+# CONFIG_PM_RUNTIME is not set
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_NET=y
@@ -420,6 +439,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -442,7 +462,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -450,6 +469,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -458,7 +478,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -471,6 +490,7 @@ CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -480,9 +500,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -620,6 +640,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
CONFIG_SATA_SIL24=y
@@ -641,6 +662,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -668,6 +690,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
@@ -686,7 +709,11 @@ CONFIG_ATA_SFF=y
#
#
-# Enable only one of the two stacks, unless you know what you are doing
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -782,6 +809,7 @@ CONFIG_SKY2_DEBUG=y
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
@@ -789,10 +817,7 @@ CONFIG_SKY2_DEBUG=y
# CONFIG_JME is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -842,14 +867,19 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_SH_KEYSC is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -902,6 +932,7 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
# CONFIG_I2C_CHARDEV is not set
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOPCA=y
@@ -931,6 +962,7 @@ CONFIG_I2C_ALGOPCA=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
@@ -958,15 +990,17 @@ CONFIG_I2C_PCA_PLATFORM=y
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -987,14 +1021,24 @@ CONFIG_GPIOLIB=y
# PCI GPIO expanders:
#
# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADM1021 is not set
@@ -1045,6 +1089,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADS7828 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_VT8231 is not set
@@ -1056,9 +1101,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
-CONFIG_THERMAL_HWMON=y
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -1079,8 +1122,10 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1099,6 +1144,7 @@ CONFIG_MEDIA_SUPPORT=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1183,7 +1229,6 @@ CONFIG_LOGO_LINUX_CLUT224=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1206,6 +1251,7 @@ CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
@@ -1255,6 +1301,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1373,6 +1420,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1422,6 +1470,7 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_XATTR=y
# CONFIG_EXT4_FS_POSIX_ACL is not set
# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_JBD2=y
@@ -1431,9 +1480,11 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
CONFIG_BTRFS_FS=y
# CONFIG_BTRFS_FS_POSIX_ACL is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1499,12 +1550,12 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
@@ -1576,6 +1627,7 @@ CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1610,18 +1662,23 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1633,11 +1690,11 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffeb0000
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_BOOTMEM is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
# CONFIG_DUMP_CODE is not set
+# CONFIG_DWARF_UNWINDER is not set
# CONFIG_SH_NO_BSS_INIT is not set
#
@@ -1652,7 +1709,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -1694,11 +1750,13 @@ CONFIG_CRYPTO_CBC=y
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1764,5 +1822,6 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig
index b91fa8dbf04..4d58eb0973d 100644
--- a/arch/sh/drivers/dma/Kconfig
+++ b/arch/sh/drivers/dma/Kconfig
@@ -1,12 +1,9 @@
menu "DMA support"
-config SH_DMA_API
- bool
config SH_DMA
bool "SuperH on-chip DMA controller (DMAC) support"
depends on CPU_SH3 || CPU_SH4
- select SH_DMA_API
default n
config SH_DMA_IRQ_MULTI
@@ -19,6 +16,15 @@ config SH_DMA_IRQ_MULTI
CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \
CPU_SUBTYPE_SH7760
+config SH_DMA_API
+ depends on SH_DMA
+ bool "SuperH DMA API support"
+ default n
+ help
+	  This option enables the SuperH DMA API.
+	  If you want to use the DMA engine instead, leave this option
+	  disabled and enable DMA_ENGINE and SH_DMAE.
+
config NR_ONCHIP_DMA_CHANNELS
int
depends on SH_DMA
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
index c6068137b46..d88c9484762 100644
--- a/arch/sh/drivers/dma/Makefile
+++ b/arch/sh/drivers/dma/Makefile
@@ -2,8 +2,7 @@
# Makefile for the SuperH DMA specific kernel interface routines under Linux.
#
-obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o
-obj-$(CONFIG_SH_DMA) += dma-sh.o
+obj-$(CONFIG_SH_DMA_API) += dma-sh.o dma-api.o dma-sysfs.o
obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o
obj-$(CONFIG_G2_DMA) += dma-g2.o
obj-$(CONFIG_SH_DMABRG) += dmabrg.o
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
index 68a5f4cb034..78eed3e0bdf 100644
--- a/arch/sh/include/asm/dma-sh.h
+++ b/arch/sh/include/asm/dma-sh.h
@@ -116,4 +116,17 @@ static u32 dma_base_addr[] __maybe_unused = {
#define CHCR 0x0C
#define DMAOR 0x40
+/*
+ * For the DMA engine:
+ *
+ * SuperH DMA mode flags
+ */
+#define SHDMA_MIX_IRQ (1 << 1)
+#define SHDMA_DMAOR1 (1 << 2)
+#define SHDMA_DMAE1 (1 << 3)
+
+struct sh_dmae_pdata {
+ unsigned int mode;
+};
+
#endif /* __DMA_SH_H */
diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h
deleted file mode 100644
index d8e6bb9c0cc..00000000000
--- a/arch/sh/include/asm/perf_counter.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_SH_PERF_COUNTER_H
-#define __ASM_SH_PERF_COUNTER_H
-
-/* SH only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0
-
-#endif /* __ASM_SH_PERF_COUNTER_H */
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
new file mode 100644
index 00000000000..11a302297ab
--- /dev/null
+++ b/arch/sh/include/asm/perf_event.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_SH_PERF_EVENT_H
+#define __ASM_SH_PERF_EVENT_H
+
+/* SH only supports software events through this interface. */
+static inline void set_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#endif /* __ASM_SH_PERF_EVENT_H */
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index ca64f43abe6..53ef26ced75 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -44,7 +44,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message);
void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index f8c40cc6505..65e7bd2f224 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -31,7 +31,6 @@
#define cpu_to_node(cpu) ((void)(cpu),0)
#define parent_node(node) ((void)(node),0)
-#define node_to_cpumask(node) ((void)node, cpu_online_map)
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 925dd40d9d5..f3fd1b9eb6b 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -344,7 +344,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
#define NR_syscalls 337
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 2b84bc916bc..343ce8f073e 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -384,7 +384,7 @@
#define __NR_preadv 361
#define __NR_pwritev 362
#define __NR_rt_tgsigqueueinfo 363
-#define __NR_perf_counter_open 364
+#define __NR_perf_event_open 364
#ifdef __KERNEL__
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 35097753456..5491b094cf0 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -17,6 +17,7 @@
#include <linux/sh_timer.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <asm/dma-sh.h>
#include <cpu/sh7722.h>
static struct resource rtc_resources[] = {
@@ -373,6 +374,18 @@ static struct platform_device sci_device = {
},
};
+static struct sh_dmae_pdata dma_platform_data = {
+ .mode = 0,
+};
+
+static struct platform_device dma_device = {
+ .name = "sh-dma-engine",
+ .id = -1,
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
static struct platform_device *sh7722_devices[] __initdata = {
&cmt_device,
&tmu0_device,
@@ -385,6 +398,7 @@ static struct platform_device *sh7722_devices[] __initdata = {
&vpu_device,
&veu_device,
&jpu_device,
+ &dma_device,
};
static int __init sh7722_devices_setup(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 2c901f44695..12ff56f19c5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
+#include <asm/dma-sh.h>
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
@@ -240,6 +241,18 @@ static struct platform_device sci_device = {
},
};
+static struct sh_dmae_pdata dma_platform_data = {
+ .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1),
+};
+
+static struct platform_device dma_device = {
+ .name = "sh-dma-engine",
+ .id = -1,
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
static struct platform_device *sh7780_devices[] __initdata = {
&tmu0_device,
&tmu1_device,
@@ -249,6 +262,7 @@ static struct platform_device *sh7780_devices[] __initdata = {
&tmu5_device,
&rtc_device,
&sci_device,
+ &dma_device,
};
static int __init sh7780_devices_setup(void)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index bc4d8d75332..03b3616c80a 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -24,7 +24,6 @@
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
-#include <asm/dwarf.h>
#include <asm/stacktrace.h>
/* Reserve enough memory for two stack frames */
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
index 1719957c0a6..11f2ea556a6 100644
--- a/arch/sh/kernel/init_task.c
+++ b/arch/sh/kernel/init_task.c
@@ -17,9 +17,8 @@ struct pt_regs fake_swapper_regs;
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 60f8af4497c..7cb933ba495 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -165,11 +165,9 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
}
#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
- __attribute__((__section__(".bss.page_aligned")));
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
- __attribute__((__section__(".bss.page_aligned")));
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
/*
* allocate per-cpu stacks for hardirq and for softirq processing
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index 63ba12836ea..eb68bfdd86e 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -9,7 +9,6 @@
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
index 91fb8445a5a..287235768bc 100644
--- a/arch/sh/kernel/sys_sh64.c
+++ b/arch/sh/kernel/sys_sh64.c
@@ -23,7 +23,6 @@
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 16ba225ede8..19fd11dd987 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -352,4 +352,4 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index af6fb7410c2..5bfde6c7749 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -390,4 +390,4 @@ sys_call_table:
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 6aba9af79ea..69bb1652ecc 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -452,6 +452,12 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
u_int rm;
int ret, index;
+ /*
+ * XXX: We can't handle mixed 16/32-bit instructions yet
+ */
+ if (instruction_size(instruction) != 2)
+ return -EINVAL;
+
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
@@ -619,9 +625,9 @@ asmlinkage void do_address_error(struct pt_regs *regs,
se_user += 1;
-#ifndef CONFIG_CPU_SH2A
set_fs(USER_DS);
- if (copy_from_user(&instruction, (u16 *)(regs->pc & ~1), 2)) {
+ if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
+ sizeof(instruction))) {
set_fs(oldfs);
goto uspace_segv;
}
@@ -633,7 +639,6 @@ asmlinkage void do_address_error(struct pt_regs *regs,
"in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
current->comm, current->pid, (void *)regs->pc,
instruction);
-#endif
if (se_usermode & 2)
goto fixup;
@@ -673,12 +678,6 @@ uspace_segv:
} else {
se_sys += 1;
- if (se_kernmode_warn)
- printk(KERN_NOTICE "Unaligned kernel access "
- "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, current->pid, (void *)regs->pc,
- instruction);
-
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
@@ -692,6 +691,12 @@ uspace_segv:
die("insn faulting in do_address_error", regs, 0);
}
+ if (se_kernmode_warn)
+ printk(KERN_NOTICE "Unaligned kernel access "
+ "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, current->pid, (void *)regs->pc,
+ instruction);
+
handle_unaligned_access(instruction, regs,
&user_mem_access, 0);
set_fs(oldfs);
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 4bbce1cfa35..8f0ea5fc835 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -15,7 +15,7 @@ quiet_cmd_syscall = SYSCALL $@
export CPPFLAGS_vsyscall.lds += -P -C -Ush
vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 781b413ff82..47530104e0a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,7 +15,7 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
@@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -208,11 +208,11 @@ survive:
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index edc842ff61e..8173e38afd3 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -186,8 +186,6 @@ void __init paging_init(void)
set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
void __init mem_init(void)
{
int codesize, datasize, initsize;
@@ -226,13 +224,9 @@ void __init mem_init(void)
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END - VMALLOC_START);
-
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
"%dk data, %dk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
datasize >> 10,
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 2dcc48528f7..de0b0e88182 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -20,7 +20,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
/* Not an IO address, so reenable interrupts */
local_irq_enable();
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt or have no user
@@ -201,11 +201,11 @@ survive:
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 86b82348b97..97fca4695e0 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,7 +25,7 @@ config SPARC
select ARCH_WANT_OPTIONAL_GPIOLIB
select RTC_CLASS
select RTC_DRV_M48T59
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
@@ -47,7 +47,7 @@ config SPARC64
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 467221dd570..dfe272d1446 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -31,7 +31,6 @@ export BITS := 32
#KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
KBUILD_AFLAGS += -m32
-CPPFLAGS_vmlinux.lds += -m32
#LDFLAGS_vmlinux = -N -Ttext 0xf0004000
# Since 2.5.40, the first stage is left not btfix-ed.
@@ -45,9 +44,6 @@ else
CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
-# Undefine sparc when processing vmlinux.lds - it is used
-# And teach CPP we are doing 64 bit builds (for this case)
-CPPFLAGS_vmlinux.lds += -m64 -Usparc
LDFLAGS := -m elf64_sparc
export BITS := 64
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index 988192e8e95..c3029ad6619 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -20,6 +20,8 @@
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h
deleted file mode 100644
index 5d7a8ca0e49..00000000000
--- a/arch/sparc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_SPARC_PERF_COUNTER_H
-#define __ASM_SPARC_PERF_COUNTER_H
-
-extern void set_perf_counter_pending(void);
-
-#define PERF_COUNTER_INDEX_OFFSET 0
-
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-#else
-static inline void init_hw_perf_counters(void) { }
-#endif
-
-#endif
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
new file mode 100644
index 00000000000..7e2669894ce
--- /dev/null
+++ b/arch/sparc/include/asm/perf_event.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_SPARC_PERF_EVENT_H
+#define __ASM_SPARC_PERF_EVENT_H
+
+extern void set_perf_event_pending(void);
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+#else
+static inline void init_hw_perf_events(void) { }
+#endif
+
+#endif
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index becb6bf353a..f49e11cd4de 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* General functions that each host system must provide.
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 26cd25c0839..600a79035fa 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
-static inline cpumask_t node_to_cpumask(int node)
-{
- return numa_cpumask_lookup_table[node];
-}
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
-/*
- * Returns a pointer to the cpumask of CPUs on Node 'node'.
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node) \
- cpumask_t *v = &(numa_cpumask_lookup_table[node])
-
-#define node_to_cpumask_ptr_next(v, node) \
- v = &(numa_cpumask_lookup_table[node])
-
struct pci_bus;
#ifdef CONFIG_PCI
extern int pcibus_to_node(struct pci_bus *pbus);
@@ -71,8 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define mc_capable() (sparc64_multi_core)
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 706df669f3b..42f2316c3ea 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -395,7 +395,7 @@
#define __NR_preadv 324
#define __NR_pwritev 325
#define __NR_rt_tgsigqueueinfo 326
-#define __NR_perf_counter_open 327
+#define __NR_perf_event_open 327
#define NR_SYSCALLS 328
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index d4de32f0f8a..9d83d3bcb49 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -258,8 +258,6 @@ static inline void *vio_dring_entry(struct vio_dring_state *dr,
static inline u32 vio_dring_avail(struct vio_dring_state *dr,
unsigned int ring_size)
{
- BUILD_BUG_ON(!is_power_of_2(ring_size));
-
return (dr->pending -
((dr->prod - dr->cons) & (ring_size - 1)));
}
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 247cc620cee..5b47fab9966 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -7,7 +7,11 @@ ccflags-y := -Werror
extra-y := head_$(BITS).o
extra-y += init_task.o
-extra-y += vmlinux.lds
+
+# Undefine sparc when processing vmlinux.lds - it is used
+# And teach CPP we are doing $(BITS) builds (for this case)
+CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
+extra-y += vmlinux.lds
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
@@ -104,5 +108,5 @@ obj-$(CONFIG_AUDIT) += audit.o
audit--$(CONFIG_AUDIT) := compat_audit.o
obj-$(CONFIG_COMPAT) += $(audit--y)
-pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
+pc--$(CONFIG_PERF_EVENTS) := perf_event.o
obj-$(CONFIG_SPARC64) += $(pc--y)
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index 28125c5b3d3..5fe3d65581f 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -18,6 +18,5 @@ EXPORT_SYMBOL(init_task);
* If this is not aligned on a 8k boundry, then you should change code
* in etrap.S which assumes it.
*/
-union thread_union init_thread_union
- __attribute__((section (".data.init_task")))
- = { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 8daab33fc17..8ab1d4728a4 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -229,7 +229,7 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
tid = ((a << IMAP_AID_SHIFT) |
(n << IMAP_NID_SHIFT));
tid &= (IMAP_AID_SAFARI |
- IMAP_NID_SAFARI);;
+ IMAP_NID_SAFARI);
}
} else {
tid = cpuid << IMAP_TID_SHIFT;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 378eb53e077..b129611590a 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/smp.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
#include <asm/ptrace.h>
#include <asm/local.h>
#include <asm/pcr.h>
@@ -265,7 +265,7 @@ int __init nmi_init(void)
}
}
if (!err)
- init_hw_perf_counters();
+ init_hw_perf_events();
return err;
}
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 68ff0010707..2d94e7a03af 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/pil.h>
#include <asm/pcr.h>
@@ -15,7 +15,7 @@
/* This code is shared between various users of the performance
* counters. Users will be oprofile, pseudo-NMI watchdog, and the
- * perf_counter support layer.
+ * perf_event support layer.
*/
#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
@@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
old_regs = set_irq_regs(regs);
irq_enter();
-#ifdef CONFIG_PERF_COUNTERS
- perf_counter_do_pending();
+#ifdef CONFIG_PERF_EVENTS
+ perf_event_do_pending();
#endif
irq_exit();
set_irq_regs(old_regs);
}
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_event.c
index b1265ce8a05..2d6a1b10c81 100644
--- a/arch/sparc/kernel/perf_counter.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,8 +1,8 @@
-/* Performance counter support for sparc64.
+/* Performance event support for sparc64.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*
- * This code is based almost entirely upon the x86 perf counter
+ * This code is based almost entirely upon the x86 perf event
* code, which is:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
@@ -12,7 +12,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
@@ -46,19 +46,19 @@
* normal code.
*/
-#define MAX_HWCOUNTERS 2
+#define MAX_HWEVENTS 2
#define MAX_PERIOD ((1UL << 32) - 1)
#define PIC_UPPER_INDEX 0
#define PIC_LOWER_INDEX 1
-struct cpu_hw_counters {
- struct perf_counter *counters[MAX_HWCOUNTERS];
- unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
- unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
int enabled;
};
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
struct perf_event_map {
u16 encoding;
@@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = {
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
-static const struct perf_event_map *ultra3i_event_map(int event)
+static const struct perf_event_map *ultra3i_event_map(int event_id)
{
- return &ultra3i_perfmon_event_map[event];
+ return &ultra3i_perfmon_event_map[event_id];
}
static const struct sparc_pmu ultra3i_pmu = {
@@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = {
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
-static const struct perf_event_map *niagara2_event_map(int event)
+static const struct perf_event_map *niagara2_event_map(int event_id)
{
- return &niagara2_perfmon_event_map[event];
+ return &niagara2_perfmon_event_map[event_id];
}
static const struct sparc_pmu niagara2_pmu = {
@@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = {
static const struct sparc_pmu *sparc_pmu __read_mostly;
-static u64 event_encoding(u64 event, int idx)
+static u64 event_encoding(u64 event_id, int idx)
{
if (idx == PIC_UPPER_INDEX)
- event <<= sparc_pmu->upper_shift;
+ event_id <<= sparc_pmu->upper_shift;
else
- event <<= sparc_pmu->lower_shift;
- return event;
+ event_id <<= sparc_pmu->lower_shift;
+ return event_id;
}
static u64 mask_for_index(int idx)
@@ -151,7 +151,7 @@ static u64 nop_for_index(int idx)
sparc_pmu->lower_nop, idx);
}
-static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
u64 val, mask = mask_for_index(idx);
@@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
pcr_ops->write((val & ~mask) | hwc->config);
}
-static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
u64 mask = mask_for_index(idx);
@@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
void hw_perf_enable(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
int i;
@@ -184,9 +184,9 @@ void hw_perf_enable(void)
val = pcr_ops->read();
- for (i = 0; i < MAX_HWCOUNTERS; i++) {
- struct perf_counter *cp = cpuc->counters[i];
- struct hw_perf_counter *hwc;
+ for (i = 0; i < MAX_HWEVENTS; i++) {
+ struct perf_event *cp = cpuc->events[i];
+ struct hw_perf_event *hwc;
if (!cp)
continue;
@@ -199,7 +199,7 @@ void hw_perf_enable(void)
void hw_perf_disable(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
if (!cpuc->enabled)
@@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val)
write_pic(pic);
}
-static int sparc_perf_counter_set_period(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+static int sparc_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
@@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter,
write_pmc(idx, (u64)(-left) & 0xffffffff);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
return ret;
}
-static int sparc_pmu_enable(struct perf_counter *counter)
+static int sparc_pmu_enable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;
- sparc_pmu_disable_counter(hwc, idx);
+ sparc_pmu_disable_event(hwc, idx);
- cpuc->counters[idx] = counter;
+ cpuc->events[idx] = event;
set_bit(idx, cpuc->active_mask);
- sparc_perf_counter_set_period(counter, hwc, idx);
- sparc_pmu_enable_counter(hwc, idx);
- perf_counter_update_userpage(counter);
+ sparc_perf_event_set_period(event, hwc, idx);
+ sparc_pmu_enable_event(hwc, idx);
+ perf_event_update_userpage(event);
return 0;
}
-static u64 sparc_perf_counter_update(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+static u64 sparc_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
int shift = 64 - 32;
u64 prev_raw_count, new_raw_count;
@@ -311,79 +311,79 @@ again:
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
-static void sparc_pmu_disable(struct perf_counter *counter)
+static void sparc_pmu_disable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
clear_bit(idx, cpuc->active_mask);
- sparc_pmu_disable_counter(hwc, idx);
+ sparc_pmu_disable_event(hwc, idx);
barrier();
- sparc_perf_counter_update(counter, hwc, idx);
- cpuc->counters[idx] = NULL;
+ sparc_perf_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
}
-static void sparc_pmu_read(struct perf_counter *counter)
+static void sparc_pmu_read(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
- sparc_perf_counter_update(counter, hwc, hwc->idx);
+ struct hw_perf_event *hwc = &event->hw;
+ sparc_perf_event_update(event, hwc, hwc->idx);
}
-static void sparc_pmu_unthrottle(struct perf_counter *counter)
+static void sparc_pmu_unthrottle(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
- sparc_pmu_enable_counter(hwc, hwc->idx);
+ struct hw_perf_event *hwc = &event->hw;
+ sparc_pmu_enable_event(hwc, hwc->idx);
}
-static atomic_t active_counters = ATOMIC_INIT(0);
+static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
-void perf_counter_grab_pmc(void)
+void perf_event_grab_pmc(void)
{
- if (atomic_inc_not_zero(&active_counters))
+ if (atomic_inc_not_zero(&active_events))
return;
mutex_lock(&pmc_grab_mutex);
- if (atomic_read(&active_counters) == 0) {
+ if (atomic_read(&active_events) == 0) {
if (atomic_read(&nmi_active) > 0) {
on_each_cpu(stop_nmi_watchdog, NULL, 1);
BUG_ON(atomic_read(&nmi_active) != 0);
}
- atomic_inc(&active_counters);
+ atomic_inc(&active_events);
}
mutex_unlock(&pmc_grab_mutex);
}
-void perf_counter_release_pmc(void)
+void perf_event_release_pmc(void)
{
- if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
+ if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
on_each_cpu(start_nmi_watchdog, NULL, 1);
mutex_unlock(&pmc_grab_mutex);
}
}
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
{
- perf_counter_release_pmc();
+ perf_event_release_pmc();
}
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
{
- struct perf_counter_attr *attr = &counter->attr;
- struct hw_perf_counter *hwc = &counter->hw;
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
const struct perf_event_map *pmap;
u64 enc;
@@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
- perf_counter_grab_pmc();
- counter->destroy = hw_perf_counter_destroy;
+ perf_event_grab_pmc();
+ event->destroy = hw_perf_event_destroy;
/* We save the enable bits in the config_base. So to
* turn off sampling just write 'config', and to enable
@@ -439,16 +439,16 @@ static const struct pmu pmu = {
.unthrottle = sparc_pmu_unthrottle,
};
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
- int err = __hw_perf_counter_init(counter);
+ int err = __hw_perf_event_init(event);
if (err)
return ERR_PTR(err);
return &pmu;
}
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
{
unsigned long flags;
u64 pcr, pic;
@@ -471,16 +471,16 @@ void perf_counter_print_debug(void)
local_irq_restore(flags);
}
-static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
+static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
+ struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
- if (!atomic_read(&active_counters))
+ if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
@@ -495,32 +495,32 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
- for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
- struct perf_counter *counter = cpuc->counters[idx];
- struct hw_perf_counter *hwc;
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx < MAX_HWEVENTS; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- hwc = &counter->hw;
- val = sparc_perf_counter_update(counter, hwc, idx);
+ hwc = &event->hw;
+ val = sparc_perf_event_update(event, hwc, idx);
if (val & (1ULL << 31))
continue;
- data.period = counter->hw.last_period;
- if (!sparc_perf_counter_set_period(counter, hwc, idx))
+ data.period = event->hw.last_period;
+ if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data, regs))
- sparc_pmu_disable_counter(hwc, idx);
+ if (perf_event_overflow(event, 1, &data, regs))
+ sparc_pmu_disable_event(hwc, idx);
}
return NOTIFY_STOP;
}
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+ .notifier_call = perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
@@ -536,9 +536,9 @@ static bool __init supported_pmu(void)
return false;
}
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
{
- pr_info("Performance counters: ");
+ pr_info("Performance events: ");
if (!supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
@@ -547,10 +547,10 @@ void __init init_hw_perf_counters(void)
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
- /* All sparc64 PMUs currently have 2 counters. But this simple
- * driver only supports one active counter at a time.
+ /* All sparc64 PMUs currently have 2 events. But this simple
+ * driver only supports one active event at a time.
*/
- perf_max_counters = 1;
+ perf_max_events = 1;
- register_die_notifier(&perf_counter_nmi_notifier);
+ register_die_notifier(&perf_event_nmi_notifier);
}
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index f5000a460c0..04e28b2671c 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -16,7 +16,6 @@
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
-#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index 15c2d752b2b..a63c5d2d984 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -3,10 +3,11 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/utsname.h>
#include <asm/utrap.h>
#include <asm/signal.h>
+struct new_utsname;
+
extern asmlinkage unsigned long sys_getpagesize(void);
extern asmlinkage unsigned long sparc_brk(unsigned long brk);
extern asmlinkage long sparc_pipe(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 04181577cb6..0f1658d3749 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 91b06b7f7ac..009825f6e73 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
- .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
+ .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
#endif /* CONFIG_COMPAT */
@@ -158,4 +158,4 @@ sys_call_table:
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
- .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+ .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 54114ad0bde..dc7c3b17a15 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -472,7 +472,7 @@ void __init mem_init(void)
reservedpages++;
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT - 10),
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT - 10),
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 0728def3223..fc633dbacf8 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -96,11 +96,10 @@ CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,)
$(call cc-option, -fno-stack-protector,) \
$(call cc-option, -fno-stack-protector-all,)
-CONFIG_KERNEL_STACK_ORDER ?= 2
-STACK_SIZE := $(shell echo $$[ 4096 * (1 << $(CONFIG_KERNEL_STACK_ORDER)) ] )
-
-CPPFLAGS_vmlinux.lds = -U$(SUBARCH) -DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
- -DELF_FORMAT="$(ELF_FORMAT)" -DKERNEL_STACK_SIZE=$(STACK_SIZE)
+# Options used by linker script
+export LDS_START := $(START)
+export LDS_ELF_ARCH := $(ELF_ARCH)
+export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index f114813ae25..a74245ae3a8 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -533,7 +533,7 @@ static int eth_parse(char *str, int *index_out, char **str_out,
char **error_out)
{
char *end;
- int n, err = -EINVAL;;
+ int n, err = -EINVAL;
n = simple_strtoul(str, &end, 0);
if (end == str) {
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 8f05d4d9da1..635d16d90a8 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -106,7 +106,7 @@ static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
#define MAX_DEV (16)
-static struct block_device_operations ubd_blops = {
+static const struct block_device_operations ubd_blops = {
.owner = THIS_MODULE,
.open = ubd_open,
.release = ubd_release,
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 37ecc5577a9..ac55b9efa1c 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -16,11 +16,7 @@
. = ALIGN(4096);
.note : { *(.note.*) }
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(0)
BUG_TABLE
@@ -43,28 +39,17 @@
}
.init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
+ INIT_SETUP(0)
}
- . = ALIGN(32);
- .data.percpu : {
- __per_cpu_start = . ;
- *(.data.percpu)
- __per_cpu_end = . ;
- }
+ PERCPU(32)
.initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
+ INIT_CALLS
}
.con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
+ CON_INITCALL
}
.uml.initcall.init : {
@@ -118,8 +103,6 @@
. = ALIGN(4096);
.init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
+ INIT_RAM_FS
}
diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h
index 313ebb8a256..fb3c05a0cbb 100644
--- a/arch/um/include/asm/hardirq.h
+++ b/arch/um/include/asm/hardirq.h
@@ -1,25 +1 @@
-/* (c) 2004 cw@f00f.org, GPLv2 blah blah */
-
-#ifndef __ASM_UM_HARDIRQ_H
-#define __ASM_UM_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-/* NOTE: When SMP works again we might want to make this
- * ____cacheline_aligned or maybe use per_cpu state? --cw */
-typedef struct {
- unsigned int __softirq_pending;
-} irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>
-
-/* As this would be very strange for UML to get we BUG() after the
- * printk. */
-static inline void ack_bad_irq(unsigned int irq)
-{
- printk(KERN_ERR "unexpected IRQ %02x\n", irq);
- BUG();
-}
-
-#endif /* __ASM_UM_HARDIRQ_H */
+#include <asm-generic/hardirq.h>
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 54f42e8b010..34d813011b7 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();
if(prev != next){
- cpu_clear(cpu, prev->cpu_vm_mask);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
if(next != &init_mm)
__switch_mm(&next->context.id);
}
diff --git a/arch/um/include/shared/ptrace_user.h b/arch/um/include/shared/ptrace_user.h
index 4bce6e01288..7fd8539bc19 100644
--- a/arch/um/include/shared/ptrace_user.h
+++ b/arch/um/include/shared/ptrace_user.h
@@ -29,7 +29,7 @@ extern int ptrace_setregs(long pid, unsigned long *regs_in);
* recompilation. So, we use PTRACE_OLDSETOPTIONS in UML.
* We also want to be able to build the kernel on 2.4, which doesn't
* have PTRACE_OLDSETOPTIONS. So, if it is missing, we declare
- * PTRACE_OLDSETOPTIONS to to be the same as PTRACE_SETOPTIONS.
+ * PTRACE_OLDSETOPTIONS to be the same as PTRACE_SETOPTIONS.
*
* On architectures, that start to support PTRACE_O_TRACESYSGOOD on
* linux 2.6, PTRACE_OLDSETOPTIONS never is defined, and also isn't
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 388ec0a3ea9..1119233597a 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,6 +3,9 @@
# Licensed under the GPL
#
+CPPFLAGS_vmlinux.lds := -U$(SUBARCH) -DSTART=$(LDS_START) \
+ -DELF_ARCH=$(LDS_ELF_ARCH) \
+ -DELF_FORMAT=$(LDS_ELF_FORMAT)
extra-y := vmlinux.lds
clean-files :=
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 715a188c047..7fcad58e216 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -16,11 +16,7 @@ SECTIONS
_text = .;
_stext = .;
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
@@ -96,8 +92,7 @@ SECTIONS
.init_array : { *(.init_array) }
.fini_array : { *(.fini_array) }
.data : {
- . = ALIGN(KERNEL_STACK_SIZE); /* init_task */
- *(.data.init_task)
+ INIT_TASK_DATA(KERNEL_STACK_SIZE)
. = ALIGN(KERNEL_STACK_SIZE);
*(.data.init_irqstack)
DATA_DATA
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
index b25121b537d..8aa77b61a5f 100644
--- a/arch/um/kernel/init_task.c
+++ b/arch/um/kernel/init_task.c
@@ -30,9 +30,8 @@ EXPORT_SYMBOL(init_task);
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
union thread_union cpu0_irqstack
__attribute__((__section__(".data.init_irqstack"))) =
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 61d7e6138ff..a5d5e70cf6f 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -77,7 +77,7 @@ void __init mem_init(void)
num_physpages = totalram_pages;
max_pfn = totalram_pages;
printk(KERN_INFO "Memory: %luk available\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
+ nr_free_pages() << (PAGE_SHIFT-10));
kmalloc_ok = 1;
#ifdef CONFIG_HIGHMEM
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 0cd9a7a05e7..8bfd1e90581 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -38,10 +38,10 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
*pte = pte_mkread(*pte);
return 0;
- out_pmd:
- pud_free(mm, pud);
out_pte:
pmd_free(mm, pmd);
+ out_pmd:
+ pud_free(mm, pud);
out:
return -ENOMEM;
}
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 98351c78bc8..106bf27e2a9 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
int i;
for (i = 0; i < ncpus; ++i)
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
cpu_clear(me, cpu_online_map);
cpu_set(me, cpu_online_map);
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 2ebd39765db..e7a6cca667a 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -22,11 +22,7 @@ SECTIONS
_text = .;
_stext = .;
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
.text :
@@ -52,8 +48,7 @@ SECTIONS
init.data : { INIT_DATA }
.data :
{
- . = ALIGN(KERNEL_STACK_SIZE); /* init_task */
- *(.data.init_task)
+ INIT_TASK_DATA(KERNEL_STACK_SIZE)
. = ALIGN(KERNEL_STACK_SIZE);
*(.data.init_irqstack)
DATA_DATA
@@ -81,19 +76,10 @@ SECTIONS
_edata = .;
PROVIDE (edata = .);
. = ALIGN(PAGE_SIZE);
- .sbss :
- {
- __bss_start = .;
- PROVIDE(_bss_start = .);
- *(.sbss)
- *(.scommon)
- }
- .bss :
- {
- *(.dynbss)
- *(.bss)
- *(COMMON)
- }
+ __bss_start = .;
+ PROVIDE(_bss_start = .);
+ SBSS(0)
+ BSS(0)
_end = .;
PROVIDE (end = .);
diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
index f8aeb448aab..16e49bfa2b4 100644
--- a/arch/um/kernel/vmlinux.lds.S
+++ b/arch/um/kernel/vmlinux.lds.S
@@ -1,3 +1,6 @@
+
+KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
+
#ifdef CONFIG_LD_SCRIPT_STATIC
#include "uml.lds.S"
#else
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 51c59015b28..93698794aa3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,7 +24,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
- select HAVE_PERF_COUNTERS if (!M386 && !M486)
+ select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -1204,6 +1204,10 @@ config ARCH_DISCONTIGMEM_DEFAULT
def_bool y
depends on NUMA && X86_32
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+ depends on X86_64 && PROC_KCORE
+
config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on X86_64
@@ -1662,6 +1666,8 @@ source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
+source "drivers/sfi/Kconfig"
+
config X86_APM_BOOT
bool
default y
@@ -1857,7 +1863,7 @@ config PCI_DIRECT
config PCI_MMCONFIG
def_bool y
- depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
+ depends on X86_32 && PCI && (ACPI || SFI) && (PCI_GOMMCONFIG || PCI_GOANY)
config PCI_OLPC
def_bool y
@@ -1895,7 +1901,7 @@ config DMAR_DEFAULT_ON
config DMAR_BROKEN_GFX_WA
def_bool n
prompt "Workaround broken graphics drivers (going away soon)"
- depends on DMAR
+ depends on DMAR && BROKEN
---help---
Current Graphics drivers tend to use physical address
for DMA and avoid using DMA APIs. Setting this config
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 7983c420eaf..a012ee8ef80 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -179,8 +179,8 @@ archclean:
define archhelp
echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)'
echo ' install - Install kernel using'
- echo ' (your) ~/bin/installkernel or'
- echo ' (distribution) /sbin/installkernel or'
+ echo ' (your) ~/bin/$(INSTALLKERNEL) or'
+ echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 75e4f001e70..f543b70ffae 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -23,13 +23,14 @@
*/
.text
+#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>
- .section ".text.head","ax",@progbits
+ __HEAD
ENTRY(startup_32)
cld
/*
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index f62c284db9e..077e1b69198 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -24,6 +24,7 @@
.code32
.text
+#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
@@ -33,7 +34,7 @@
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
- .section ".text.head"
+ __HEAD
.code32
ENTRY(startup_32)
cld
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index cc353e1b3ff..f4193bb4878 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -1,3 +1,5 @@
+#include <asm-generic/vmlinux.lds.h>
+
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
#undef i386
@@ -18,9 +20,9 @@ SECTIONS
* address 0.
*/
. = 0;
- .text.head : {
+ .head.text : {
_head = . ;
- *(.text.head)
+ HEAD_TEXT
_ehead = . ;
}
.rodata.compressed : {
diff --git a/arch/x86/boot/install.sh b/arch/x86/boot/install.sh
index 8d60ee15dfd..d13ec1c3864 100644
--- a/arch/x86/boot/install.sh
+++ b/arch/x86/boot/install.sh
@@ -33,8 +33,8 @@ verify "$3"
# User may have a custom install script
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ba331bfd111..74619c4f9fd 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -831,5 +831,5 @@ ia32_sys_call_table:
.quad compat_sys_preadv
.quad compat_sys_pwritev
.quad compat_sys_rt_tgsigqueueinfo /* 335 */
- .quad sys_perf_counter_open
+ .quad sys_perf_event_open
ia32_syscall_end:
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 20d1465a2ab..4518dc50090 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -144,7 +144,6 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
#else /* !CONFIG_ACPI */
-#define acpi_disabled 1
#define acpi_lapic 0
#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 5d367caa0e3..549860d3be8 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_CACHE_H
#define _ASM_X86_CACHE_H
+#include <linux/linkage.h>
+
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
@@ -13,7 +15,7 @@
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp \
__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
- __attribute__((__section__(".data.page_aligned")))
+ __page_aligned_data
#endif
#endif
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 5e3f2044f0d..f5693c81a1d 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
#endif
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39..4a2d4e0c18d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
percpu_write(cpu_tlbstate.active_mm, next);
#endif
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
/* Re-load page tables */
load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index e63cf7d441e..139d4c1a33a 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -40,8 +40,7 @@ extern unsigned int nmi_watchdog;
#define NMI_INVALID 3
struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
+extern int proc_nmi_enabled(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index f76a162c082..ada8c201d51 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -143,7 +143,11 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
static inline const struct cpumask *
cpumask_of_pcibus(const struct pci_bus *bus)
{
- return cpumask_of_node(__pcibus_to_node(bus));
+ int node;
+
+ node = __pcibus_to_node(bus);
+ return (node == -1) ? cpu_online_mask :
+ cpumask_of_node(node);
}
#endif
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_event.h
index e7b7c938ae2..ad7ce3fd506 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -1,8 +1,8 @@
-#ifndef _ASM_X86_PERF_COUNTER_H
-#define _ASM_X86_PERF_COUNTER_H
+#ifndef _ASM_X86_PERF_EVENT_H
+#define _ASM_X86_PERF_EVENT_H
/*
- * Performance counter hw details:
+ * Performance event hw details:
*/
#define X86_PMC_MAX_GENERIC 8
@@ -43,7 +43,7 @@
union cpuid10_eax {
struct {
unsigned int version_id:8;
- unsigned int num_counters:8;
+ unsigned int num_events:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
@@ -52,7 +52,7 @@ union cpuid10_eax {
union cpuid10_edx {
struct {
- unsigned int num_counters_fixed:4;
+ unsigned int num_events_fixed:4;
unsigned int reserved:28;
} split;
unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {
/*
- * Fixed-purpose performance counters:
+ * Fixed-purpose performance events:
*/
/*
@@ -87,22 +87,22 @@ union cpuid10_edx {
/*
* We model BTS tracing as another fixed-mode PMC.
*
- * We choose a value in the middle of the fixed counter range, since lower
- * values are used by actual fixed counters and higher values are used
+ * We choose a value in the middle of the fixed event range, since lower
+ * values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-extern void perf_counters_lapic_init(void);
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+extern void perf_events_lapic_init(void);
-#define PERF_COUNTER_INDEX_OFFSET 0
+#define PERF_EVENT_INDEX_OFFSET 0
#else
-static inline void init_hw_perf_counters(void) { }
-static inline void perf_counters_lapic_init(void) { }
+static inline void init_hw_perf_events(void) { }
+static inline void perf_events_lapic_init(void) { }
#endif
-#endif /* _ASM_X86_PERF_COUNTER_H */
+#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7b467bf3c68..d1f4a760be2 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -277,6 +277,7 @@ static inline pteval_t pte_flags(pte_t pte)
typedef struct page *pgtable_t;
extern pteval_t __supported_pte_mask;
+extern void set_nx(void);
extern int nx_enabled;
#define pgprot_writecombine pgprot_writecombine
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166ae..1e796782cd7 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
smp_ops.send_call_func_single_ipi(cpu);
}
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d82f39bb790..8d33bc5462d 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -1,7 +1,7 @@
/*
* Access to user system call parameters and results
*
- * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -16,13 +16,13 @@
#include <linux/sched.h>
#include <linux/err.h>
-static inline long syscall_get_nr(struct task_struct *task,
- struct pt_regs *regs)
+/*
+ * Only the low 32 bits of orig_ax are meaningful, so we return int.
+ * This importantly ignores the high bits on 64-bit, so comparisons
+ * sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
- /*
- * We always sign-extend a -1 value being set here,
- * so this is always either -1L or a syscall number.
- */
return regs->orig_ax;
}
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 6f0695d744b..25a92842dd9 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -165,21 +165,11 @@ static inline int numa_node_id(void)
return 0;
}
-static inline int cpu_to_node(int cpu)
-{
- return 0;
-}
-
static inline int early_cpu_to_node(int cpu)
{
return 0;
}
-static inline const struct cpumask *cpumask_of_node(int node)
-{
- return cpu_online_mask;
-}
-
static inline void setup_node_to_cpumask_map(void) { }
#endif
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 8deaada61bc..6fb3c209a7e 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -341,7 +341,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index b9f3c60de5f..8d3ad0adbc6 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
__SYSCALL(__NR_pwritev, sys_pwritev)
#define __NR_rt_tgsigqueueinfo 297
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open 298
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open 298
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4ba419b668a..d8e5d0cdd67 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_INTEL_TXT) += tboot.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
+obj-$(CONFIG_SFI) += sfi.o
obj-y += reboot.o
obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a58ef98be15..894aa97f071 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -14,7 +14,7 @@
* Mikael Pettersson : PM converted to driver model.
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
@@ -35,7 +35,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
@@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
apic_write(APIC_ESR, 0);
}
#endif
- perf_counters_lapic_init();
+ perf_events_lapic_init();
preempt_disable();
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f..dc69f28489f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
if (cfg) {
- if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+ if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
kfree(cfg);
cfg = NULL;
- } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+ } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
GFP_ATOMIC, node)) {
free_cpumask_var(cfg->domain);
kfree(cfg);
cfg = NULL;
- } else {
- cpumask_clear(cfg->domain);
- cpumask_clear(cfg->old_domain);
}
}
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index cb66a22d98a..7ff61d6a188 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -508,14 +508,14 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
/*
* proc handler for /proc/sys/kernel/nmi
*/
-int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
+int proc_nmi_enabled(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
int old_state;
nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
old_state = nmi_watchdog_enabled;
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, buffer, length, ppos);
if (!!old_state == !!nmi_watchdog_enabled)
return 0;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8dd30638fe4..68537e957a9 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f32fa71ccf9..c910a716a71 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -184,7 +184,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
* approved Athlon
*/
WARN_ONCE(1, "WARNING: This combination of AMD"
- "processors is not suitable for SMP.\n");
+ " processors is not suitable for SMP.\n");
if (!test_taint(TAINT_UNSAFE_SMP))
add_taint(TAINT_UNSAFE_SMP);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2fea97eccf7..cc25c2b4a56 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <asm/stackprotector.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
@@ -869,7 +869,7 @@ void __init identify_boot_cpu(void)
#else
vgetcpu_set_mode();
#endif
- init_hw_perf_counters();
+ init_hw_perf_events();
}
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 7029f0e2aca..472763d9209 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -98,8 +98,9 @@ static struct notifier_block mce_raise_nb = {
};
/* Inject mce on current CPU */
-static int raise_local(struct mce *m)
+static int raise_local(void)
{
+ struct mce *m = &__get_cpu_var(injectm);
int context = MCJ_CTX(m->inject_flags);
int ret = 0;
int cpu = m->extcpu;
@@ -167,12 +168,12 @@ static void raise_mce(struct mce *m)
}
cpu_relax();
}
- raise_local(m);
+ raise_local();
put_cpu();
put_online_cpus();
} else
#endif
- raise_local(m);
+ raise_local();
}
/* Error injection interface */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2f5aab26320..4b2af86e3e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -305,13 +305,25 @@ static int msr_to_offset(u32 msr)
static u64 mce_rdmsrl(u32 msr)
{
u64 v;
+
if (__get_cpu_var(injectm).finished) {
int offset = msr_to_offset(msr);
+
if (offset < 0)
return 0;
return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
}
- rdmsrl(msr, v);
+
+ if (rdmsrl_safe(msr, &v)) {
+ WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+ /*
+ * Return zero in case the access faulted. This should
+ * not happen normally but can happen if the CPU does
+ * something weird, or if the code is buggy.
+ */
+ v = 0;
+ }
+
return v;
}
@@ -319,6 +331,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
{
if (__get_cpu_var(injectm).finished) {
int offset = msr_to_offset(msr);
+
if (offset >= 0)
*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
return;
@@ -415,7 +428,7 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
m->ip = mce_rdmsrl(rip_msr);
}
-#ifdef CONFIG_X86_LOCAL_APIC
+#ifdef CONFIG_X86_LOCAL_APIC
/*
* Called after interrupts have been reenabled again
* when a MCE happened during an interrupts off region
@@ -1172,6 +1185,7 @@ static int mce_banks_init(void)
return -ENOMEM;
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
+
b->ctl = -1ULL;
b->init = 1;
}
@@ -1203,6 +1217,7 @@ static int __cpuinit mce_cap_init(void)
banks = b;
if (!mce_banks) {
int err = mce_banks_init();
+
if (err)
return err;
}
@@ -1237,6 +1252,7 @@ static void mce_init(void)
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
+
if (!b->init)
continue;
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
@@ -1626,6 +1642,7 @@ static int mce_disable(void)
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
+
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), 0);
}
@@ -1911,6 +1928,7 @@ static void mce_disable_cpu(void *h)
cmci_clear();
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
+
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), 0);
}
@@ -1928,6 +1946,7 @@ static void mce_reenable_cpu(void *h)
cmci_reenable();
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
+
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 63a56d147e4..b3a1dba7533 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -34,20 +34,31 @@
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL (300 * HZ)
-static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
-static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
-static DEFINE_PER_CPU(bool, thermal_throttle_active);
+/*
+ * Current thermal throttling state:
+ */
+struct thermal_state {
+ bool is_throttled;
+
+ u64 next_check;
+ unsigned long throttle_count;
+ unsigned long last_throttle_count;
+};
+
+static DEFINE_PER_CPU(struct thermal_state, thermal_state);
-static atomic_t therm_throt_en = ATOMIC_INIT(0);
+static atomic_t therm_throt_en = ATOMIC_INIT(0);
#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name) \
static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
#define define_therm_throt_sysdev_show_func(name) \
-static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
+ \
+static ssize_t therm_throt_sysdev_show_##name( \
+ struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
{ \
unsigned int cpu = dev->id; \
ssize_t ret; \
@@ -55,7 +66,7 @@ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
preempt_disable(); /* CPU hotplug */ \
if (cpu_online(cpu)) \
ret = sprintf(buf, "%lu\n", \
- per_cpu(thermal_throttle_##name, cpu)); \
+ per_cpu(thermal_state, cpu).name); \
else \
ret = 0; \
preempt_enable(); \
@@ -63,11 +74,11 @@ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
return ret; \
}
-define_therm_throt_sysdev_show_func(count);
-define_therm_throt_sysdev_one_ro(count);
+define_therm_throt_sysdev_show_func(throttle_count);
+define_therm_throt_sysdev_one_ro(throttle_count);
static struct attribute *thermal_throttle_attrs[] = {
- &attr_count.attr,
+ &attr_throttle_count.attr,
NULL
};
@@ -93,33 +104,39 @@ static struct attribute_group thermal_throttle_attr_group = {
* 1 : Event should be logged further, and a message has been
* printed to the syslog.
*/
-static int therm_throt_process(int curr)
+static int therm_throt_process(bool is_throttled)
{
- unsigned int cpu = smp_processor_id();
- __u64 tmp_jiffs = get_jiffies_64();
- bool was_throttled = __get_cpu_var(thermal_throttle_active);
- bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
+ struct thermal_state *state;
+ unsigned int this_cpu;
+ bool was_throttled;
+ u64 now;
+
+ this_cpu = smp_processor_id();
+ now = get_jiffies_64();
+ state = &per_cpu(thermal_state, this_cpu);
+
+ was_throttled = state->is_throttled;
+ state->is_throttled = is_throttled;
if (is_throttled)
- __get_cpu_var(thermal_throttle_count)++;
+ state->throttle_count++;
- if (!(was_throttled ^ is_throttled) &&
- time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+ if (time_before64(now, state->next_check) &&
+ state->throttle_count != state->last_throttle_count)
return 0;
- __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
+ state->next_check = now + CHECK_INTERVAL;
+ state->last_throttle_count = state->throttle_count;
/* if we just entered the thermal event */
if (is_throttled) {
- printk(KERN_CRIT "CPU%d: Temperature above threshold, "
- "cpu clock throttled (total events = %lu)\n",
- cpu, __get_cpu_var(thermal_throttle_count));
+ printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);
add_taint(TAINT_MACHINE_CHECK);
return 1;
}
if (was_throttled) {
- printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+ printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
return 1;
}
@@ -213,7 +230,7 @@ static void intel_thermal_interrupt(void)
__u64 msr_val;
rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
- if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
+ if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
mce_log_therm_throt_event(msr_val);
}
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c
index a6c8b27553c..a3c7adb06b7 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1,5 +1,5 @@
/*
- * Performance counter x86 architecture code
+ * Performance events x86 architecture code
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -11,7 +11,7 @@
* For licencing details see kernel-base/COPYING
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
@@ -27,10 +27,10 @@
#include <asm/stacktrace.h>
#include <asm/nmi.h>
-static u64 perf_counter_mask __read_mostly;
+static u64 perf_event_mask __read_mostly;
-/* The maximal number of PEBS counters: */
-#define MAX_PEBS_COUNTERS 4
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS 4
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24
@@ -65,11 +65,11 @@ struct debug_store {
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
- u64 pebs_counter_reset[MAX_PEBS_COUNTERS];
+ u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
-struct cpu_hw_counters {
- struct perf_counter *counters[X86_PMC_IDX_MAX];
+struct cpu_hw_events {
+ struct perf_event *events[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long interrupts;
@@ -86,17 +86,17 @@ struct x86_pmu {
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(void);
- void (*enable)(struct hw_perf_counter *, int);
- void (*disable)(struct hw_perf_counter *, int);
+ void (*enable)(struct hw_perf_event *, int);
+ void (*disable)(struct hw_perf_event *, int);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
u64 (*raw_event)(u64);
int max_events;
- int num_counters;
- int num_counters_fixed;
- int counter_bits;
- u64 counter_mask;
+ int num_events;
+ int num_events_fixed;
+ int event_bits;
+ u64 event_mask;
int apic;
u64 max_period;
u64 intel_ctrl;
@@ -106,7 +106,7 @@ struct x86_pmu {
static struct x86_pmu x86_pmu __read_mostly;
-static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
@@ -124,35 +124,35 @@ static const u64 p6_perfmon_event_map[] =
[PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
};
-static u64 p6_pmu_event_map(int event)
+static u64 p6_pmu_event_map(int hw_event)
{
- return p6_perfmon_event_map[event];
+ return p6_perfmon_event_map[hw_event];
}
/*
- * Counter setting that is specified not to count anything.
+ * Event setting that is specified not to count anything.
* We use this to effectively disable a counter.
*
* L2_RQSTS with 0 MESI unit mask.
*/
-#define P6_NOP_COUNTER 0x0000002EULL
+#define P6_NOP_EVENT 0x0000002EULL
-static u64 p6_pmu_raw_event(u64 event)
+static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
#define P6_EVNTSEL_INV_MASK 0x00800000ULL
-#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL
+#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
#define P6_EVNTSEL_MASK \
(P6_EVNTSEL_EVENT_MASK | \
P6_EVNTSEL_UNIT_MASK | \
P6_EVNTSEL_EDGE_MASK | \
P6_EVNTSEL_INV_MASK | \
- P6_EVNTSEL_COUNTER_MASK)
+ P6_EVNTSEL_REG_MASK)
- return event & P6_EVNTSEL_MASK;
+ return hw_event & P6_EVNTSEL_MASK;
}
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};
-static u64 intel_pmu_event_map(int event)
+static u64 intel_pmu_event_map(int hw_event)
{
- return intel_perfmon_event_map[event];
+ return intel_perfmon_event_map[hw_event];
}
/*
- * Generalized hw caching related event table, filled
+ * Generalized hw caching related hw_event table, filled
* in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'event makes no sense on
- * this CPU', any other value means the raw event
+ * 'not supported', -1 means 'hw_event makes no sense on
+ * this CPU', any other value means the raw hw_event
* ID.
*/
@@ -463,22 +463,22 @@ static const u64 atom_hw_cache_event_ids
},
};
-static u64 intel_pmu_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
#define CORE_EVNTSEL_MASK \
(CORE_EVNTSEL_EVENT_MASK | \
CORE_EVNTSEL_UNIT_MASK | \
CORE_EVNTSEL_EDGE_MASK | \
CORE_EVNTSEL_INV_MASK | \
- CORE_EVNTSEL_COUNTER_MASK)
+ CORE_EVNTSEL_REG_MASK)
- return event & CORE_EVNTSEL_MASK;
+ return hw_event & CORE_EVNTSEL_MASK;
}
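/*
 * [Editorial sketch, not part of the patch] The *_raw_event() helpers above
 * only sanitize a user-supplied PERF_TYPE_RAW config: the event-select,
 * unit-mask, edge, invert and counter-mask (cmask) fields are kept, while
 * everything else (enable, interrupt and similar control bits) is cleared
 * and later filled in by the kernel. A minimal equivalent of the Intel
 * variant, with the masks written out inline, is:
 */
static unsigned long long sanitize_raw_config(unsigned long long cfg)
{
	const unsigned long long allowed =
		0x000000FFULL |		/* event select         */
		0x0000FF00ULL |		/* unit mask            */
		0x00040000ULL |		/* edge detect          */
		0x00800000ULL |		/* invert               */
		0xFF000000ULL;		/* counter mask (cmask) */

	/* e.g. 0x1000000c3ULL becomes 0xc3: the stray high bit is dropped. */
	return cfg & allowed;
}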
static const u64 amd_hw_cache_event_ids
@@ -585,39 +585,39 @@ static const u64 amd_perfmon_event_map[] =
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
};
-static u64 amd_pmu_event_map(int event)
+static u64 amd_pmu_event_map(int hw_event)
{
- return amd_perfmon_event_map[event];
+ return amd_perfmon_event_map[hw_event];
}
-static u64 amd_pmu_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
#define K7_EVNTSEL_INV_MASK 0x000800000ULL
-#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL
+#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
#define K7_EVNTSEL_MASK \
(K7_EVNTSEL_EVENT_MASK | \
K7_EVNTSEL_UNIT_MASK | \
K7_EVNTSEL_EDGE_MASK | \
K7_EVNTSEL_INV_MASK | \
- K7_EVNTSEL_COUNTER_MASK)
+ K7_EVNTSEL_REG_MASK)
- return event & K7_EVNTSEL_MASK;
+ return hw_event & K7_EVNTSEL_MASK;
}
/*
- * Propagate counter elapsed time into the generic counter.
- * Can only be executed on the CPU where the counter is active.
+ * Propagate event elapsed time into the generic event.
+ * Can only be executed on the CPU where the event is active.
* Returns the delta events processed.
*/
static u64
-x86_perf_counter_update(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+x86_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
- int shift = 64 - x86_pmu.counter_bits;
+ int shift = 64 - x86_pmu.event_bits;
u64 prev_raw_count, new_raw_count;
s64 delta;
@@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter,
return 0;
/*
- * Careful: an NMI might modify the previous counter value.
+ * Careful: an NMI might modify the previous event value.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
- * count to the generic counter atomically:
+ * count to the generic event atomically:
*/
again:
prev_raw_count = atomic64_read(&hwc->prev_count);
- rdmsrl(hwc->counter_base + idx, new_raw_count);
+ rdmsrl(hwc->event_base + idx, new_raw_count);
if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -642,7 +642,7 @@ again:
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
- * (counter-)time and add that to the generic counter.
+ * (event-)time and add that to the generic event.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
@@ -650,13 +650,13 @@ again:
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
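/*
 * [Editorial sketch, not part of the patch] The shift pair in
 * x86_perf_event_update() is the usual sign-extension trick for counters
 * narrower than 64 bits: shifting both raw values up by (64 - event_bits)
 * before subtracting, then arithmetic-shifting the difference back down,
 * yields the correct signed delta even when the hardware counter wrapped.
 * A stand-alone version, with the width passed explicitly, could be:
 */
static long long counter_delta(unsigned long long prev,
			       unsigned long long cur, int width)
{
	int shift = 64 - width;
	long long delta = (cur << shift) - (prev << shift);

	return delta >> shift;	/* arithmetic shift restores the sign */
}
/* counter_delta(0xFFFFFFF0, 0x00000010, 32) == 0x20, despite the wrap. */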
-static atomic_t active_counters;
+static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
@@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void)
if (nmi_watchdog == NMI_LOCAL_APIC)
disable_lapic_nmi_watchdog();
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < x86_pmu.num_events; i++) {
if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < x86_pmu.num_events; i++) {
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
@@ -685,7 +685,7 @@ eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu.eventsel + i);
- i = x86_pmu.num_counters;
+ i = x86_pmu.num_events;
perfctr_fail:
for (i--; i >= 0; i--)
@@ -703,7 +703,7 @@ static void release_pmc_hardware(void)
#ifdef CONFIG_X86_LOCAL_APIC
int i;
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < x86_pmu.num_events; i++) {
release_perfctr_nmi(x86_pmu.perfctr + i);
release_evntsel_nmi(x86_pmu.eventsel + i);
}
@@ -720,7 +720,7 @@ static inline bool bts_available(void)
static inline void init_debug_store_on_cpu(int cpu)
{
- struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
return;
@@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu)
static inline void fini_debug_store_on_cpu(int cpu)
{
- if (!per_cpu(cpu_hw_counters, cpu).ds)
+ if (!per_cpu(cpu_hw_events, cpu).ds)
return;
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
@@ -751,12 +751,12 @@ static void release_bts_hardware(void)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
continue;
- per_cpu(cpu_hw_counters, cpu).ds = NULL;
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
kfree((void *)(unsigned long)ds->bts_buffer_base);
kfree(ds);
@@ -796,7 +796,7 @@ static int reserve_bts_hardware(void)
ds->bts_interrupt_threshold =
ds->bts_absolute_maximum - BTS_OVFL_TH;
- per_cpu(cpu_hw_counters, cpu).ds = ds;
+ per_cpu(cpu_hw_events, cpu).ds = ds;
err = 0;
}
@@ -812,9 +812,9 @@ static int reserve_bts_hardware(void)
return err;
}
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
{
- if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
+ if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
release_bts_hardware();
mutex_unlock(&pmc_reserve_mutex);
@@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void)
}
static inline int
-set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
unsigned int cache_type, cache_op, cache_result;
u64 config, val;
@@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config)
static void intel_pmu_disable_bts(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;
if (!cpuc->ds)
@@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void)
/*
* Setup the hardware configuration for a given attr_type
*/
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
{
- struct perf_counter_attr *attr = &counter->attr;
- struct hw_perf_counter *hwc = &counter->hw;
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
u64 config;
int err;
@@ -909,22 +909,22 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
return -ENODEV;
err = 0;
- if (!atomic_inc_not_zero(&active_counters)) {
+ if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&active_counters) == 0) {
+ if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
else
err = reserve_bts_hardware();
}
if (!err)
- atomic_inc(&active_counters);
+ atomic_inc(&active_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
return err;
- counter->destroy = hw_perf_counter_destroy;
+ event->destroy = hw_perf_event_destroy;
/*
* Generate PMC IRQs:
@@ -948,15 +948,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
- * counters (user-space has to fall back and
- * sample via a hrtimer based software counter):
+ * events (user-space has to fall back and
+ * sample via a hrtimer based software event):
*/
if (!x86_pmu.apic)
return -EOPNOTSUPP;
}
/*
- * Raw event type provide the config in the event structure
+ * Raw hw_event type provides the config in the hw_event structure
*/
if (attr->type == PERF_TYPE_RAW) {
hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
static void p6_pmu_disable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
if (!cpuc->enabled)
@@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void)
static void intel_pmu_disable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!cpuc->enabled)
return;
@@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void)
static void amd_pmu_disable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
if (!cpuc->enabled)
@@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void)
cpuc->enabled = 0;
/*
* ensure we write the disable before we start disabling the
- * counters proper, so that amd_pmu_enable_counter() does the
+ * events proper, so that amd_pmu_enable_event() does the
* right thing.
*/
barrier();
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
@@ -1070,7 +1070,7 @@ void hw_perf_disable(void)
static void p6_pmu_enable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long val;
if (cpuc->enabled)
@@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void)
static void intel_pmu_enable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
return;
@@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void)
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
- struct perf_counter *counter =
- cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+ struct perf_event *event =
+ cpuc->events[X86_PMC_IDX_FIXED_BTS];
- if (WARN_ON_ONCE(!counter))
+ if (WARN_ON_ONCE(!event))
return;
- intel_pmu_enable_bts(counter->hw.config);
+ intel_pmu_enable_bts(event->hw.config);
}
}
static void amd_pmu_enable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
if (cpuc->enabled)
@@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void)
cpuc->enabled = 1;
barrier();
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct perf_counter *counter = cpuc->counters[idx];
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- val = counter->hw.config;
+ val = event->hw.config;
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
@@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack)
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
(void)checking_wrmsrl(hwc->config_base + idx,
hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
-static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}
static inline void
-intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
@@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
}
static inline void
-p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- u64 val = P6_NOP_COUNTER;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ u64 val = P6_NOP_EVENT;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
}
static inline void
-intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
@@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
return;
}
- x86_pmu_disable_counter(hwc, idx);
+ x86_pmu_disable_event(hwc, idx);
}
static inline void
-amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- x86_pmu_disable_counter(hwc, idx);
+ x86_pmu_disable_event(hwc, idx);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the counter disabled in hw:
+ * To be called with the event disabled in hw:
*/
static int
-x86_perf_counter_set_period(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
ret = 1;
}
/*
- * Quirk: certain CPUs dont like it if just 1 event is left:
+ * Quirk: certain CPUs don't like it if just 1 hw_event is left:
*/
if (unlikely(left < 2))
left = 2;
@@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter,
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
- * The hw counter starts counting from this counter offset,
+ * The hw event starts counting from this event offset,
* mark it to be able to extra future deltas:
*/
atomic64_set(&hwc->prev_count, (u64)-left);
- err = checking_wrmsrl(hwc->counter_base + idx,
- (u64)(-left) & x86_pmu.counter_mask);
+ err = checking_wrmsrl(hwc->event_base + idx,
+ (u64)(-left) & x86_pmu.event_mask);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
return ret;
}
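/*
 * [Editorial sketch, not part of the patch] Writing (u64)(-left), masked to
 * the counter width, arms the counter so that it overflows after exactly
 * `left` increments, which is what raises the next PMI. In isolation:
 */
static unsigned long long initial_count(long long left,
					unsigned long long width_mask)
{
	/* left = 100000, width_mask = 0xffffffff -> 0xfffe7960;
	 * 0xfffe7960 + 100000 wraps a 32-bit counter, i.e. one overflow
	 * after exactly 100000 counted events. */
	return (unsigned long long)(-left) & width_mask;
}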
static inline void
-intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
@@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
-static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
val = hwc->config;
@@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
}
-static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
- if (!__get_cpu_var(cpu_hw_counters).enabled)
+ if (!__get_cpu_var(cpu_hw_events).enabled)
return;
intel_pmu_enable_bts(hwc->config);
@@ -1323,134 +1323,134 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
return;
}
- x86_pmu_enable_counter(hwc, idx);
+ x86_pmu_enable_event(hwc, idx);
}
-static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
- x86_pmu_enable_counter(hwc, idx);
+ x86_pmu_enable_event(hwc, idx);
}
static int
-fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
+fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
{
- unsigned int event;
+ unsigned int hw_event;
- event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+ hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
- if (unlikely((event ==
+ if (unlikely((hw_event ==
x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
(hwc->sample_period == 1)))
return X86_PMC_IDX_FIXED_BTS;
- if (!x86_pmu.num_counters_fixed)
+ if (!x86_pmu.num_events_fixed)
return -1;
- if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+ if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+ if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+ if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
return X86_PMC_IDX_FIXED_BUS_CYCLES;
return -1;
}
/*
- * Find a PMC slot for the freshly enabled / scheduled in counter:
+ * Find a PMC slot for the freshly enabled / scheduled in event:
*/
-static int x86_pmu_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx;
- idx = fixed_mode_idx(counter, hwc);
+ idx = fixed_mode_idx(event, hwc);
if (idx == X86_PMC_IDX_FIXED_BTS) {
/* BTS is already occupied. */
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;
hwc->config_base = 0;
- hwc->counter_base = 0;
+ hwc->event_base = 0;
hwc->idx = idx;
} else if (idx >= 0) {
/*
- * Try to get the fixed counter, if that is already taken
- * then try to get a generic counter:
+ * Try to get the fixed event, if that is already taken
+ * then try to get a generic event:
*/
if (test_and_set_bit(idx, cpuc->used_mask))
goto try_generic;
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
/*
- * We set it so that counter_base + idx in wrmsr/rdmsr maps to
+ * We set it so that event_base + idx in wrmsr/rdmsr maps to
* MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
*/
- hwc->counter_base =
+ hwc->event_base =
MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
hwc->idx = idx;
} else {
idx = hwc->idx;
- /* Try to get the previous generic counter again */
+ /* Try to get the previous generic event again */
if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
idx = find_first_zero_bit(cpuc->used_mask,
- x86_pmu.num_counters);
- if (idx == x86_pmu.num_counters)
+ x86_pmu.num_events);
+ if (idx == x86_pmu.num_events)
return -EAGAIN;
set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
hwc->config_base = x86_pmu.eventsel;
- hwc->counter_base = x86_pmu.perfctr;
+ hwc->event_base = x86_pmu.perfctr;
}
- perf_counters_lapic_init();
+ perf_events_lapic_init();
x86_pmu.disable(hwc, idx);
- cpuc->counters[idx] = counter;
+ cpuc->events[idx] = event;
set_bit(idx, cpuc->active_mask);
- x86_perf_counter_set_period(counter, hwc, idx);
+ x86_perf_event_set_period(event, hwc, idx);
x86_pmu.enable(hwc, idx);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
return 0;
}
-static void x86_pmu_unthrottle(struct perf_counter *counter)
+static void x86_pmu_unthrottle(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
- cpuc->counters[hwc->idx] != counter))
+ cpuc->events[hwc->idx] != event))
return;
x86_pmu.enable(hwc, hwc->idx);
}
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
- struct cpu_hw_counters *cpuc;
+ struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
- if (!x86_pmu.num_counters)
+ if (!x86_pmu.num_events)
return;
local_irq_save(flags);
cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
+ cpuc = &per_cpu(cpu_hw_events, cpu);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
@@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void)
}
pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
@@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void)
local_irq_restore(flags);
}
-static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
+static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
struct bts_record {
@@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
u64 to;
u64 flags;
};
- struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+ struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
struct perf_sample_data data;
struct pt_regs regs;
- if (!counter)
+ if (!event)
return;
if (!ds)
@@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
ds->bts_index = ds->bts_buffer_base;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
data.addr = 0;
regs.ip = 0;
@@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
* We will overwrite the from and to address before we output
* the sample.
*/
- perf_prepare_sample(&header, &data, counter, &regs);
+ perf_prepare_sample(&header, &data, event, &regs);
- if (perf_output_begin(&handle, counter,
+ if (perf_output_begin(&handle, event,
header.size * (top - at), 1, 1))
return;
@@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
data.ip = at->from;
data.addr = at->to;
- perf_output_sample(&handle, &header, &data, counter);
+ perf_output_sample(&handle, &header, &data, event);
}
perf_output_end(&handle);
/* There's new data available. */
- counter->hw.interrupts++;
- counter->pending_kill = POLL_IN;
+ event->hw.interrupts++;
+ event->pending_kill = POLL_IN;
}
-static void x86_pmu_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
/*
@@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter)
/*
* Make sure the cleared pointer becomes visible before we
- * (potentially) free the counter:
+ * (potentially) free the event:
*/
barrier();
/*
- * Drain the remaining delta count out of a counter
+ * Drain the remaining delta count out of an event
* that we are disabling:
*/
- x86_perf_counter_update(counter, hwc, idx);
+ x86_perf_event_update(event, hwc, idx);
/* Drain the remaining BTS records. */
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
intel_pmu_drain_bts_buffer(cpuc);
- cpuc->counters[idx] = NULL;
+ cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
}
/*
- * Save and restart an expired counter. Called by NMI contexts,
- * so it has to be careful about preempting normal counter ops:
+ * Save and restart an expired event. Called by NMI contexts,
+ * so it has to be careful about preempting normal event ops:
*/
-static int intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret;
- x86_perf_counter_update(counter, hwc, idx);
- ret = x86_perf_counter_set_period(counter, hwc, idx);
+ x86_perf_event_update(event, hwc, idx);
+ ret = x86_perf_event_set_period(event, hwc, idx);
- if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- intel_pmu_enable_counter(hwc, idx);
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
+ intel_pmu_enable_event(hwc, idx);
return ret;
}
static void intel_pmu_reset(void)
{
- struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
+ struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
unsigned long flags;
int idx;
- if (!x86_pmu.num_counters)
+ if (!x86_pmu.num_events)
return;
local_irq_save(flags);
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
}
if (ds)
@@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void)
static int p6_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
- struct perf_counter *counter;
- struct hw_perf_counter *hwc;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
+ cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
- counter = cpuc->counters[idx];
- hwc = &counter->hw;
+ event = cpuc->events[idx];
+ hwc = &event->hw;
- val = x86_perf_counter_update(counter, hwc, idx);
- if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+ val = x86_perf_event_update(event, hwc, idx);
+ if (val & (1ULL << (x86_pmu.event_bits - 1)))
continue;
/*
- * counter overflow
+ * event overflow
*/
handled = 1;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
- if (!x86_perf_counter_set_period(counter, hwc, idx))
+ if (!x86_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data, regs))
- p6_pmu_disable_counter(hwc, idx);
+ if (perf_event_overflow(event, 1, &data, regs))
+ p6_pmu_disable_event(hwc, idx);
}
if (handled)
@@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
+ struct cpu_hw_events *cpuc;
int bit, loops;
u64 ack, status;
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
+ cpuc = &__get_cpu_var(cpu_hw_events);
perf_disable();
intel_pmu_drain_bts_buffer(cpuc);
@@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
loops = 0;
again:
if (++loops > 100) {
- WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
- perf_counter_print_debug();
+ WARN_ONCE(1, "perfevents: irq loop stuck!\n");
+ perf_event_print_debug();
intel_pmu_reset();
perf_enable();
return 1;
@@ -1706,19 +1706,19 @@ again:
inc_irq_stat(apic_perf_irqs);
ack = status;
for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
- struct perf_counter *counter = cpuc->counters[bit];
+ struct perf_event *event = cpuc->events[bit];
clear_bit(bit, (unsigned long *) &status);
if (!test_bit(bit, cpuc->active_mask))
continue;
- if (!intel_pmu_save_and_restart(counter))
+ if (!intel_pmu_save_and_restart(event))
continue;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
- if (perf_counter_overflow(counter, 1, &data, regs))
- intel_pmu_disable_counter(&counter->hw, bit);
+ if (perf_event_overflow(event, 1, &data, regs))
+ intel_pmu_disable_event(&event->hw, bit);
}
intel_pmu_ack_status(ack);
@@ -1738,38 +1738,38 @@ again:
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
- struct perf_counter *counter;
- struct hw_perf_counter *hwc;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
+ cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
- counter = cpuc->counters[idx];
- hwc = &counter->hw;
+ event = cpuc->events[idx];
+ hwc = &event->hw;
- val = x86_perf_counter_update(counter, hwc, idx);
- if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+ val = x86_perf_event_update(event, hwc, idx);
+ if (val & (1ULL << (x86_pmu.event_bits - 1)))
continue;
/*
- * counter overflow
+ * event overflow
*/
handled = 1;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
- if (!x86_perf_counter_set_period(counter, hwc, idx))
+ if (!x86_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data, regs))
- amd_pmu_disable_counter(hwc, idx);
+ if (perf_event_overflow(event, 1, &data, regs))
+ amd_pmu_disable_event(hwc, idx);
}
if (handled)
@@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
irq_enter();
ack_APIC_irq();
inc_irq_stat(apic_pending_irqs);
- perf_counter_do_pending();
+ perf_event_do_pending();
irq_exit();
}
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}
-void perf_counters_lapic_init(void)
+void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void)
}
static int __kprobes
-perf_counter_nmi_handler(struct notifier_block *self,
+perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct pt_regs *regs;
- if (!atomic_read(&active_counters))
+ if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
@@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
#endif
/*
* Can't rely on the handled return value to say it was our NMI, two
- * counters could trigger 'simultaneously' raising two back-to-back NMIs.
+ * events could trigger 'simultaneously' raising two back-to-back NMIs.
*
* If the first NMI handles both, the latter will be empty and daze
* the CPU.
@@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self,
return NOTIFY_STOP;
}
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+ .notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = 1
};
@@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = {
.handle_irq = p6_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
- .enable = p6_pmu_enable_counter,
- .disable = p6_pmu_disable_counter,
+ .enable = p6_pmu_enable_event,
+ .disable = p6_pmu_disable_event,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
@@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = {
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
- .num_counters = 2,
+ .num_events = 2,
/*
- * Counters have 40 bits implemented. However they are designed such
+ * Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
- * effective width of a counter for P6-like PMU is 32 bits only.
+ * effective width of an event for P6-like PMU is 32 bits only.
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
- .counter_bits = 32,
- .counter_mask = (1ULL << 32) - 1,
+ .event_bits = 32,
+ .event_mask = (1ULL << 32) - 1,
};
static struct x86_pmu intel_pmu = {
@@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = {
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
- .enable = intel_pmu_enable_counter,
- .disable = intel_pmu_disable_counter,
+ .enable = intel_pmu_enable_event,
+ .disable = intel_pmu_disable_event,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
@@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = {
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
- * the generic counter period:
+ * the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.enable_bts = intel_pmu_enable_bts,
@@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = {
.handle_irq = amd_pmu_handle_irq,
.disable_all = amd_pmu_disable_all,
.enable_all = amd_pmu_enable_all,
- .enable = amd_pmu_enable_counter,
- .disable = amd_pmu_disable_counter,
+ .enable = amd_pmu_enable_event,
+ .disable = amd_pmu_disable_event,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = 4,
- .counter_bits = 48,
- .counter_mask = (1ULL << 48) - 1,
+ .num_events = 4,
+ .event_bits = 48,
+ .event_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)
/*
* Check whether the Architectural PerfMon supports
- * Branch Misses Retired Event or not.
+ * Branch Misses Retired hw_event or not.
*/
cpuid(10, &eax.full, &ebx, &unused, &edx.full);
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
@@ -1982,15 +1982,15 @@ static int intel_pmu_init(void)
x86_pmu = intel_pmu;
x86_pmu.version = version;
- x86_pmu.num_counters = eax.split.num_counters;
- x86_pmu.counter_bits = eax.split.bit_width;
- x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
+ x86_pmu.num_events = eax.split.num_events;
+ x86_pmu.event_bits = eax.split.bit_width;
+ x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
/*
- * Quirk: v2 perfmon does not report fixed-purpose counters, so
- * assume at least 3 counters:
+ * Quirk: v2 perfmon does not report fixed-purpose events, so
+ * assume at least 3 events:
*/
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+ x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
/*
* Install the hw-cache-events table:
@@ -2037,11 +2037,11 @@ static int amd_pmu_init(void)
return 0;
}
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
{
int err;
- pr_info("Performance Counters: ");
+ pr_info("Performance Events: ");
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
@@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void)
return;
}
if (err != 0) {
- pr_cont("no PMU driver, software counters only.\n");
+ pr_cont("no PMU driver, software events only.\n");
return;
}
pr_cont("%s PMU driver.\n", x86_pmu.name);
- if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
- x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+ if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+ WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+ x86_pmu.num_events, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_events = X86_PMC_MAX_GENERIC;
}
- perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
- perf_max_counters = x86_pmu.num_counters;
+ perf_event_mask = (1 << x86_pmu.num_events) - 1;
+ perf_max_events = x86_pmu.num_events;
- if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
- x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+ if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+ WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+ x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
}
- perf_counter_mask |=
- ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
- x86_pmu.intel_ctrl = perf_counter_mask;
+ perf_event_mask |=
+ ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl = perf_event_mask;
- perf_counters_lapic_init();
- register_die_notifier(&perf_counter_nmi_notifier);
+ perf_events_lapic_init();
+ register_die_notifier(&perf_event_nmi_notifier);
- pr_info("... version: %d\n", x86_pmu.version);
- pr_info("... bit width: %d\n", x86_pmu.counter_bits);
- pr_info("... generic counters: %d\n", x86_pmu.num_counters);
- pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
- pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
- pr_info("... counter mask: %016Lx\n", perf_counter_mask);
+ pr_info("... version: %d\n", x86_pmu.version);
+ pr_info("... bit width: %d\n", x86_pmu.event_bits);
+ pr_info("... generic registers: %d\n", x86_pmu.num_events);
+ pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
+ pr_info("... max period: %016Lx\n", x86_pmu.max_period);
+ pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
+ pr_info("... event mask: %016Lx\n", perf_event_mask);
}
-static inline void x86_pmu_read(struct perf_counter *counter)
+static inline void x86_pmu_read(struct perf_event *event)
{
- x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+ x86_perf_event_update(event, &event->hw, event->hw.idx);
}
static const struct pmu pmu = {
@@ -2102,14 +2102,14 @@ static const struct pmu pmu = {
.unthrottle = x86_pmu_unthrottle,
};
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err;
- err = __hw_perf_counter_init(counter);
+ err = __hw_perf_event_init(event);
if (err) {
- if (counter->destroy)
- counter->destroy(counter);
+ if (event->destroy)
+ event->destroy(event);
return ERR_PTR(err);
}
@@ -2292,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return entry;
}
-void hw_perf_counter_setup_online(int cpu)
+void hw_perf_event_setup_online(int cpu)
{
init_debug_store_on_cpu(cpu);
}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 392bea43b89..fab786f60ed 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -20,7 +20,7 @@
#include <linux/kprobes.h>
#include <asm/apic.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
struct nmi_watchdog_ctlblk {
unsigned int cccr_msr;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index bca5fba91c9..f7dd2a7c3bf 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -5,7 +5,6 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
-#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 54b0a327676..a071e6be177 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -5,7 +5,6 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
-#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a3210ce1ecc..85419bb7d4a 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1331,7 +1331,7 @@ void __init e820_reserve_resources(void)
struct resource *res;
u64 end;
- res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
+ res = alloc_bootmem(sizeof(struct resource) * e820.nr_map);
e820_res = res;
for (i = 0; i < e820.nr_map; i++) {
end = e820.map[i].addr + e820.map[i].size - 1;
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index b11cab3c323..41fd965c80c 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -160,721 +160,6 @@ static struct console early_serial_console = {
.index = -1,
};
-#ifdef CONFIG_EARLY_PRINTK_DBGP
-
-static struct ehci_caps __iomem *ehci_caps;
-static struct ehci_regs __iomem *ehci_regs;
-static struct ehci_dbg_port __iomem *ehci_debug;
-static unsigned int dbgp_endpoint_out;
-
-struct ehci_dev {
- u32 bus;
- u32 slot;
- u32 func;
-};
-
-static struct ehci_dev ehci_dev;
-
-#define USB_DEBUG_DEVNUM 127
-
-#define DBGP_DATA_TOGGLE 0x8800
-
-static inline u32 dbgp_pid_update(u32 x, u32 tok)
-{
- return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
-}
-
-static inline u32 dbgp_len_update(u32 x, u32 len)
-{
- return (x & ~0x0f) | (len & 0x0f);
-}
-
-/*
- * USB Packet IDs (PIDs)
- */
-
-/* token */
-#define USB_PID_OUT 0xe1
-#define USB_PID_IN 0x69
-#define USB_PID_SOF 0xa5
-#define USB_PID_SETUP 0x2d
-/* handshake */
-#define USB_PID_ACK 0xd2
-#define USB_PID_NAK 0x5a
-#define USB_PID_STALL 0x1e
-#define USB_PID_NYET 0x96
-/* data */
-#define USB_PID_DATA0 0xc3
-#define USB_PID_DATA1 0x4b
-#define USB_PID_DATA2 0x87
-#define USB_PID_MDATA 0x0f
-/* Special */
-#define USB_PID_PREAMBLE 0x3c
-#define USB_PID_ERR 0x3c
-#define USB_PID_SPLIT 0x78
-#define USB_PID_PING 0xb4
-#define USB_PID_UNDEF_0 0xf0
-
-#define USB_PID_DATA_TOGGLE 0x88
-#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
-
-#define PCI_CAP_ID_EHCI_DEBUG 0xa
-
-#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
-#define HUB_SHORT_RESET_TIME 10
-#define HUB_LONG_RESET_TIME 200
-#define HUB_RESET_TIMEOUT 500
-
-#define DBGP_MAX_PACKET 8
-
-static int dbgp_wait_until_complete(void)
-{
- u32 ctrl;
- int loop = 0x100000;
-
- do {
- ctrl = readl(&ehci_debug->control);
- /* Stop when the transaction is finished */
- if (ctrl & DBGP_DONE)
- break;
- } while (--loop > 0);
-
- if (!loop)
- return -1;
-
- /*
- * Now that we have observed the completed transaction,
- * clear the done bit.
- */
- writel(ctrl | DBGP_DONE, &ehci_debug->control);
- return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
-}
-
-static void __init dbgp_mdelay(int ms)
-{
- int i;
-
- while (ms--) {
- for (i = 0; i < 1000; i++)
- outb(0x1, 0x80);
- }
-}
-
-static void dbgp_breath(void)
-{
- /* Sleep to give the debug port a chance to breathe */
-}
-
-static int dbgp_wait_until_done(unsigned ctrl)
-{
- u32 pids, lpid;
- int ret;
- int loop = 3;
-
-retry:
- writel(ctrl | DBGP_GO, &ehci_debug->control);
- ret = dbgp_wait_until_complete();
- pids = readl(&ehci_debug->pids);
- lpid = DBGP_PID_GET(pids);
-
- if (ret < 0)
- return ret;
-
- /*
- * If the port is getting full or it has dropped data
- * start pacing ourselves, not necessary but it's friendly.
- */
- if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
- dbgp_breath();
-
- /* If I get a NACK reissue the transmission */
- if (lpid == USB_PID_NAK) {
- if (--loop > 0)
- goto retry;
- }
-
- return ret;
-}
-
-static void dbgp_set_data(const void *buf, int size)
-{
- const unsigned char *bytes = buf;
- u32 lo, hi;
- int i;
-
- lo = hi = 0;
- for (i = 0; i < 4 && i < size; i++)
- lo |= bytes[i] << (8*i);
- for (; i < 8 && i < size; i++)
- hi |= bytes[i] << (8*(i - 4));
- writel(lo, &ehci_debug->data03);
- writel(hi, &ehci_debug->data47);
-}
-
-static void __init dbgp_get_data(void *buf, int size)
-{
- unsigned char *bytes = buf;
- u32 lo, hi;
- int i;
-
- lo = readl(&ehci_debug->data03);
- hi = readl(&ehci_debug->data47);
- for (i = 0; i < 4 && i < size; i++)
- bytes[i] = (lo >> (8*i)) & 0xff;
- for (; i < 8 && i < size; i++)
- bytes[i] = (hi >> (8*(i - 4))) & 0xff;
-}
-
-static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
- const char *bytes, int size)
-{
- u32 pids, addr, ctrl;
- int ret;
-
- if (size > DBGP_MAX_PACKET)
- return -1;
-
- addr = DBGP_EPADDR(devnum, endpoint);
-
- pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_OUT);
-
- ctrl = readl(&ehci_debug->control);
- ctrl = dbgp_len_update(ctrl, size);
- ctrl |= DBGP_OUT;
- ctrl |= DBGP_GO;
-
- dbgp_set_data(bytes, size);
- writel(addr, &ehci_debug->address);
- writel(pids, &ehci_debug->pids);
-
- ret = dbgp_wait_until_done(ctrl);
- if (ret < 0)
- return ret;
-
- return ret;
-}
-
-static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
- int size)
-{
- u32 pids, addr, ctrl;
- int ret;
-
- if (size > DBGP_MAX_PACKET)
- return -1;
-
- addr = DBGP_EPADDR(devnum, endpoint);
-
- pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_IN);
-
- ctrl = readl(&ehci_debug->control);
- ctrl = dbgp_len_update(ctrl, size);
- ctrl &= ~DBGP_OUT;
- ctrl |= DBGP_GO;
-
- writel(addr, &ehci_debug->address);
- writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
- if (ret < 0)
- return ret;
-
- if (size > ret)
- size = ret;
- dbgp_get_data(data, size);
- return ret;
-}
-
-static int __init dbgp_control_msg(unsigned devnum, int requesttype,
- int request, int value, int index, void *data, int size)
-{
- u32 pids, addr, ctrl;
- struct usb_ctrlrequest req;
- int read;
- int ret;
-
- read = (requesttype & USB_DIR_IN) != 0;
- if (size > (read ? DBGP_MAX_PACKET:0))
- return -1;
-
- /* Compute the control message */
- req.bRequestType = requesttype;
- req.bRequest = request;
- req.wValue = cpu_to_le16(value);
- req.wIndex = cpu_to_le16(index);
- req.wLength = cpu_to_le16(size);
-
- pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
- addr = DBGP_EPADDR(devnum, 0);
-
- ctrl = readl(&ehci_debug->control);
- ctrl = dbgp_len_update(ctrl, sizeof(req));
- ctrl |= DBGP_OUT;
- ctrl |= DBGP_GO;
-
- /* Send the setup message */
- dbgp_set_data(&req, sizeof(req));
- writel(addr, &ehci_debug->address);
- writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
- if (ret < 0)
- return ret;
-
- /* Read the result */
- return dbgp_bulk_read(devnum, 0, data, size);
-}
-
-
-/* Find a PCI capability */
-static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
-{
- u8 pos;
- int bytes;
-
- if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
- PCI_STATUS_CAP_LIST))
- return 0;
-
- pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
- for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
- u8 id;
-
- pos &= ~3;
- id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
-
- pos = read_pci_config_byte(num, slot, func,
- pos+PCI_CAP_LIST_NEXT);
- }
- return 0;
-}
-
-static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
-{
- u32 class;
-
- class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
- if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
- return 0;
-
- return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
-}
-
-static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
-{
- u32 bus, slot, func;
-
- for (bus = 0; bus < 256; bus++) {
- for (slot = 0; slot < 32; slot++) {
- for (func = 0; func < 8; func++) {
- unsigned cap;
-
- cap = __find_dbgp(bus, slot, func);
-
- if (!cap)
- continue;
- if (ehci_num-- != 0)
- continue;
- *rbus = bus;
- *rslot = slot;
- *rfunc = func;
- return cap;
- }
- }
- }
- return 0;
-}
-
-static int __init ehci_reset_port(int port)
-{
- u32 portsc;
- u32 delay_time, delay;
- int loop;
-
- /* Reset the usb debug port */
- portsc = readl(&ehci_regs->port_status[port - 1]);
- portsc &= ~PORT_PE;
- portsc |= PORT_RESET;
- writel(portsc, &ehci_regs->port_status[port - 1]);
-
- delay = HUB_ROOT_RESET_TIME;
- for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
- delay_time += delay) {
- dbgp_mdelay(delay);
-
- portsc = readl(&ehci_regs->port_status[port - 1]);
- if (portsc & PORT_RESET) {
- /* force reset to complete */
- loop = 2;
- writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
- &ehci_regs->port_status[port - 1]);
- do {
- portsc = readl(&ehci_regs->port_status[port-1]);
- } while ((portsc & PORT_RESET) && (--loop > 0));
- }
-
- /* Device went away? */
- if (!(portsc & PORT_CONNECT))
- return -ENOTCONN;
-
- /* bomb out completely if something weird happend */
- if ((portsc & PORT_CSC))
- return -EINVAL;
-
- /* If we've finished resetting, then break out of the loop */
- if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
- return 0;
- }
- return -EBUSY;
-}
-
-static int __init ehci_wait_for_port(int port)
-{
- u32 status;
- int ret, reps;
-
- for (reps = 0; reps < 3; reps++) {
- dbgp_mdelay(100);
- status = readl(&ehci_regs->status);
- if (status & STS_PCD) {
- ret = ehci_reset_port(port);
- if (ret == 0)
- return 0;
- }
- }
- return -ENOTCONN;
-}
-
-#ifdef DBGP_DEBUG
-# define dbgp_printk early_printk
-#else
-static inline void dbgp_printk(const char *fmt, ...) { }
-#endif
-
-typedef void (*set_debug_port_t)(int port);
-
-static void __init default_set_debug_port(int port)
-{
-}
-
-static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
-
-static void __init nvidia_set_debug_port(int port)
-{
- u32 dword;
- dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
- 0x74);
- dword &= ~(0x0f<<12);
- dword |= ((port & 0x0f)<<12);
- write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
- dword);
- dbgp_printk("set debug port to %d\n", port);
-}
-
-static void __init detect_set_debug_port(void)
-{
- u32 vendorid;
-
- vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
- 0x00);
-
- if ((vendorid & 0xffff) == 0x10de) {
- dbgp_printk("using nvidia set_debug_port\n");
- set_debug_port = nvidia_set_debug_port;
- }
-}
-
-static int __init ehci_setup(void)
-{
- struct usb_debug_descriptor dbgp_desc;
- u32 cmd, ctrl, status, portsc, hcs_params;
- u32 debug_port, new_debug_port = 0, n_ports;
- u32 devnum;
- int ret, i;
- int loop;
- int port_map_tried;
- int playtimes = 3;
-
-try_next_time:
- port_map_tried = 0;
-
-try_next_port:
-
- hcs_params = readl(&ehci_caps->hcs_params);
- debug_port = HCS_DEBUG_PORT(hcs_params);
- n_ports = HCS_N_PORTS(hcs_params);
-
- dbgp_printk("debug_port: %d\n", debug_port);
- dbgp_printk("n_ports: %d\n", n_ports);
-
- for (i = 1; i <= n_ports; i++) {
- portsc = readl(&ehci_regs->port_status[i-1]);
- dbgp_printk("portstatus%d: %08x\n", i, portsc);
- }
-
- if (port_map_tried && (new_debug_port != debug_port)) {
- if (--playtimes) {
- set_debug_port(new_debug_port);
- goto try_next_time;
- }
- return -1;
- }
-
- loop = 100000;
- /* Reset the EHCI controller */
- cmd = readl(&ehci_regs->command);
- cmd |= CMD_RESET;
- writel(cmd, &ehci_regs->command);
- do {
- cmd = readl(&ehci_regs->command);
- } while ((cmd & CMD_RESET) && (--loop > 0));
-
- if (!loop) {
- dbgp_printk("can not reset ehci\n");
- return -1;
- }
- dbgp_printk("ehci reset done\n");
-
- /* Claim ownership, but do not enable yet */
- ctrl = readl(&ehci_debug->control);
- ctrl |= DBGP_OWNER;
- ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
- writel(ctrl, &ehci_debug->control);
-
- /* Start the ehci running */
- cmd = readl(&ehci_regs->command);
- cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
- cmd |= CMD_RUN;
- writel(cmd, &ehci_regs->command);
-
- /* Ensure everything is routed to the EHCI */
- writel(FLAG_CF, &ehci_regs->configured_flag);
-
- /* Wait until the controller is no longer halted */
- loop = 10;
- do {
- status = readl(&ehci_regs->status);
- } while ((status & STS_HALT) && (--loop > 0));
-
- if (!loop) {
- dbgp_printk("ehci can be started\n");
- return -1;
- }
- dbgp_printk("ehci started\n");
-
- /* Wait for a device to show up in the debug port */
- ret = ehci_wait_for_port(debug_port);
- if (ret < 0) {
- dbgp_printk("No device found in debug port\n");
- goto next_debug_port;
- }
- dbgp_printk("ehci wait for port done\n");
-
- /* Enable the debug port */
- ctrl = readl(&ehci_debug->control);
- ctrl |= DBGP_CLAIM;
- writel(ctrl, &ehci_debug->control);
- ctrl = readl(&ehci_debug->control);
- if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
- dbgp_printk("No device in debug port\n");
- writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
- goto err;
- }
- dbgp_printk("debug ported enabled\n");
-
- /* Completely transfer the debug device to the debug controller */
- portsc = readl(&ehci_regs->port_status[debug_port - 1]);
- portsc &= ~PORT_PE;
- writel(portsc, &ehci_regs->port_status[debug_port - 1]);
-
- dbgp_mdelay(100);
-
- /* Find the debug device and make it device number 127 */
- for (devnum = 0; devnum <= 127; devnum++) {
- ret = dbgp_control_msg(devnum,
- USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
- &dbgp_desc, sizeof(dbgp_desc));
- if (ret > 0)
- break;
- }
- if (devnum > 127) {
- dbgp_printk("Could not find attached debug device\n");
- goto err;
- }
- if (ret < 0) {
- dbgp_printk("Attached device is not a debug device\n");
- goto err;
- }
- dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
-
- /* Move the device to 127 if it isn't already there */
- if (devnum != USB_DEBUG_DEVNUM) {
- ret = dbgp_control_msg(devnum,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
- if (ret < 0) {
- dbgp_printk("Could not move attached device to %d\n",
- USB_DEBUG_DEVNUM);
- goto err;
- }
- devnum = USB_DEBUG_DEVNUM;
- dbgp_printk("debug device renamed to 127\n");
- }
-
- /* Enable the debug interface */
- ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
- USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
- if (ret < 0) {
- dbgp_printk(" Could not enable the debug device\n");
- goto err;
- }
- dbgp_printk("debug interface enabled\n");
-
- /* Perform a small write to get the even/odd data state in sync
- */
- ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
- if (ret < 0) {
- dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
- goto err;
- }
- dbgp_printk("small write doned\n");
-
- return 0;
-err:
- /* Things didn't work so remove my claim */
- ctrl = readl(&ehci_debug->control);
- ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
- writel(ctrl, &ehci_debug->control);
- return -1;
-
-next_debug_port:
- port_map_tried |= (1<<(debug_port - 1));
- new_debug_port = ((debug_port-1+1)%n_ports) + 1;
- if (port_map_tried != ((1<<n_ports) - 1)) {
- set_debug_port(new_debug_port);
- goto try_next_port;
- }
- if (--playtimes) {
- set_debug_port(new_debug_port);
- goto try_next_time;
- }
-
- return -1;
-}
-
-static int __init early_dbgp_init(char *s)
-{
- u32 debug_port, bar, offset;
- u32 bus, slot, func, cap;
- void __iomem *ehci_bar;
- u32 dbgp_num;
- u32 bar_val;
- char *e;
- int ret;
- u8 byte;
-
- if (!early_pci_allowed())
- return -1;
-
- dbgp_num = 0;
- if (*s)
- dbgp_num = simple_strtoul(s, &e, 10);
- dbgp_printk("dbgp_num: %d\n", dbgp_num);
-
- cap = find_dbgp(dbgp_num, &bus, &slot, &func);
- if (!cap)
- return -1;
-
- dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
- func);
-
- debug_port = read_pci_config(bus, slot, func, cap);
- bar = (debug_port >> 29) & 0x7;
- bar = (bar * 4) + 0xc;
- offset = (debug_port >> 16) & 0xfff;
- dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
- if (bar != PCI_BASE_ADDRESS_0) {
- dbgp_printk("only debug ports on bar 1 handled.\n");
-
- return -1;
- }
-
- bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
- dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
- if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
- dbgp_printk("only simple 32bit mmio bars supported\n");
-
- return -1;
- }
-
- /* double check if the mem space is enabled */
- byte = read_pci_config_byte(bus, slot, func, 0x04);
- if (!(byte & 0x2)) {
- byte |= 0x02;
- write_pci_config_byte(bus, slot, func, 0x04, byte);
- dbgp_printk("mmio for ehci enabled\n");
- }
-
- /*
- * FIXME I don't have the bar size so just guess PAGE_SIZE is more
- * than enough. 1K is the biggest I have seen.
- */
- set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
- ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
- ehci_bar += bar_val & ~PAGE_MASK;
- dbgp_printk("ehci_bar: %p\n", ehci_bar);
-
- ehci_caps = ehci_bar;
- ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase));
- ehci_debug = ehci_bar + offset;
- ehci_dev.bus = bus;
- ehci_dev.slot = slot;
- ehci_dev.func = func;
-
- detect_set_debug_port();
-
- ret = ehci_setup();
- if (ret < 0) {
- dbgp_printk("ehci_setup failed\n");
- ehci_debug = NULL;
-
- return -1;
- }
-
- return 0;
-}
-
-static void early_dbgp_write(struct console *con, const char *str, u32 n)
-{
- int chunk, ret;
-
- if (!ehci_debug)
- return;
- while (n > 0) {
- chunk = n;
- if (chunk > DBGP_MAX_PACKET)
- chunk = DBGP_MAX_PACKET;
- ret = dbgp_bulk_write(USB_DEBUG_DEVNUM,
- dbgp_endpoint_out, str, chunk);
- str += chunk;
- n -= chunk;
- }
-}
-
-static struct console early_dbgp_console = {
- .name = "earlydbg",
- .write = early_dbgp_write,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-#endif
-
/* Direct interface for emergencies */
static struct console *early_console = &early_vga_console;
static int __initdata early_console_initialized;
@@ -891,10 +176,24 @@ asmlinkage void early_printk(const char *fmt, ...)
va_end(ap);
}
+static inline void early_console_register(struct console *con, int keep_early)
+{
+ if (early_console->index != -1) {
+ printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+ con->name);
+ return;
+ }
+ early_console = con;
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+}
static int __init setup_early_printk(char *buf)
{
- int keep_early;
+ int keep;
if (!buf)
return 0;
@@ -903,42 +202,34 @@ static int __init setup_early_printk(char *buf)
return 0;
early_console_initialized = 1;
- keep_early = (strstr(buf, "keep") != NULL);
-
- if (!strncmp(buf, "serial", 6)) {
- early_serial_init(buf + 6);
- early_console = &early_serial_console;
- } else if (!strncmp(buf, "ttyS", 4)) {
- early_serial_init(buf);
- early_console = &early_serial_console;
- } else if (!strncmp(buf, "vga", 3)
- && boot_params.screen_info.orig_video_isVGA == 1) {
- max_xpos = boot_params.screen_info.orig_video_cols;
- max_ypos = boot_params.screen_info.orig_video_lines;
- current_ypos = boot_params.screen_info.orig_y;
- early_console = &early_vga_console;
+ keep = (strstr(buf, "keep") != NULL);
+
+ while (*buf != '\0') {
+ if (!strncmp(buf, "serial", 6)) {
+ early_serial_init(buf + 6);
+ early_console_register(&early_serial_console, keep);
+ }
+ if (!strncmp(buf, "ttyS", 4)) {
+ early_serial_init(buf + 4);
+ early_console_register(&early_serial_console, keep);
+ }
+ if (!strncmp(buf, "vga", 3) &&
+ boot_params.screen_info.orig_video_isVGA == 1) {
+ max_xpos = boot_params.screen_info.orig_video_cols;
+ max_ypos = boot_params.screen_info.orig_video_lines;
+ current_ypos = boot_params.screen_info.orig_y;
+ early_console_register(&early_vga_console, keep);
+ }
#ifdef CONFIG_EARLY_PRINTK_DBGP
- } else if (!strncmp(buf, "dbgp", 4)) {
- if (early_dbgp_init(buf+4) < 0)
- return 0;
- early_console = &early_dbgp_console;
- /*
- * usb subsys will reset ehci controller, so don't keep
- * that early console
- */
- keep_early = 0;
+ if (!strncmp(buf, "dbgp", 4) && !early_dbgp_init(buf + 4))
+ early_console_register(&early_dbgp_console, keep);
#endif
#ifdef CONFIG_HVC_XEN
- } else if (!strncmp(buf, "xen", 3)) {
- early_console = &xenboot_console;
+ if (!strncmp(buf, "xen", 3))
+ early_console_register(&xenboot_console, keep);
#endif
+ buf++;
}
-
- if (keep_early)
- early_console->flags &= ~CON_BOOT;
- else
- early_console->flags |= CON_BOOT;
- register_console(early_console);
return 0;
}
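For reference, the rewritten setup walks the option string one character at a time and tries to register a console for every token it recognizes; early_console_register() then rejects any second registration, so the first matching token wins while "keep" (found anywhere in the string) applies to whichever console sticks. A minimal user-space sketch of that scanning behaviour only — the helper name and the printf stand-ins are illustrative, not kernel code:

#include <stdio.h>
#include <string.h>

/* sketch: mimics the strncmp-and-advance scan in setup_early_printk() */
static void scan_earlyprintk(const char *opt)
{
	int keep = (strstr(opt, "keep") != NULL);
	const char *buf = opt;

	while (*buf != '\0') {
		/* in the kernel, each hit calls early_console_register(),
		 * which only accepts the first console it is offered */
		if (!strncmp(buf, "serial", 6))
			printf("would register serial console (keep=%d)\n", keep);
		if (!strncmp(buf, "ttyS", 4))
			printf("would register %.5s console (keep=%d)\n", buf, keep);
		if (!strncmp(buf, "vga", 3))
			printf("would register VGA console (keep=%d)\n", keep);
		if (!strncmp(buf, "dbgp", 4))
			printf("would register EHCI debug-port console (keep=%d)\n", keep);
		buf++;
	}
}

int main(void)
{
	scan_earlyprintk("serial,ttyS0,115200,keep");
	return 0;
}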
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index d59fe323807..b5c061f8f35 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -536,20 +536,13 @@ sysret_signal:
bt $TIF_SYSCALL_AUDIT,%edx
jc sysret_audit
#endif
- /* edx: work flags (arg3) */
- leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
- xorl %esi,%esi # oldset -> arg2
- SAVE_REST
- FIXUP_TOP_OF_STACK %r11
- call do_notify_resume
- RESTORE_TOP_OF_STACK %r11
- RESTORE_REST
- movl $_TIF_WORK_MASK,%edi
- /* Use IRET because user could have changed frame. This
- works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
- jmp int_with_check
+ /*
+ * We have a signal, or exit tracing or single-step.
+ * These all wind up with the iret return path anyway,
+ * so just join that path right now.
+ */
+ FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
+ jmp int_check_syscall_exit_work
badsys:
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
@@ -654,6 +647,7 @@ int_careful:
int_very_careful:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
+int_check_syscall_exit_work:
SAVE_REST
/* Check for syscall exit trace */
testl $_TIF_WORK_SYSCALL_EXIT,%edx
@@ -1021,7 +1015,7 @@ apicinterrupt ERROR_APIC_VECTOR \
apicinterrupt SPURIOUS_APIC_VECTOR \
spurious_interrupt smp_spurious_interrupt
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
apicinterrupt LOCAL_PENDING_VECTOR \
perf_pending_interrupt smp_perf_pending_interrupt
#endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b766e8c7252..050c278481b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -79,7 +79,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
* any particular GDT layout, because we load our own as soon as we
* can.
*/
-.section .text.head,"ax",@progbits
+__HEAD
ENTRY(startup_32)
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
@@ -608,7 +608,7 @@ ENTRY(initial_code)
/*
* BSS section
*/
-.section ".bss.page_aligned","wa"
+__PAGE_ALIGNED_BSS
.align PAGE_SIZE_asm
#ifdef CONFIG_X86_PAE
swapper_pg_pmd:
@@ -626,7 +626,7 @@ ENTRY(empty_zero_page)
* This starts the data section.
*/
#ifdef CONFIG_X86_PAE
-.section ".data.page_aligned","wa"
+__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
.align PAGE_SIZE_asm
ENTRY(swapper_pg_dir)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index fa54f78e2a0..780cd928fcd 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -40,7 +40,7 @@ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
.text
- .section .text.head
+ __HEAD
.code64
.globl startup_64
startup_64:
@@ -418,7 +418,7 @@ ENTRY(phys_base)
ENTRY(idt_table)
.skip IDT_ENTRIES * 16
- .section .bss.page_aligned, "aw", @nobits
+ __PAGE_ALIGNED_BSS
.align PAGE_SIZE
ENTRY(empty_zero_page)
.skip PAGE_SIZE
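The section macros substituted in head_32.S and head_64.S above come from <linux/init.h> and <linux/linkage.h>, so the special section names are spelled in one place instead of being open-coded per architecture; the cleanup also renames the head section (.text.head becomes .head.text) so it cannot collide with compiler-generated .text.* sections. Assumed definitions, abbreviated for orientation only:

/* assumed expansions of the assembly-side section macros */
#define __HEAD			.section ".head.text", "ax"	/* <linux/init.h> */
#define __PAGE_ALIGNED_DATA	.section ".data.page_aligned", "aw"
#define __PAGE_ALIGNED_BSS	.section ".bss.page_aligned", "aw"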
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 270ff83efc1..3a54dcb9cd0 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -20,9 +20,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
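The __init_task_data annotation used in the hunk above replaces the open-coded section attribute it removes; it is assumed to be the one-line wrapper from <linux/init_task.h>:

/* assumed definition -- same section as before, just named centrally */
#define __init_task_data	__attribute__((__section__(".data.init_task")))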
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 300883112e3..40f30773fb2 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -208,7 +208,7 @@ static void __init apic_intr_init(void)
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
/* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_COUNTERS
+# ifdef CONFIG_PERF_EVENTS
alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
# endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635..ec6ef60cbd1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
#ifdef CONFIG_SMP
preempt_disable();
load_LDT(pc);
- if (!cpus_equal(current->mm->cpu_vm_mask,
- cpumask_of_cpu(smp_processor_id())))
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
smp_call_function(flush_ldt, current->mm, 1);
preempt_enable();
#else
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 0db7969b0dd..378e9a8f1bf 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
{
ssize_t ret = -EINVAL;
- if ((len >> PAGE_SHIFT) > num_physpages) {
- pr_err("microcode: too much data (max %ld pages)\n", num_physpages);
+ if ((len >> PAGE_SHIFT) > totalram_pages) {
+ pr_err("microcode: too much data (max %ld pages)\n", totalram_pages);
return ret;
}
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index e8a35016115..aaa6b7839f1 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -46,9 +46,8 @@ void __init pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
- if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
- iommu_pass_through)
- swiotlb = 1;
+ if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
+ swiotlb = 1;
#endif
if (swiotlb_force)
swiotlb = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab416031..5284cd2b577 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
void __init init_c1e_mask(void)
{
/* If we're using c1e_idle, we need to allocate c1e_mask. */
- if (pm_idle == c1e_idle) {
- alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
- cpumask_clear(c1e_mask);
- }
+ if (pm_idle == c1e_idle)
+ zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}
static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8d7d5c9c1be..7b058a2dc66 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -325,16 +325,6 @@ static int putreg(struct task_struct *child,
return set_flags(child, value);
#ifdef CONFIG_X86_64
- /*
- * Orig_ax is really just a flag with small positive and
- * negative values, so make sure to always sign-extend it
- * from 32 bits so that it works correctly regardless of
- * whether we come from a 32-bit environment or not.
- */
- case offsetof(struct user_regs_struct, orig_ax):
- value = (long) (s32) value;
- break;
-
case offsetof(struct user_regs_struct,fs_base):
if (value >= TASK_SIZE_OF(child))
return -EIO;
@@ -1126,10 +1116,15 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
case offsetof(struct user32, regs.orig_eax):
/*
- * Sign-extend the value so that orig_eax = -1
- * causes (long)orig_ax < 0 tests to fire correctly.
+ * A 32-bit debugger setting orig_eax means to restore
+ * the state of the task restarting a 32-bit syscall.
+ * Make sure we interpret the -ERESTART* codes correctly
+ * in case the task is not actually still sitting at the
+ * exit from a 32-bit syscall with TS_COMPAT still set.
*/
- regs->orig_ax = (long) (s32) value;
+ regs->orig_ax = value;
+ if (syscall_get_nr(child, regs) >= 0)
+ task_thread_info(child)->status |= TS_COMPAT;
break;
case offsetof(struct user32, regs.eflags):
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f327bccf508..e09f0e2c14b 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -27,6 +27,7 @@
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
+#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
@@ -985,6 +986,8 @@ void __init setup_arch(char **cmdline_p)
*/
acpi_boot_init();
+ sfi_init();
+
/*
* get boot-time SMP configuration:
*/
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c
new file mode 100644
index 00000000000..34e09938265
--- /dev/null
+++ b/arch/x86/kernel/sfi.c
@@ -0,0 +1,122 @@
+/*
+ * sfi.c - x86 architecture SFI support.
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define KMSG_COMPONENT "SFI"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/io.h>
+
+#include <asm/io_apic.h>
+#include <asm/mpspec.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+
+void __init mp_sfi_register_lapic_address(unsigned long address)
+{
+ mp_lapic_addr = address;
+
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+ if (boot_cpu_physical_apicid == -1U)
+ boot_cpu_physical_apicid = read_apic_id();
+
+ pr_info("Boot CPU = %d\n", boot_cpu_physical_apicid);
+}
+
+/* All CPUs enumerated by SFI must be present and enabled */
+void __cpuinit mp_sfi_register_lapic(u8 id)
+{
+ if (MAX_APICS - id <= 0) {
+ pr_warning("Processor #%d invalid (max %d)\n",
+ id, MAX_APICS);
+ return;
+ }
+
+ pr_info("registering lapic[%d]\n", id);
+
+ generic_processor_info(id, GET_APIC_VERSION(apic_read(APIC_LVR)));
+}
+
+static int __init sfi_parse_cpus(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_cpu_table_entry *pentry;
+ int i;
+ int cpu_num;
+
+ sb = (struct sfi_table_simple *)table;
+ cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+ pentry = (struct sfi_cpu_table_entry *)sb->pentry;
+
+ for (i = 0; i < cpu_num; i++) {
+ mp_sfi_register_lapic(pentry->apic_id);
+ pentry++;
+ }
+
+ smp_found_config = 1;
+ return 0;
+}
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_X86_IO_APIC
+static u32 gsi_base;
+
+static int __init sfi_parse_ioapic(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_apic_table_entry *pentry;
+ int i, num;
+
+ sb = (struct sfi_table_simple *)table;
+ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_apic_table_entry);
+ pentry = (struct sfi_apic_table_entry *)sb->pentry;
+
+ for (i = 0; i < num; i++) {
+ mp_register_ioapic(i, pentry->phys_addr, gsi_base);
+ gsi_base += io_apic_get_redir_entries(i);
+ pentry++;
+ }
+
+ WARN(pic_mode, KERN_WARNING
+		"SFI: pic_mode shouldn't be 1 when IOAPIC table is present\n");
+ pic_mode = 0;
+ return 0;
+}
+#endif /* CONFIG_X86_IO_APIC */
+
+/*
+ * sfi_platform_init(): register lapics & io-apics
+ */
+int __init sfi_platform_init(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ mp_sfi_register_lapic_address(sfi_lapic_addr);
+ sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus);
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ sfi_table_parse(SFI_SIG_APIC, NULL, NULL, sfi_parse_ioapic);
+#endif
+ return 0;
+}
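Both parsers in the new file follow the same pattern: sfi_table_parse() hands the matched table to a handler, which casts it to struct sfi_table_simple and walks its fixed-size entries. A condensed sketch of that pattern, reusing the entry type shown above; the handler name is hypothetical and the prototypes are as assumed from <linux/sfi.h>:

#include <linux/sfi.h>

/* sketch of the handler pattern used by sfi_parse_cpus()/sfi_parse_ioapic() */
static int __init sfi_parse_example(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
	struct sfi_cpu_table_entry *entry;
	int i, num;

	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
	entry = (struct sfi_cpu_table_entry *)sb->pentry;

	for (i = 0; i < num; i++, entry++)
		pr_info("entry %d: apic_id %u\n", i, entry->apic_id);

	return 0;
}

/* registered from an init path, e.g.:
 *	sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_example);
 */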
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff..565ebc65920 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
#endif
current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
- alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
- alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
- cpumask_clear(per_cpu(cpu_core_map, i));
- cpumask_clear(per_cpu(cpu_sibling_map, i));
- cpumask_clear(cpu_data(i).llc_shared_map);
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
}
set_cpu_sibling_map(0);
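This hunk, like the init_c1e_mask() change earlier, relies on zalloc_cpumask_var() behaving as allocate-then-clear. A behavioural sketch under that assumption (the _sketch suffix marks it as illustrative, not the real implementation, which simply passes a zeroing allocation flag):

#include <linux/cpumask.h>

/* behavioural sketch: what each zalloc_cpumask_var() call above replaces */
static inline bool zalloc_cpumask_var_sketch(cpumask_var_t *mask, gfp_t flags)
{
	if (!alloc_cpumask_var(mask, flags))
		return false;	/* can only fail with CONFIG_CPUMASK_OFFSTACK */
	cpumask_clear(*mask);
	return true;
}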
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d51321ddafd..0157cd26d7c 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index e293ac56c72..dcb00d27851 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -93,7 +93,6 @@ static struct irqaction irq0 = {
void __init setup_default_timer_irq(void)
{
- irq0.mask = cpumask_of_cpu(0);
setup_irq(0, &irq0);
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 9346e102338..7e37dcee0cc 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -14,7 +14,6 @@
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
-#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -73,11 +72,9 @@ char ignore_fpu_irq;
/*
* The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
+ * F0 0F bug workaround.
*/
-gate_desc idt_table[NR_VECTORS]
- __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif
DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 027b5b49899..f37930954d1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -114,7 +114,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
return;
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
- pr_info("Skipping synchronization checks as TSC is reliable.\n");
+ printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
return;
}
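printk_once() is assumed to be the <linux/kernel.h> helper that latches after its first expansion, so the message is printed for the first CPU that takes this path rather than once per secondary CPU. Roughly:

/* assumed shape of printk_once(), abbreviated */
#define printk_once(x...) ({			\
	static bool __print_once;		\
						\
	if (!__print_once) {			\
		__print_once = true;		\
		printk(x);			\
	}					\
})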
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a46acccec38..92929fb3f9f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,17 +65,11 @@ SECTIONS
#endif
/* Text and read-only data */
-
- /* bootstrapping code */
- .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
- _text = .;
- *(.text.head)
- } :text = 0x9090
-
- /* The rest of the text */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
+ _text = .;
+ /* bootstrapping code */
+ HEAD_TEXT
#ifdef CONFIG_X86_32
- /* not really needed, already page aligned */
. = ALIGN(PAGE_SIZE);
*(.text.page_aligned)
#endif
@@ -94,13 +88,7 @@ SECTIONS
NOTES :text :note
- /* Exception table */
- . = ALIGN(16);
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- } :text = 0x9090
+ EXCEPTION_TABLE(16) :text = 0x9090
RO_DATA(PAGE_SIZE)
@@ -118,7 +106,6 @@ SECTIONS
#endif
PAGE_ALIGNED_DATA(PAGE_SIZE)
- *(.data.idt)
CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
@@ -135,24 +122,21 @@ SECTIONS
#ifdef CONFIG_X86_64
#define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
- PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
- PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
-#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
+#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
-#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
+#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
+ . = ALIGN(4096);
+ __vsyscall_0 = .;
+
. = VSYSCALL_ADDR;
- .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
+ .vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
*(.vsyscall_0)
} :user
- __vsyscall_0 = VSYSCALL_VIRT_ADDR;
-
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
*(.vsyscall_fn)
@@ -192,11 +176,9 @@ SECTIONS
*(.vsyscall_3)
}
- . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
+ . = __vsyscall_0 + PAGE_SIZE;
#undef VSYSCALL_ADDR
-#undef VSYSCALL_PHYS_ADDR
-#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
@@ -219,36 +201,12 @@ SECTIONS
PERCPU_VADDR(0, :percpu)
#endif
- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
:init
#endif
- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
- INIT_DATA
- }
-
- . = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
+ INIT_DATA_SECTION(16)
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
@@ -256,8 +214,6 @@ SECTIONS
__x86_cpu_dev_end = .;
}
- SECURITY_INIT
-
. = ALIGN(8);
.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
__parainstructions = .;
@@ -288,15 +244,6 @@ SECTIONS
EXIT_DATA
}
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
-
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
PERCPU(PAGE_SIZE)
#endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index cf53a78e2dc..8cb4974ff59 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -228,19 +228,11 @@ static long __vsyscall(3) venosys_1(void)
}
#ifdef CONFIG_SYSCTL
-
-static int
-vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-}
-
static ctl_table kernel_table2[] = {
{ .procname = "vsyscall64",
.data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = vsyscall_sysctl_change },
+ .proc_handler = proc_dointvec },
{}
};
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4cb7d5d18b8..7e59dc1d3fc 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1135,11 +1135,6 @@ static struct notifier_block paniced = {
/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
- /* We do this here and not earlier because lockcheck used to barf if we
- * did it before start_kernel(). I think we fixed that, so it'd be
- * nice to move it back to lguest_init. Patch welcome... */
- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
-
/*
 	 * The Linux bootloader header contains an "e820" memory map: the
* Launcher populated the first entry with our memory limit.
@@ -1364,10 +1359,13 @@ __init void lguest_init(void)
/*
* If we don't initialize the lock dependency checker now, it crashes
- * paravirt_disable_iospace.
+ * atomic_notifier_chain_register, then paravirt_disable_iospace.
*/
lockdep_init();
+ /* Hook in our special panic hypercall code. */
+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+
/*
* The IDE code spends about 3 seconds probing for disks: if we reserve
* all the I/O ports up front it can't get them and so doesn't probe.
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9b5a9f59a47..06630d26e56 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,9 +1,10 @@
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
- pat.o pgtable.o physaddr.o gup.o
+ pat.o pgtable.o physaddr.o gup.o setup_nx.o
# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_physaddr.o := $(nostackp)
+CFLAGS_setup_nx.o := $(nostackp)
obj-$(CONFIG_SMP) += tlb.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 775a020990a..f4cee9028cf 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -10,7 +10,7 @@
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
-#include <linux/perf_counter.h> /* perf_swcounter_event */
+#include <linux/perf_event.h> /* perf_sw_event */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
@@ -167,6 +167,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
+ info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;
force_sig_info(si_signo, &info, tsk);
}
@@ -790,10 +791,12 @@ out_of_memory(struct pt_regs *regs, unsigned long error_code,
}
static void
-do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ unsigned int fault)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
+ int code = BUS_ADRERR;
up_read(&mm->mmap_sem);
@@ -809,7 +812,15 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 14;
- force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+#ifdef CONFIG_MEMORY_FAILURE
+ if (fault & VM_FAULT_HWPOISON) {
+ printk(KERN_ERR
+ "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+ tsk->comm, tsk->pid, address);
+ code = BUS_MCEERR_AR;
+ }
+#endif
+ force_sig_info_fault(SIGBUS, code, address, tsk);
}
static noinline void
@@ -819,8 +830,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & VM_FAULT_OOM) {
out_of_memory(regs, error_code, address);
} else {
- if (fault & VM_FAULT_SIGBUS)
- do_sigbus(regs, error_code, address);
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
+ do_sigbus(regs, error_code, address, fault);
else
BUG();
}
@@ -1017,7 +1028,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -1114,11 +1125,11 @@ good_area:
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
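The BUS_MCEERR_AR path added here is what a SIGBUS handler in user space eventually observes. A minimal sketch of consuming it, assuming a libc whose <signal.h> exposes BUS_MCEERR_AR and si_addr_lsb; this is illustration only (printing from a signal handler is kept for brevity):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* sketch: tell a hardware-poison fault apart from an ordinary bus error */
static void sigbus_handler(int sig, siginfo_t *si, void *uctx)
{
	if (si->si_code == BUS_MCEERR_AR)
		fprintf(stderr, "memory failure at %p, granularity 2^%d bytes\n",
			si->si_addr, si->si_addr_lsb);
	else
		fprintf(stderr, "bus error at %p\n", si->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);
	pause();	/* wait for a fault delivered from elsewhere */
	return 0;
}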
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 0607119cef9..73ffd5536f6 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -28,69 +28,6 @@ int direct_gbpages
#endif
;
-int nx_enabled;
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-static int disable_nx __cpuinitdata;
-
-/*
- * noexec = on|off
- *
- * Control non-executable mappings for processes.
- *
- * on Enable
- * off Disable
- */
-static int __init noexec_setup(char *str)
-{
- if (!str)
- return -EINVAL;
- if (!strncmp(str, "on", 2)) {
- __supported_pte_mask |= _PAGE_NX;
- disable_nx = 0;
- } else if (!strncmp(str, "off", 3)) {
- disable_nx = 1;
- __supported_pte_mask &= ~_PAGE_NX;
- }
- return 0;
-}
-early_param("noexec", noexec_setup);
-#endif
-
-#ifdef CONFIG_X86_PAE
-static void __init set_nx(void)
-{
- unsigned int v[4], l, h;
-
- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-
- if ((v[3] & (1 << 20)) && !disable_nx) {
- rdmsr(MSR_EFER, l, h);
- l |= EFER_NX;
- wrmsr(MSR_EFER, l, h);
- nx_enabled = 1;
- __supported_pte_mask |= _PAGE_NX;
- }
- }
-}
-#else
-static inline void set_nx(void)
-{
-}
-#endif
-
-#ifdef CONFIG_X86_64
-void __cpuinit check_efer(void)
-{
- unsigned long efer;
-
- rdmsrl(MSR_EFER, efer);
- if (!(efer & EFER_NX) || disable_nx)
- __supported_pte_mask &= ~_PAGE_NX;
-}
-#endif
-
static void __init find_early_table_space(unsigned long end, int use_pse,
int use_gbpages)
{
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 3cd7711bb94..30938c1d8d5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -84,7 +84,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
#ifdef CONFIG_X86_PAE
if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
if (after_bootmem)
- pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
else
pmd_table = (pmd_t *)alloc_low_page();
paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
@@ -116,7 +116,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
#endif
if (!page_table)
page_table =
- (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ (pte_t *)alloc_bootmem_pages(PAGE_SIZE);
} else
page_table = (pte_t *)alloc_low_page();
@@ -857,8 +857,6 @@ static void __init test_wp_bit(void)
}
}
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
@@ -886,13 +884,9 @@ void __init mem_init(void)
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
-
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ea56b8cbb6a..5a4398a6006 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -647,8 +647,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif /* CONFIG_MEMORY_HOTPLUG */
-static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
- kcore_modules, kcore_vsyscall;
+static struct kcore_list kcore_vsyscall;
void __init mem_init(void)
{
@@ -677,17 +676,12 @@ void __init mem_init(void)
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
/* Register memory areas for /proc/kcore */
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
- VMALLOC_END-VMALLOC_START);
- kclist_add(&kcore_kernel, &_stext, _end - _stext);
- kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
- VSYSCALL_END - VSYSCALL_START);
+ VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
max_pfn << (PAGE_SHIFT-10),
codesize >> 10,
absent_pages << (PAGE_SHIFT-10),
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 528bf954eb7..8cc18334414 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -225,9 +225,6 @@ void kmemcheck_hide(struct pt_regs *regs)
BUG_ON(!irqs_disabled());
- if (data->balance == 0)
- return;
-
if (unlikely(data->balance != 1)) {
kmemcheck_show_all();
kmemcheck_error_save_bug(regs);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 24952fdc7e4..dd38bfbefd1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -144,6 +144,7 @@ void clflush_cache_range(void *vaddr, unsigned int size)
mb();
}
+EXPORT_SYMBOL_GPL(clflush_cache_range);
static void __cpa_flush_all(void *arg)
{
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7257cf3decf..e78cd0ec2bc 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -81,6 +81,7 @@ enum {
void pat_init(void)
{
u64 pat;
+ bool boot_cpu = !boot_pat_state;
if (!pat_enabled)
return;
@@ -122,8 +123,10 @@ void pat_init(void)
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
wrmsrl(MSR_IA32_CR_PAT, pat);
- printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
- smp_processor_id(), boot_pat_state, pat);
+
+ if (boot_cpu)
+ printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
+ smp_processor_id(), boot_pat_state, pat);
}
#undef PAT
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
new file mode 100644
index 00000000000..513d8ed5d2e
--- /dev/null
+++ b/arch/x86/mm/setup_nx.c
@@ -0,0 +1,69 @@
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+
+int nx_enabled;
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static int disable_nx __cpuinitdata;
+
+/*
+ * noexec = on|off
+ *
+ * Control non-executable mappings for processes.
+ *
+ * on Enable
+ * off Disable
+ */
+static int __init noexec_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+ if (!strncmp(str, "on", 2)) {
+ __supported_pte_mask |= _PAGE_NX;
+ disable_nx = 0;
+ } else if (!strncmp(str, "off", 3)) {
+ disable_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+ return 0;
+}
+early_param("noexec", noexec_setup);
+#endif
+
+#ifdef CONFIG_X86_PAE
+void __init set_nx(void)
+{
+ unsigned int v[4], l, h;
+
+ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+
+ if ((v[3] & (1 << 20)) && !disable_nx) {
+ rdmsr(MSR_EFER, l, h);
+ l |= EFER_NX;
+ wrmsr(MSR_EFER, l, h);
+ nx_enabled = 1;
+ __supported_pte_mask |= _PAGE_NX;
+ }
+ }
+}
+#else
+void set_nx(void)
+{
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void __cpuinit check_efer(void)
+{
+ unsigned long efer;
+
+ rdmsrl(MSR_EFER, efer);
+ if (!(efer & EFER_NX) || disable_nx)
+ __supported_pte_mask &= ~_PAGE_NX;
+}
+#endif
+
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c814e144a3f..36fe08eeb5c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
{
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
BUG();
- cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+ cpumask_clear_cpu(cpu,
+ mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
preempt_disable();
local_flush_tlb();
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else
leave_mm(smp_processor_id());
}
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id());
}
- if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), mm, va);
preempt_enable();
}
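All three flush paths now go through the mm_cpumask() accessor rather than touching mm->cpu_vm_mask directly, which keeps callers correct if the mask ever moves or becomes dynamically sized. The accessor is assumed to be equivalent to this trivial wrapper:

/* assumed behaviour of the accessor used above */
static inline struct cpumask *mm_cpumask_sketch(struct mm_struct *mm)
{
	return &mm->cpu_vm_mask;
}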
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 4899215999d..8eb05878554 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void)
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
current_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
- eax.split.num_counters = 2;
+ eax.split.num_events = 2;
eax.split.bit_width = 40;
}
- num_counters = eax.split.num_counters;
+ num_counters = eax.split.num_events;
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
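num_events here is the renamed bit-field in the CPUID leaf 0xA EAX decoding that oprofile now shares with the perf code via <asm/perf_event.h>; the union is assumed to look roughly like this:

/* assumed layout of the architectural-perfmon CPUID.0xA EAX decoding */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;	/* was num_counters before the rename */
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};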
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index b83776180c7..7b8e75d1608 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -13,7 +13,7 @@
#define OP_X86_MODEL_H
#include <asm/types.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
struct op_msr {
unsigned long addr;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 5db96d4304d..1331fcf2614 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -646,7 +646,7 @@ int get_mp_bus_to_node(int busnum)
#else /* CONFIG_X86_32 */
-static unsigned char mp_bus_to_node[BUS_NR] = {
+static int mp_bus_to_node[BUS_NR] = {
[0 ... BUS_NR - 1] = -1
};
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 712443ec6d4..602c172d3bd 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -13,10 +13,14 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/sort.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
+#include <asm/acpi.h>
+
+#define PREFIX "PCI: "
/* aperture is up to 256MB but BIOS may reserve less */
#define MMCONFIG_APER_MIN (2 * 1024*1024)
@@ -491,7 +495,7 @@ static void __init pci_mmcfg_reject_broken(int early)
(unsigned int)cfg->start_bus_number,
(unsigned int)cfg->end_bus_number);
- if (!early)
+ if (!early && !acpi_disabled)
valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0);
if (valid)
@@ -606,7 +610,7 @@ static void __init __pci_mmcfg_init(int early)
}
if (!known_bridge)
- acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
pci_mmcfg_reject_broken(early);
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 8b2d561046a..f10a7e94a84 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -11,9 +11,9 @@
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/acpi.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
+#include <acpi/acpi.h>
/* Assume systems with more busses have correct MCFG */
#define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 88112b49f02..6b4ffedb93c 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
-VDSO_LDFLAGS = -fPIC -shared $(call ld-option, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
GCOV_PROFILE := n
#
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 544eb749653..3439616d69f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1082,6 +1082,11 @@ asmlinkage void __init xen_start_kernel(void)
__supported_pte_mask |= _PAGE_IOMAP;
+#ifdef CONFIG_X86_64
+ /* Work out if we support NX */
+ check_efer();
+#endif
+
xen_setup_features();
/* Get mfn list */
@@ -1123,11 +1128,6 @@ asmlinkage void __init xen_start_kernel(void)
pgd = (pgd_t *)xen_start_info->pt_base;
-#ifdef CONFIG_X86_64
- /* Work out if we support NX */
- check_efer();
-#endif
-
/* Don't do the full vcpu_info placement stuff until we have a
possible map and a non-dummy shared_info. */
per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 093dd59b538..3bf7b1d250c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+ if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
}
return;
}
- cpumask_copy(mask, &mm->cpu_vm_mask);
+ cpumask_copy(mask, mm_cpumask(mm));
/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because it's in lazy mode, and it hasn't yet flushed
diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h
index 9b92620c8a1..fca4db425f6 100644
--- a/arch/xtensa/include/asm/mman.h
+++ b/arch/xtensa/include/asm/mman.h
@@ -53,6 +53,8 @@
#define MAP_LOCKED 0x8000 /* pages are locked */
#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+#define MAP_STACK 0x40000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x80000 /* create a huge page mapping */
/*
* Flags for msync
@@ -78,6 +80,9 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
/* compatibility flags */
#define MAP_FILE 0
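The new flag and advice values mirror the generic ones, so xtensa user space can use them the same way as other architectures. A small illustrative program (values resolved through the normal headers where they provide them; error handling trimmed):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;

	/* back the mapping with a huge page where the kernel supports it */
	void *huge = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (huge == MAP_FAILED)
		perror("mmap(MAP_HUGETLB)");

	/* offer an ordinary anonymous region to KSM for merging */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	return 0;
}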
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index fe3186de6a3..6f56d95f2c1 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -27,7 +27,8 @@ sed-y = -e 's/(\(\.[a-z]*it\|\.ref\|\)\.text)/(\1.literal \1.text)/g' \
-e 's/(\(\.text\.[a-z]*\))/(\1.literal \1)/g'
quiet_cmd__cpp_lds_S = LDS $@
- cmd__cpp_lds_S = $(CPP) $(cpp_flags) -D__ASSEMBLY__ $< | sed $(sed-y) >$@
+ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
+ | sed $(sed-y) >$@
$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index d9ddc1ba761..d215adcfd4e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -235,7 +235,7 @@ should_never_return:
* BSS section
*/
-.section ".bss.page_aligned", "w"
+__PAGE_ALIGNED_BSS
#ifdef CONFIG_MMU
ENTRY(swapper_pg_dir)
.fill PAGE_SIZE, 1, 0
diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c
index c4302f0e4ba..cd122fb7e48 100644
--- a/arch/xtensa/kernel/init_task.c
+++ b/arch/xtensa/kernel/init_task.c
@@ -23,9 +23,8 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data =
+ { INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 921b6ff3b64..9b526154c9b 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -15,6 +15,8 @@
*/
#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
#include <variant/core.h>
#include <platform/hardware.h>
@@ -107,41 +109,18 @@ SECTIONS
.fixup : { *(.fixup) }
- . = ALIGN(16);
-
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
-
+ EXCEPTION_TABLE(16)
/* Data section */
- . = ALIGN(XCHAL_ICACHE_LINESIZE);
_fdata = .;
- .data :
- {
- DATA_DATA
- CONSTRUCTORS
- . = ALIGN(XCHAL_ICACHE_LINESIZE);
- *(.data.cacheline_aligned)
- }
-
+ RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
- /* The initial task */
- . = ALIGN(8192);
- .data.init_task : { *(.data.init_task) }
-
/* Initialization code and data: */
- . = ALIGN(1 << 12);
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
.init.data :
{
@@ -168,36 +147,15 @@ SECTIONS
.DebugInterruptVector.text);
__boot_reloc_table_end = ABSOLUTE(.) ;
- }
- . = ALIGN(XCHAL_ICACHE_LINESIZE);
-
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
-
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
+ INIT_SETUP(XCHAL_ICACHE_LINESIZE)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
}
- __initcall_end = .;
-
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
-
- SECURITY_INIT
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(4096);
- __initramfs_start =.;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
-
- PERCPU(4096)
+ PERCPU(PAGE_SIZE)
/* We need this dummy segment here */
@@ -252,16 +210,11 @@ SECTIONS
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
- . = ALIGN(1 << 12);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
- . = ALIGN(8192);
-
- /* BSS section */
- _bss_start = .;
- .bss : { *(.bss.page_aligned) *(.bss) }
- _bss_end = .;
+ BSS_SECTION(0, 8192, 0)
_end = .;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 427e14fa43c..cdbc27ca966 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -203,7 +203,7 @@ void __init mem_init(void)
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
"%ldk data, %ldk init %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index d8fb3914598..e5aeb2b79e6 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -14,3 +14,12 @@ config ASYNC_MEMSET
tristate
select ASYNC_CORE
+config ASYNC_PQ
+ tristate
+ select ASYNC_CORE
+
+config ASYNC_RAID6_RECOV
+ tristate
+ select ASYNC_CORE
+ select ASYNC_PQ
+
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile
index 27baa7d52fb..d1e0e6f72bc 100644
--- a/crypto/async_tx/Makefile
+++ b/crypto/async_tx/Makefile
@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o
obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
obj-$(CONFIG_ASYNC_XOR) += async_xor.o
+obj-$(CONFIG_ASYNC_PQ) += async_pq.o
+obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o
+obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index ddccfb01c41..0ec1fb69d4e 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -33,28 +33,31 @@
* async_memcpy - attempt to copy memory with a dma engine.
* @dest: destination page
* @src: src page
- * @offset: offset in pages to start transaction
+ * @dest_offset: offset into 'dest' to start transaction
+ * @src_offset: offset into 'src' to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
- * @depend_tx: memcpy depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
- unsigned int src_offset, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ unsigned int src_offset, size_t len,
+ struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- if (device) {
+ if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
dma_addr_t dma_dest, dma_src;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ unsigned long dma_prep_flags = 0;
+ if (submit->cb_fn)
+ dma_prep_flags |= DMA_PREP_INTERRUPT;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
@@ -67,13 +70,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -83,26 +86,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
-static int __init async_memcpy_init(void)
-{
- return 0;
-}
-
-static void __exit async_memcpy_exit(void)
-{
- do { } while (0);
-}
-
-module_init(async_memcpy_init);
-module_exit(async_memcpy_exit);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");
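The signature change above folds the old flags/depend_tx/callback argument list into a single struct async_submit_ctl. A sketch of how a caller is assumed to use the new convention, with init_async_submit() from <linux/async_tx.h>; the callback, helper name, and cookie are hypothetical:

#include <linux/async_tx.h>

static void copy_done(void *param)
{
	pr_debug("copy complete, cookie %p\n", param);
}

/* sketch of a caller converted to the new submission API */
static struct dma_async_tx_descriptor *
copy_one_page(struct page *dest, struct page *src,
	      struct dma_async_tx_descriptor *depend_tx, void *cookie)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, depend_tx,
			  copy_done, cookie, NULL /* no scribble buffer needed */);
	return async_memcpy(dest, src, 0, 0, PAGE_SIZE, &submit);
}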
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 5b5eb99bb24..58e4a8752ae 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,26 +35,26 @@
* @val: fill value
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: memset depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ *
+ * honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
- size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+async_memset(struct page *dest, int val, unsigned int offset, size_t len,
+ struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
&dest, 1, NULL, 0, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- if (device) {
+ if (device && is_dma_fill_aligned(device, offset, 0, len)) {
dma_addr_t dma_dest;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ unsigned long dma_prep_flags = 0;
+ if (submit->cb_fn)
+ dma_prep_flags |= DMA_PREP_INTERRUPT;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, offset, len,
DMA_FROM_DEVICE);
@@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else { /* run the memset synchronously */
void *dest_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
- dest_buf = (void *) (((char *) page_address(dest)) + offset);
+ dest_buf = page_address(dest) + offset;
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
memset(dest_buf, val, len);
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memset);
-static int __init async_memset_init(void)
-{
- return 0;
-}
-
-static void __exit async_memset_exit(void)
-{
- do { } while (0);
-}
-
-module_init(async_memset_init);
-module_exit(async_memset_exit);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
new file mode 100644
index 00000000000..b88db6d1dc6
--- /dev/null
+++ b/crypto/async_tx/async_pq.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
+ * Copyright(c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/raid/pq.h>
+#include <linux/async_tx.h>
+
+/**
+ * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ */
+static struct page *scribble;
+
+static bool is_raid6_zero_block(struct page *p)
+{
+ return p == (void *) raid6_empty_zero_page;
+}
+
+/* the struct page *blocks[] parameter passed to async_gen_syndrome()
+ * and async_syndrome_val() contains the 'P' destination address at
+ * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
+ *
+ * note: these are macros as they are used as lvalues
+ */
+#define P(b, d) (b[d-2])
+#define Q(b, d) (b[d-1])
+
+/**
+ * do_async_gen_syndrome - asynchronously calculate P and/or Q
+ */
+static __async_inline struct dma_async_tx_descriptor *
+do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
+ const unsigned char *scfs, unsigned int offset, int disks,
+ size_t len, dma_addr_t *dma_src,
+ struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct dma_device *dma = chan->device;
+ enum dma_ctrl_flags dma_flags = 0;
+ enum async_tx_flags flags_orig = submit->flags;
+ dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+ dma_async_tx_callback cb_param_orig = submit->cb_param;
+ int src_cnt = disks - 2;
+ unsigned char coefs[src_cnt];
+ unsigned short pq_src_cnt;
+ dma_addr_t dma_dest[2];
+ int src_off = 0;
+ int idx;
+ int i;
+
+ /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
+ if (P(blocks, disks))
+ dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
+ len, DMA_BIDIRECTIONAL);
+ else
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ if (Q(blocks, disks))
+ dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
+ len, DMA_BIDIRECTIONAL);
+ else
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+
+ /* convert source addresses being careful to collapse 'empty'
+ * sources and update the coefficients accordingly
+ */
+ for (i = 0, idx = 0; i < src_cnt; i++) {
+ if (is_raid6_zero_block(blocks[i]))
+ continue;
+ dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
+ DMA_TO_DEVICE);
+ coefs[idx] = scfs[i];
+ idx++;
+ }
+ src_cnt = idx;
+
+ while (src_cnt > 0) {
+ submit->flags = flags_orig;
+ pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
+ /* if we are submitting additional pqs, leave the chain open,
+ * clear the callback parameters, and leave the destination
+ * buffers mapped
+ */
+ if (src_cnt > pq_src_cnt) {
+ submit->flags &= ~ASYNC_TX_ACK;
+ submit->flags |= ASYNC_TX_FENCE;
+ dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
+ submit->cb_fn = NULL;
+ submit->cb_param = NULL;
+ } else {
+ dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
+ submit->cb_fn = cb_fn_orig;
+ submit->cb_param = cb_param_orig;
+ if (cb_fn_orig)
+ dma_flags |= DMA_PREP_INTERRUPT;
+ }
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward
+ * progress in case they can not provide a descriptor
+ */
+ for (;;) {
+ tx = dma->device_prep_dma_pq(chan, dma_dest,
+ &dma_src[src_off],
+ pq_src_cnt,
+ &coefs[src_off], len,
+ dma_flags);
+ if (likely(tx))
+ break;
+ async_tx_quiesce(&submit->depend_tx);
+ dma_async_issue_pending(chan);
+ }
+
+ async_tx_submit(chan, tx, submit);
+ submit->depend_tx = tx;
+
+ /* drop completed sources */
+ src_cnt -= pq_src_cnt;
+ src_off += pq_src_cnt;
+
+ dma_flags |= DMA_PREP_CONTINUE;
+ }
+
+ return tx;
+}
+
+/**
+ * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
+ */
+static void
+do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+ size_t len, struct async_submit_ctl *submit)
+{
+ void **srcs;
+ int i;
+
+ if (submit->scribble)
+ srcs = submit->scribble;
+ else
+ srcs = (void **) blocks;
+
+ for (i = 0; i < disks; i++) {
+ if (is_raid6_zero_block(blocks[i])) {
+ BUG_ON(i > disks - 3); /* P or Q can't be zero */
+ srcs[i] = blocks[i];
+ } else
+ srcs[i] = page_address(blocks[i]) + offset;
+ }
+ raid6_call.gen_syndrome(disks, len, srcs);
+ async_tx_sync_epilog(submit);
+}
+
+/**
+ * async_gen_syndrome - asynchronously calculate a raid6 syndrome
+ * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
+ * @offset: common offset into each block (src and dest) to start transaction
+ * @disks: number of blocks (including missing P or Q, see below)
+ * @len: length of operation in bytes
+ * @submit: submission/completion modifiers
+ *
+ * General note: This routine assumes a field of GF(2^8) with a
+ * primitive polynomial of 0x11d and a generator of {02}.
+ *
+ * 'disks' note: callers can optionally omit either P or Q (but not
+ * both) from the calculation by setting blocks[disks-2] or
+ * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
+ * PAGE_SIZE as a temporary buffer of this size is used in the
+ * synchronous path. 'disks' always accounts for both destination
+ * buffers.
+ *
+ * 'blocks' note: if submit->scribble is NULL then the contents of
+ * 'blocks' may be overwritten
+ */
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+ size_t len, struct async_submit_ctl *submit)
+{
+ int src_cnt = disks - 2;
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
+ &P(blocks, disks), 2,
+ blocks, src_cnt, len);
+ struct dma_device *device = chan ? chan->device : NULL;
+ dma_addr_t *dma_src = NULL;
+
+ BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) blocks;
+
+ if (dma_src && device &&
+ (src_cnt <= dma_maxpq(device, 0) ||
+ dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+ is_dma_pq_aligned(device, offset, 0, len)) {
+ /* run the p+q asynchronously */
+ pr_debug("%s: (async) disks: %d len: %zu\n",
+ __func__, disks, len);
+ return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
+ disks, len, dma_src, submit);
+ }
+
+ /* run the pq synchronously */
+ pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
+
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&submit->depend_tx);
+
+ if (!P(blocks, disks)) {
+ P(blocks, disks) = scribble;
+ BUG_ON(len + offset > PAGE_SIZE);
+ }
+ if (!Q(blocks, disks)) {
+ Q(blocks, disks) = scribble;
+ BUG_ON(len + offset > PAGE_SIZE);
+ }
+ do_sync_gen_syndrome(blocks, offset, disks, len, submit);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(async_gen_syndrome);
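/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * generate P/Q for a hypothetical six-device stripe and wait for the
 * result.  The stripe[] pages and the addr_conv[] scratch area are
 * assumed to be provided by the caller; the call pattern mirrors the
 * raid6test.c module added further below.
 */
static void example_pq_done(void *param)
{
	complete(param);
}

static void example_gen_pq(struct page *stripe[6], addr_conv_t addr_conv[6])
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct completion cmp;

	init_completion(&cmp);
	/* stripe[0..3] hold data, stripe[4] is P, stripe[5] is Q */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, example_pq_done, &cmp,
			  addr_conv);
	tx = async_gen_syndrome(stripe, 0, 6, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);
	wait_for_completion(&cmp);
}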
+
+/**
+ * async_syndrome_val - asynchronously validate a raid6 syndrome
+ * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
+ * @offset: common offset into each block (src and dest) to start transaction
+ * @disks: number of blocks (including missing P or Q, see below)
+ * @len: length of operation in bytes
+ * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
+ * @spare: temporary result buffer for the synchronous case
+ * @submit: submission / completion modifiers
+ *
+ * The same notes from async_gen_syndrome apply to the 'blocks'
+ * and 'disks' parameters of this routine. The synchronous path
+ * requires a temporary result buffer and submit->scribble to be
+ * specified.
+ */
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
+ size_t len, enum sum_check_flags *pqres, struct page *spare,
+ struct async_submit_ctl *submit)
+{
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
+ NULL, 0, blocks, disks,
+ len);
+ struct dma_device *device = chan ? chan->device : NULL;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
+ dma_addr_t *dma_src = NULL;
+
+ BUG_ON(disks < 4);
+
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) blocks;
+
+ if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+ is_dma_pq_aligned(device, offset, 0, len)) {
+ struct device *dev = device->dev;
+ dma_addr_t *pq = &dma_src[disks-2];
+ int i;
+
+ pr_debug("%s: (async) disks: %d len: %zu\n",
+ __func__, disks, len);
+ if (!P(blocks, disks))
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ if (!Q(blocks, disks))
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+ for (i = 0; i < disks; i++)
+ if (likely(blocks[i])) {
+ BUG_ON(is_raid6_zero_block(blocks[i]));
+ dma_src[i] = dma_map_page(dev, blocks[i],
+ offset, len,
+ DMA_TO_DEVICE);
+ }
+
+ for (;;) {
+ tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
+ disks - 2,
+ raid6_gfexp,
+ len, pqres,
+ dma_flags);
+ if (likely(tx))
+ break;
+ async_tx_quiesce(&submit->depend_tx);
+ dma_async_issue_pending(chan);
+ }
+ async_tx_submit(chan, tx, submit);
+
+ return tx;
+ } else {
+ struct page *p_src = P(blocks, disks);
+ struct page *q_src = Q(blocks, disks);
+ enum async_tx_flags flags_orig = submit->flags;
+ dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+ void *scribble = submit->scribble;
+ void *cb_param_orig = submit->cb_param;
+ void *p, *q, *s;
+
+ pr_debug("%s: (sync) disks: %d len: %zu\n",
+ __func__, disks, len);
+
+ /* caller must provide a temporary result buffer and
+ * allow the input parameters to be preserved
+ */
+ BUG_ON(!spare || !scribble);
+
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&submit->depend_tx);
+
+ /* recompute p and/or q into the temporary buffer and then
+ * check to see the result matches the current value
+ */
+ tx = NULL;
+ *pqres = 0;
+ if (p_src) {
+ init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+ NULL, NULL, scribble);
+ tx = async_xor(spare, blocks, offset, disks-2, len, submit);
+ async_tx_quiesce(&tx);
+ p = page_address(p_src) + offset;
+ s = page_address(spare) + offset;
+ *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
+ }
+
+ if (q_src) {
+ P(blocks, disks) = NULL;
+ Q(blocks, disks) = spare;
+ init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
+ tx = async_gen_syndrome(blocks, offset, disks, len, submit);
+ async_tx_quiesce(&tx);
+ q = page_address(q_src) + offset;
+ s = page_address(spare) + offset;
+ *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
+ }
+
+ /* restore P, Q and submit */
+ P(blocks, disks) = p_src;
+ Q(blocks, disks) = q_src;
+
+ submit->cb_fn = cb_fn_orig;
+ submit->cb_param = cb_param_orig;
+ submit->flags = flags_orig;
+ async_tx_sync_epilog(submit);
+
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(async_syndrome_val);
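/*
 * Illustrative usage sketch (editorial, not part of the patch): validate
 * P/Q of a six-device stripe.  'spare' and 'addr_conv' are caller-provided
 * scratch resources as required above; 'done'/'done_arg' stand in for the
 * caller's completion callback.  The caller is expected to call
 * async_tx_issue_pending() on the returned descriptor and, once the
 * callback runs, test *pqres for SUM_CHECK_P_RESULT / SUM_CHECK_Q_RESULT.
 */
static struct dma_async_tx_descriptor *
example_check_pq(struct page *stripe[6], struct page *spare,
		 addr_conv_t addr_conv[6], enum sum_check_flags *pqres,
		 dma_async_tx_callback done, void *done_arg)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, done_arg,
			  addr_conv);
	return async_syndrome_val(stripe, 0, 6, PAGE_SIZE, pqres, spare,
				  &submit);
}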
+
+static int __init async_pq_init(void)
+{
+ scribble = alloc_page(GFP_KERNEL);
+
+ if (scribble)
+ return 0;
+
+ pr_err("%s: failed to allocate required spare page\n", __func__);
+
+ return -ENOMEM;
+}
+
+static void __exit async_pq_exit(void)
+{
+ put_page(scribble);
+}
+
+module_init(async_pq_init);
+module_exit(async_pq_exit);
+
+MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
+MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
new file mode 100644
index 00000000000..6d73dde4786
--- /dev/null
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -0,0 +1,468 @@
+/*
+ * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
+ * Copyright(c) 2009 Intel Corporation
+ *
+ * based on raid6recov.c:
+ * Copyright 2002 H. Peter Anvin
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/raid/pq.h>
+#include <linux/async_tx.h>
+
+static struct dma_async_tx_descriptor *
+async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
+ size_t len, struct async_submit_ctl *submit)
+{
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
+ &dest, 1, srcs, 2, len);
+ struct dma_device *dma = chan ? chan->device : NULL;
+ const u8 *amul, *bmul;
+ u8 ax, bx;
+ u8 *a, *b, *c;
+
+ if (dma) {
+ dma_addr_t dma_dest[2];
+ dma_addr_t dma_src[2];
+ struct device *dev = dma->dev;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
+
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+ dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
+ dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
+ len, dma_flags);
+ if (tx) {
+ async_tx_submit(chan, tx, submit);
+ return tx;
+ }
+
+ /* could not get a descriptor, unmap and fall through to
+ * the synchronous path
+ */
+ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
+ }
+
+ /* run the operation synchronously */
+ async_tx_quiesce(&submit->depend_tx);
+ amul = raid6_gfmul[coef[0]];
+ bmul = raid6_gfmul[coef[1]];
+ a = page_address(srcs[0]);
+ b = page_address(srcs[1]);
+ c = page_address(dest);
+
+ while (len--) {
+ ax = amul[*a++];
+ bx = bmul[*b++];
+ *c++ = ax ^ bx;
+ }
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
+ struct async_submit_ctl *submit)
+{
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
+ &dest, 1, &src, 1, len);
+ struct dma_device *dma = chan ? chan->device : NULL;
+ const u8 *qmul; /* Q multiplier table */
+ u8 *d, *s;
+
+ if (dma) {
+ dma_addr_t dma_dest[2];
+ dma_addr_t dma_src[1];
+ struct device *dev = dma->dev;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
+
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+ dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
+ len, dma_flags);
+ if (tx) {
+ async_tx_submit(chan, tx, submit);
+ return tx;
+ }
+
+ /* could not get a descriptor, unmap and fall through to
+ * the synchronous path
+ */
+ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ }
+
+ /* no channel available, or failed to allocate a descriptor, so
+ * perform the operation synchronously
+ */
+ async_tx_quiesce(&submit->depend_tx);
+ qmul = raid6_gfmul[coef];
+ d = page_address(dest);
+ s = page_address(src);
+
+ while (len--)
+ *d++ = qmul[*s++];
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
+ struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *a, *b;
+ struct page *srcs[2];
+ unsigned char coef[2];
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+
+ p = blocks[4-2];
+ q = blocks[4-1];
+
+ a = blocks[faila];
+ b = blocks[failb];
+
+ /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
+ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
+ srcs[0] = p;
+ srcs[1] = q;
+ coef[0] = raid6_gfexi[failb-faila];
+ coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_sum_product(b, srcs, coef, bytes, submit);
+
+ /* Dy = P+Pxy+Dx */
+ srcs[0] = p;
+ srcs[1] = b;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(a, srcs, 0, 2, bytes, submit);
+
+ return tx;
+
+}
+
+static struct dma_async_tx_descriptor *
+__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
+ struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *g, *dp, *dq;
+ struct page *srcs[2];
+ unsigned char coef[2];
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+ int uninitialized_var(good);
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (i == faila || i == failb)
+ continue;
+ else {
+ good = i;
+ break;
+ }
+ }
+ BUG_ON(i >= 3);
+
+ p = blocks[5-2];
+ q = blocks[5-1];
+ g = blocks[good];
+
+ /* Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for delta p and
+ * delta q
+ */
+ dp = blocks[faila];
+ dq = blocks[failb];
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_memcpy(dp, g, 0, 0, bytes, submit);
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
+
+ /* compute P + Pxy */
+ srcs[0] = dp;
+ srcs[1] = p;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ /* compute Q + Qxy */
+ srcs[0] = dq;
+ srcs[1] = q;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dq, srcs, 0, 2, bytes, submit);
+
+ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ coef[0] = raid6_gfexi[failb-faila];
+ coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_sum_product(dq, srcs, coef, bytes, submit);
+
+ /* Dy = P+Pxy+Dx */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
+__2data_recov_n(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *dp, *dq;
+ struct page *srcs[2];
+ unsigned char coef[2];
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+
+ p = blocks[disks-2];
+ q = blocks[disks-1];
+
+ /* Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = blocks[faila];
+ blocks[faila] = (void *)raid6_empty_zero_page;
+ blocks[disks-2] = dp;
+ dq = blocks[failb];
+ blocks[failb] = (void *)raid6_empty_zero_page;
+ blocks[disks-1] = dq;
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
+
+ /* Restore pointer table */
+ blocks[faila] = dp;
+ blocks[failb] = dq;
+ blocks[disks-2] = p;
+ blocks[disks-1] = q;
+
+ /* compute P + Pxy */
+ srcs[0] = dp;
+ srcs[1] = p;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ /* compute Q + Qxy */
+ srcs[0] = dq;
+ srcs[1] = q;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dq, srcs, 0, 2, bytes, submit);
+
+ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ coef[0] = raid6_gfexi[failb-faila];
+ coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_sum_product(dq, srcs, coef, bytes, submit);
+
+ /* Dy = P+Pxy+Dx */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ return tx;
+}
+
+/**
+ * async_raid6_2data_recov - asynchronously calculate two missing data blocks
+ * @disks: number of disks in the RAID-6 array
+ * @bytes: block size
+ * @faila: first failed drive index
+ * @failb: second failed drive index
+ * @blocks: array of source pointers where the last two entries are p and q
+ * @submit: submission/completion modifiers
+ */
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ BUG_ON(faila == failb);
+ if (failb < faila)
+ swap(faila, failb);
+
+ pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
+
+ /* we need to preserve the contents of 'blocks' for the async
+ * case, so punt to synchronous if a scribble buffer is not available
+ */
+ if (!submit->scribble) {
+ void **ptrs = (void **) blocks;
+ int i;
+
+ async_tx_quiesce(&submit->depend_tx);
+ for (i = 0; i < disks; i++)
+ ptrs[i] = page_address(blocks[i]);
+
+ raid6_2data_recov(disks, bytes, faila, failb, ptrs);
+
+ async_tx_sync_epilog(submit);
+
+ return NULL;
+ }
+
+ switch (disks) {
+ case 4:
+ /* dma devices do not uniformly understand a zero source pq
+ * operation (in contrast to the synchronous case), so
+ * explicitly handle the 4 disk special case
+ */
+ return __2data_recov_4(bytes, faila, failb, blocks, submit);
+ case 5:
+ /* dma devices do not uniformly understand a single
+ * source pq operation (in contrast to the synchronous
+ * case), so explicitly handle the 5 disk special case
+ */
+ return __2data_recov_5(bytes, faila, failb, blocks, submit);
+ default:
+ return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
+ }
+}
+EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
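/*
 * Illustrative usage sketch (editorial, not part of the patch): rebuild
 * two failed data blocks of a six-device stripe.  The failed indexes and
 * the 'done' callback are assumptions for the example; blocks[] follows
 * the layout documented above (P at [4], Q at [5] when disks == 6).  The
 * caller should async_tx_issue_pending() the returned descriptor.
 */
static struct dma_async_tx_descriptor *
example_recover_2data(struct page *blocks[6], addr_conv_t addr_conv[6],
		      int faila, int failb,
		      dma_async_tx_callback done, void *done_arg)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, done_arg,
			  addr_conv);
	return async_raid6_2data_recov(6, PAGE_SIZE, faila, failb, blocks,
				       &submit);
}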
+
+/**
+ * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
+ * @disks: number of disks in the RAID-6 array
+ * @bytes: block size
+ * @faila: failed drive index
+ * @blocks: array of source pointers where the last two entries are p and q
+ * @submit: submission/completion modifiers
+ */
+struct dma_async_tx_descriptor *
+async_raid6_datap_recov(int disks, size_t bytes, int faila,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *dq;
+ u8 coef;
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+ struct page *srcs[2];
+
+ pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
+
+ /* we need to preserve the contents of 'blocks' for the async
+ * case, so punt to synchronous if a scribble buffer is not available
+ */
+ if (!scribble) {
+ void **ptrs = (void **) blocks;
+ int i;
+
+ async_tx_quiesce(&submit->depend_tx);
+ for (i = 0; i < disks; i++)
+ ptrs[i] = page_address(blocks[i]);
+
+ raid6_datap_recov(disks, bytes, faila, ptrs);
+
+ async_tx_sync_epilog(submit);
+
+ return NULL;
+ }
+
+ p = blocks[disks-2];
+ q = blocks[disks-1];
+
+ /* Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = blocks[faila];
+ blocks[faila] = (void *)raid6_empty_zero_page;
+ blocks[disks-1] = dq;
+
+ /* in the 4 disk case we only need to perform a single source
+ * multiplication
+ */
+ if (disks == 4) {
+ int good = faila == 0 ? 1 : 0;
+ struct page *g = blocks[good];
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
+ scribble);
+ tx = async_memcpy(p, g, 0, 0, bytes, submit);
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
+ scribble);
+ tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
+ } else {
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
+ scribble);
+ tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
+ }
+
+ /* Restore pointer table */
+ blocks[faila] = dq;
+ blocks[disks-1] = q;
+
+ /* calculate g^{-faila} */
+ coef = raid6_gfinv[raid6_gfexp[faila]];
+
+ srcs[0] = dq;
+ srcs[1] = q;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dq, srcs, 0, 2, bytes, submit);
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_mult(dq, dq, coef, bytes, submit);
+
+ srcs[0] = p;
+ srcs[1] = dq;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(p, srcs, 0, 2, bytes, submit);
+
+ return tx;
+}
+EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
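/*
 * Illustrative usage sketch (editorial, not part of the patch): recover a
 * single failed data block together with P for a six-device stripe.
 * 'faila' and the completion callback are assumptions for the example;
 * the caller should async_tx_issue_pending() the returned descriptor.
 */
static struct dma_async_tx_descriptor *
example_recover_datap(struct page *blocks[6], addr_conv_t addr_conv[6],
		      int faila, dma_async_tx_callback done, void *done_arg)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, done_arg,
			  addr_conv);
	return async_raid6_datap_recov(6, PAGE_SIZE, faila, blocks, &submit);
}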
+
+MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
+MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
+MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 06eb6cc09fe..f9cdf04fe7c 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void)
async_dmaengine_put();
}
+module_init(async_tx_init);
+module_exit(async_tx_exit);
+
/**
* __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
- * @depend_tx: transaction dependency
+ * @submit: transaction dependency and submission modifiers
* @tx_type: transaction type
*/
struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
- enum dma_transaction_type tx_type)
+__async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type)
{
+ struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
/* see if we can keep the chain on one channel */
if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
-#else
-static int __init async_tx_init(void)
-{
- printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
- return 0;
-}
-
-static void __exit async_tx_exit(void)
-{
- do { } while (0);
-}
#endif
@@ -83,10 +77,14 @@ static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_async_tx_descriptor *tx)
{
- struct dma_chan *chan;
- struct dma_device *device;
+ struct dma_chan *chan = depend_tx->chan;
+ struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
+ #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ BUG();
+ #endif
+
/* first check to see if we can still append to depend_tx */
spin_lock_bh(&depend_tx->lock);
if (depend_tx->parent && depend_tx->chan == tx->chan) {
@@ -96,11 +94,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
spin_unlock_bh(&depend_tx->lock);
- if (!intr_tx)
+ /* attached dependency, flush the parent channel */
+ if (!intr_tx) {
+ device->device_issue_pending(chan);
return;
-
- chan = depend_tx->chan;
- device = chan->device;
+ }
/* see if we can schedule an interrupt
* otherwise poll for completion
@@ -134,6 +132,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
+ device->device_issue_pending(chan);
} else {
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
@@ -144,13 +143,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
- * submit_disposition - while holding depend_tx->lock we must avoid submitting
- * new operations to prevent a circular locking dependency with
- * drivers that already hold a channel lock when calling
- * async_tx_run_dependencies.
+ * submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ *
+ * while holding depend_tx->lock we must avoid submitting new operations
+ * to prevent a circular locking dependency with drivers that already
+ * hold a channel lock when calling async_tx_run_dependencies.
*/
enum submit_disposition {
ASYNC_TX_SUBMITTED,
@@ -160,11 +160,12 @@ enum submit_disposition {
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
- enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ struct async_submit_ctl *submit)
{
- tx->callback = cb_fn;
- tx->callback_param = cb_param;
+ struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
+ tx->callback = submit->cb_fn;
+ tx->callback_param = submit->cb_param;
if (depend_tx) {
enum submit_disposition s;
@@ -220,30 +221,29 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
tx->tx_submit(tx);
}
- if (flags & ASYNC_TX_ACK)
+ if (submit->flags & ASYNC_TX_ACK)
async_tx_ack(tx);
- if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
+ if (depend_tx)
async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
- * async_trigger_callback - schedules the callback function to be run after
- * any dependent operations have been completed.
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: 'callback' requires the completion of this transaction
- * @cb_fn: function to call after depend_tx completes
- * @cb_param: parameter to pass to the callback routine
+ * async_trigger_callback - schedules the callback function to be run
+ * @submit: submission and completion parameters
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * The callback is run after any dependent operations have completed.
*/
struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+async_trigger_callback(struct async_submit_ctl *submit)
{
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
if (depend_tx) {
chan = depend_tx->chan;
@@ -262,14 +262,14 @@ async_trigger_callback(enum async_tx_flags flags,
if (tx) {
pr_debug("%s: (async)\n", __func__);
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else {
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
return tx;
@@ -295,9 +295,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
-module_init(async_tx_init);
-module_exit(async_tx_exit);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 90dd3f8bd28..b459a9034aa 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,19 +33,16 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len,
- enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
+ struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
- dma_addr_t *dma_src = (dma_addr_t *) src_list;
struct dma_async_tx_descriptor *tx = NULL;
int src_off = 0;
int i;
- dma_async_tx_callback _cb_fn;
- void *_cb_param;
- enum async_tx_flags async_flags;
+ dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+ void *cb_param_orig = submit->cb_param;
+ enum async_tx_flags flags_orig = submit->flags;
enum dma_ctrl_flags dma_flags;
int xor_src_cnt;
dma_addr_t dma_dest;
@@ -63,25 +60,27 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
}
while (src_cnt) {
- async_flags = flags;
+ submit->flags = flags_orig;
dma_flags = 0;
- xor_src_cnt = min(src_cnt, dma->max_xor);
+ xor_src_cnt = min(src_cnt, (int)dma->max_xor);
/* if we are submitting additional xors, leave the chain open,
* clear the callback parameters, and leave the destination
* buffer mapped
*/
if (src_cnt > xor_src_cnt) {
- async_flags &= ~ASYNC_TX_ACK;
+ submit->flags &= ~ASYNC_TX_ACK;
+ submit->flags |= ASYNC_TX_FENCE;
dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
- _cb_fn = NULL;
- _cb_param = NULL;
+ submit->cb_fn = NULL;
+ submit->cb_param = NULL;
} else {
- _cb_fn = cb_fn;
- _cb_param = cb_param;
+ submit->cb_fn = cb_fn_orig;
+ submit->cb_param = cb_param_orig;
}
- if (_cb_fn)
+ if (submit->cb_fn)
dma_flags |= DMA_PREP_INTERRUPT;
-
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward progress
* in case they can not provide a descriptor
@@ -90,7 +89,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
xor_src_cnt, len, dma_flags);
if (unlikely(!tx))
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
/* spin wait for the preceding transactions to complete */
while (unlikely(!tx)) {
@@ -101,11 +100,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags);
}
- async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
- _cb_param);
-
- depend_tx = tx;
- flags |= ASYNC_TX_DEP_ACK;
+ async_tx_submit(chan, tx, submit);
+ submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
@@ -124,23 +120,27 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum async_tx_flags flags,
- dma_async_tx_callback cb_fn, void *cb_param)
+ int src_cnt, size_t len, struct async_submit_ctl *submit)
{
int i;
int xor_src_cnt;
int src_off = 0;
void *dest_buf;
- void **srcs = (void **) src_list;
+ void **srcs;
+
+ if (submit->scribble)
+ srcs = submit->scribble;
+ else
+ srcs = (void **) src_list;
- /* reuse the 'src_list' array to convert to buffer pointers */
+ /* convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
srcs[i] = page_address(src_list[i]) + offset;
/* set destination address */
dest_buf = page_address(dest) + offset;
- if (flags & ASYNC_TX_XOR_ZERO_DST)
+ if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
memset(dest_buf, 0, len);
while (src_cnt > 0) {
@@ -153,61 +153,70 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
src_off += xor_src_cnt;
}
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
/**
* async_xor - attempt to xor a set of blocks with a dma engine.
- * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
- * flag must be set to not include dest data in the calculation. The
- * assumption with dma eninges is that they only use the destination
- * buffer as a source when it is explicity specified in the source list.
* @dest: destination page
- * @src_list: array of source pages (if the dest is also a source it must be
- * at index zero). The contents of this array may be overwritten.
- * @offset: offset in pages to start transaction
+ * @src_list: array of source pages
+ * @offset: common src/dst offset to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation. The assumption with dma engines is that they only
+ * use the destination buffer as a source when it is explicitly specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
*/
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ int src_cnt, size_t len, struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
+ dma_addr_t *dma_src = NULL;
+
BUG_ON(src_cnt <= 1);
- if (chan) {
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) src_list;
+
+ if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
- flags, depend_tx, cb_fn, cb_param);
+ dma_src, submit);
} else {
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
+ WARN_ONCE(chan, "%s: no space for dma address conversion\n",
+ __func__);
/* in the sync case the dest is an implied source
* (assumes the dest is the first source)
*/
- if (flags & ASYNC_TX_XOR_DROP_DST) {
+ if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
}
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
- do_sync_xor(dest, src_list, offset, src_cnt, len,
- flags, cb_fn, cb_param);
+ do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
return NULL;
}
@@ -222,104 +231,94 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
}
/**
- * async_xor_zero_sum - attempt a xor parity check with a dma engine.
+ * async_xor_val - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages. The dest page must be listed as a source
- * at index zero. The contents of this array may be overwritten.
+ * @src_list: array of source pages
* @offset: offset in pages to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
*/
struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len,
- u32 *result, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
&dest, 1, src_list,
src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
+ dma_addr_t *dma_src = NULL;
BUG_ON(src_cnt <= 1);
- if (device && src_cnt <= device->max_xor) {
- dma_addr_t *dma_src = (dma_addr_t *) src_list;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) src_list;
+
+ if (dma_src && device && src_cnt <= device->max_xor &&
+ is_dma_xor_aligned(device, offset, 0, len)) {
+ unsigned long dma_prep_flags = 0;
int i;
pr_debug("%s: (async) len: %zu\n", __func__, len);
+ if (submit->cb_fn)
+ dma_prep_flags |= DMA_PREP_INTERRUPT;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_prep_flags |= DMA_PREP_FENCE;
for (i = 0; i < src_cnt; i++)
dma_src[i] = dma_map_page(device->dev, src_list[i],
offset, len, DMA_TO_DEVICE);
- tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
- len, result,
- dma_prep_flags);
+ tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+ len, result,
+ dma_prep_flags);
if (unlikely(!tx)) {
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
while (!tx) {
dma_async_issue_pending(chan);
- tx = device->device_prep_dma_zero_sum(chan,
+ tx = device->device_prep_dma_xor_val(chan,
dma_src, src_cnt, len, result,
dma_prep_flags);
}
}
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else {
- unsigned long xor_flags = flags;
+ enum async_tx_flags flags_orig = submit->flags;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
+ WARN_ONCE(device && src_cnt <= device->max_xor,
+ "%s: no space for dma address conversion\n",
+ __func__);
- xor_flags |= ASYNC_TX_XOR_DROP_DST;
- xor_flags &= ~ASYNC_TX_ACK;
+ submit->flags |= ASYNC_TX_XOR_DROP_DST;
+ submit->flags &= ~ASYNC_TX_ACK;
- tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
- depend_tx, NULL, NULL);
+ tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
async_tx_quiesce(&tx);
- *result = page_is_zero(dest, offset, len) ? 0 : 1;
+ *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
+ submit->flags = flags_orig;
}
return tx;
}
-EXPORT_SYMBOL_GPL(async_xor_zero_sum);
-
-static int __init async_xor_init(void)
-{
- #ifdef CONFIG_ASYNC_TX_DMA
- /* To conserve stack space the input src_list (array of page pointers)
- * is reused to hold the array of dma addresses passed to the driver.
- * This conversion is only possible when dma_addr_t is less than the
- * the size of a pointer. HIGHMEM64G is known to violate this
- * assumption.
- */
- BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
- #endif
-
- return 0;
-}
-
-static void __exit async_xor_exit(void)
-{
- do { } while (0);
-}
-
-module_init(async_xor_init);
-module_exit(async_xor_exit);
+EXPORT_SYMBOL_GPL(async_xor_val);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
new file mode 100644
index 00000000000..3ec27c7e62e
--- /dev/null
+++ b/crypto/async_tx/raid6test.c
@@ -0,0 +1,240 @@
+/*
+ * asynchronous raid6 recovery self test
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * based on drivers/md/raid6test/test.c:
+ * Copyright 2002-2007 H. Peter Anvin
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/async_tx.h>
+#include <linux/random.h>
+
+#undef pr
+#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
+
+#define NDISKS 16 /* Including P and Q */
+
+static struct page *dataptrs[NDISKS];
+static addr_conv_t addr_conv[NDISKS];
+static struct page *data[NDISKS+3];
+static struct page *spare;
+static struct page *recovi;
+static struct page *recovj;
+
+static void callback(void *param)
+{
+ struct completion *cmp = param;
+
+ complete(cmp);
+}
+
+static void makedata(int disks)
+{
+ int i, j;
+
+ for (i = 0; i < disks; i++) {
+ for (j = 0; j < PAGE_SIZE/sizeof(u32); j++) {
+ u32 *p = page_address(data[i]) + j * sizeof(u32);
+
+ *p = random32();
+ }
+
+ dataptrs[i] = data[i];
+ }
+}
+
+static char disk_type(int d, int disks)
+{
+ if (d == disks - 2)
+ return 'P';
+ else if (d == disks - 1)
+ return 'Q';
+ else
+ return 'D';
+}
+
+/* Recover two failed blocks. */
+static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
+{
+ struct async_submit_ctl submit;
+ struct completion cmp;
+ struct dma_async_tx_descriptor *tx = NULL;
+ enum sum_check_flags result = ~0;
+
+ if (faila > failb)
+ swap(faila, failb);
+
+ if (failb == disks-1) {
+ if (faila == disks-2) {
+ /* P+Q failure. Just rebuild the syndrome. */
+ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
+ } else {
+ struct page *blocks[disks];
+ struct page *dest;
+ int count = 0;
+ int i;
+
+ /* data+Q failure. Reconstruct data from P,
+ * then rebuild syndrome
+ */
+ for (i = disks; i-- ; ) {
+ if (i == faila || i == failb)
+ continue;
+ blocks[count++] = ptrs[i];
+ }
+ dest = ptrs[faila];
+ init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+ NULL, NULL, addr_conv);
+ tx = async_xor(dest, blocks, 0, count, bytes, &submit);
+
+ init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
+ tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
+ }
+ } else {
+ if (failb == disks-2) {
+ /* data+P failure. */
+ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
+ } else {
+ /* data+data failure. */
+ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
+ }
+ }
+ init_completion(&cmp);
+ init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
+ tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
+ async_tx_issue_pending(tx);
+
+ if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
+ pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
+ __func__, faila, failb, disks);
+
+ if (result != 0)
+ pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
+ __func__, faila, failb, result);
+}
+
+static int test_disks(int i, int j, int disks)
+{
+ int erra, errb;
+
+ memset(page_address(recovi), 0xf0, PAGE_SIZE);
+ memset(page_address(recovj), 0xba, PAGE_SIZE);
+
+ dataptrs[i] = recovi;
+ dataptrs[j] = recovj;
+
+ raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);
+
+ erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
+ errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
+
+ pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
+ __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
+ (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");
+
+ dataptrs[i] = data[i];
+ dataptrs[j] = data[j];
+
+ return erra || errb;
+}
+
+static int test(int disks, int *tests)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct async_submit_ctl submit;
+ struct completion cmp;
+ int err = 0;
+ int i, j;
+
+ recovi = data[disks];
+ recovj = data[disks+1];
+ spare = data[disks+2];
+
+ makedata(disks);
+
+ /* Nuke syndromes */
+ memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
+ memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);
+
+ /* Generate assumed good syndrome */
+ init_completion(&cmp);
+ init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
+ tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
+ async_tx_issue_pending(tx);
+
+ if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
+ pr("error: initial gen_syndrome(%d) timed out\n", disks);
+ return 1;
+ }
+
+ pr("testing the %d-disk case...\n", disks);
+ for (i = 0; i < disks-1; i++)
+ for (j = i+1; j < disks; j++) {
+ (*tests)++;
+ err += test_disks(i, j, disks);
+ }
+
+ return err;
+}
+
+
+static int raid6_test(void)
+{
+ int err = 0;
+ int tests = 0;
+ int i;
+
+ for (i = 0; i < NDISKS+3; i++) {
+ data[i] = alloc_page(GFP_KERNEL);
+ if (!data[i]) {
+ while (i--)
+ put_page(data[i]);
+ return -ENOMEM;
+ }
+ }
+
+ /* the 4-disk and 5-disk cases are special for the recovery code */
+ if (NDISKS > 4)
+ err += test(4, &tests);
+ if (NDISKS > 5)
+ err += test(5, &tests);
+ err += test(NDISKS, &tests);
+
+ pr("\n");
+ pr("complete (%d tests, %d failure%s)\n",
+ tests, err, err == 1 ? "" : "s");
+
+ for (i = 0; i < NDISKS+3; i++)
+ put_page(data[i]);
+
+ return 0;
+}
+
+static void raid6_test_exit(void)
+{
+}
+
+/* when compiled-in, wait for drivers to load first (assumes dma drivers
+ * are also compiled-in)
+ */
+late_initcall(raid6_test);
+module_exit(raid6_test_exit);
+MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
+MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/Makefile b/drivers/Makefile
index bc4205d2fc3..6ee53c7a57a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
obj-y += video/
obj-$(CONFIG_ACPI) += acpi/
+obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
@@ -42,6 +43,8 @@ obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
+obj-$(CONFIG_MTD) += mtd/
+obj-$(CONFIG_SPI) += spi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
@@ -50,8 +53,6 @@ obj-y += ieee1394/
obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
obj-y += auxdisplay/
-obj-$(CONFIG_MTD) += mtd/
-obj-$(CONFIG_SPI) += spi/
obj-$(CONFIG_PCCARD) += pcmcia/
obj-$(CONFIG_DIO) += dio/
obj-$(CONFIG_SBUS) += sbus/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7ec7d88c599..dd8729d674e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -60,7 +60,11 @@ config ACPI_PROCFS
/proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
/proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
/proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
-
+ /proc/acpi/processor/*/power (/sys/devices/system/cpu/*/cpuidle/*)
+ /proc/acpi/processor/*/performance (/sys/devices/system/cpu/*/
+ cpufreq/*)
+ /proc/acpi/processor/*/throttling (/sys/class/thermal/
+ cooling_device*/*)
This option has no effect on /proc/acpi/ files
and functions which do not yet exist in /sys.
@@ -82,6 +86,17 @@ config ACPI_PROCFS_POWER
Say N to delete power /proc/acpi/ directories that have moved to /sys/
+config ACPI_POWER_METER
+ tristate "ACPI 4.0 power meter"
+ depends on HWMON
+ help
+ This driver exposes ACPI 4.0 power meters as hardware monitoring
+ devices. Say Y (or M) if you have a computer with ACPI 4.0 firmware
+ and a power meter.
+
+ To compile this driver as a module, choose M here:
+ the module will be called power_meter.
+
config ACPI_SYSFS_POWER
bool "Future power /sys interface"
select POWER_SUPPLY
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 03a985be3fe..82cd49dc603 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
+obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
# processor has its own "processor." module_param namespace
processor-y := processor_core.o processor_throttling.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 0df8fcb687d..98b9690b015 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -37,6 +37,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_AC_DEVICE_NAME "AC Adapter"
#define ACPI_AC_FILE_STATE "state"
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 9a62224cc27..28ccdbc05ac 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -53,7 +53,6 @@ MODULE_LICENSE("GPL");
static int acpi_memory_device_add(struct acpi_device *device);
static int acpi_memory_device_remove(struct acpi_device *device, int type);
-static int acpi_memory_device_start(struct acpi_device *device);
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
@@ -68,7 +67,6 @@ static struct acpi_driver acpi_memory_device_driver = {
.ops = {
.add = acpi_memory_device_add,
.remove = acpi_memory_device_remove,
- .start = acpi_memory_device_start,
},
};
@@ -431,28 +429,6 @@ static int acpi_memory_device_add(struct acpi_device *device)
printk(KERN_DEBUG "%s \n", acpi_device_name(device));
- return result;
-}
-
-static int acpi_memory_device_remove(struct acpi_device *device, int type)
-{
- struct acpi_memory_device *mem_device = NULL;
-
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- mem_device = acpi_driver_data(device);
- kfree(mem_device);
-
- return 0;
-}
-
-static int acpi_memory_device_start (struct acpi_device *device)
-{
- struct acpi_memory_device *mem_device;
- int result = 0;
-
/*
* Early boot code has recognized memory area by EFI/E820.
* If DSDT shows these memory devices on boot, hotplug is not necessary
@@ -462,8 +438,6 @@ static int acpi_memory_device_start (struct acpi_device *device)
if (!acpi_hotmem_initialized)
return 0;
- mem_device = acpi_driver_data(device);
-
if (!acpi_memory_check_device(mem_device)) {
/* call add_memory func */
result = acpi_memory_enable_device(mem_device);
@@ -474,6 +448,20 @@ static int acpi_memory_device_start (struct acpi_device *device)
return result;
}
+static int acpi_memory_device_remove(struct acpi_device *device, int type)
+{
+ struct acpi_memory_device *mem_device = NULL;
+
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ mem_device = acpi_driver_data(device);
+ kfree(mem_device);
+
+ return 0;
+}
+
/*
* Helper function to check for memory device
*/
@@ -481,26 +469,23 @@ static acpi_status is_memory_device(acpi_handle handle)
{
char *hardware_id;
acpi_status status;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device_info *info;
-
- status = acpi_get_object_info(handle, &buffer);
+ status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status))
return status;
- info = buffer.pointer;
if (!(info->valid & ACPI_VALID_HID)) {
- kfree(buffer.pointer);
+ kfree(info);
return AE_ERROR;
}
- hardware_id = info->hardware_id.value;
+ hardware_id = info->hardware_id.string;
if ((hardware_id == NULL) ||
(strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
status = AE_ERROR;
- kfree(buffer.pointer);
+ kfree(info);
return status;
}
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 72ac28da14e..e7973bc1684 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -28,7 +28,7 @@ acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
- nsparse.o nspredef.o
+ nsparse.o nspredef.o nsrepair.o
acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
@@ -44,4 +44,4 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
- utstate.o utmutex.o utobject.o utresrc.o utlock.o
+ utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index e6777fb883d..8e679ef5b23 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -183,7 +183,7 @@
/* Operation regions */
-#define ACPI_NUM_PREDEFINED_REGIONS 8
+#define ACPI_NUM_PREDEFINED_REGIONS 9
#define ACPI_USER_REGION_BEGIN 0x80
/* Maximum space_ids for Operation Regions */
@@ -199,9 +199,15 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus bidirectional buffer size */
+/* SMBus and IPMI bidirectional buffer size */
#define ACPI_SMBUS_BUFFER_SIZE 34
+#define ACPI_IPMI_BUFFER_SIZE 66
+
+/* _sx_d and _sx_w control methods */
+
+#define ACPI_NUM_sx_d_METHODS 4
+#define ACPI_NUM_sx_w_METHODS 5
/******************************************************************************
*
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 62c59df3b86..a4fb001d96f 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -154,10 +154,6 @@ void
acpi_db_display_argument_object(union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state);
-void acpi_db_check_predefined_names(void);
-
-void acpi_db_batch_execute(void);
-
/*
* dbexec - debugger control method execution
*/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 3d87362d17e..29ba66d5a79 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -58,6 +58,10 @@
#define ACPI_INIT_GLOBAL(a,b) a
#endif
+#ifdef DEFINE_ACPI_GLOBALS
+
+/* Public globals, available from outside ACPICA subsystem */
+
/*****************************************************************************
*
* Runtime configuration (static defaults that can be overridden at runtime)
@@ -78,7 +82,7 @@
* 5) Allow unresolved references (invalid target name) in package objects
* 6) Enable warning messages for behavior that is not ACPI spec compliant
*/
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
/*
* Automatically serialize ALL control methods? Default is FALSE, meaning
@@ -86,27 +90,36 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
* Only change this if the ASL code is poorly written and cannot handle
* reentrancy even though methods are marked "NotSerialized".
*/
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
/*
* Create the predefined _OSI method in the namespace? Default is TRUE
* because ACPI CA is fully compatible with other ACPI implementations.
* Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
*/
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
/*
* Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
* RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
* be enabled just before going to sleep.
*/
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
/*
* Optionally use default values for the ACPI register widths. Set this to
* TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
*/
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+
+/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
+
+struct acpi_table_fadt acpi_gbl_FADT;
+u32 acpi_current_gpe_count;
+u32 acpi_gbl_trace_flags;
+acpi_name acpi_gbl_trace_method_name;
+
+#endif
/*****************************************************************************
*
@@ -114,11 +127,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
*
****************************************************************************/
-/* Runtime configuration of debug print levels */
-
-extern u32 acpi_dbg_level;
-extern u32 acpi_dbg_layer;
-
/* Procedure nesting level for debug output */
extern u32 acpi_gbl_nesting_level;
@@ -127,10 +135,8 @@ extern u32 acpi_gbl_nesting_level;
ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_flags;
/*****************************************************************************
*
@@ -142,10 +148,8 @@ ACPI_EXTERN u32 acpi_gbl_trace_flags;
* acpi_gbl_root_table_list is the master list of ACPI tables found in the
* RSDT/XSDT.
*
- * acpi_gbl_FADT is a local copy of the FADT, converted to a common format.
*/
ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
-ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
/* These addresses are calculated from the FADT Event Block addresses */
@@ -261,7 +265,8 @@ ACPI_EXTERN u8 acpi_gbl_osi_data;
extern u8 acpi_gbl_shutdown;
extern u32 acpi_gbl_startup_flags;
extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
-extern const char *acpi_gbl_highest_dstate_names[4];
+extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
+extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
@@ -290,6 +295,7 @@ extern char const *acpi_gbl_exception_names_ctrl[];
ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
+ACPI_EXTERN union acpi_operand_object *acpi_gbl_module_code_list;
extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
extern const struct acpi_predefined_names
@@ -340,7 +346,6 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN u32 acpi_current_gpe_count;
/*****************************************************************************
*
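For readers unfamiliar with the pattern the acglobal.h change above relies on, here is a minimal, self-contained sketch of the declare-or-define idiom: exactly one translation unit defines DEFINE_ACPI_GLOBALS before including the header, so only that unit emits the definitions and initializers, while every other unit sees extern declarations. The macro bodies and example names below are illustrative assumptions, not copied from ACPICA; in the real tree the public extern declarations live in a public header (such as acpixf.h).

#include <stdint.h>

#ifdef DEFINE_ACPI_GLOBALS
#define ACPI_EXTERN                     /* this unit owns the definitions */
#define ACPI_INIT_GLOBAL(a, b)  a = b   /* ...and the initializers */
#else
#define ACPI_EXTERN             extern
#define ACPI_INIT_GLOBAL(a, b)  a
#endif

/* Private global: extern in every unit, defined once in the owning unit */
ACPI_EXTERN uint32_t example_private_global;

/* Public global: emitted only by the owning unit; other code would pick up
 * a plain extern declaration from a public header (e.g. acpixf.h) */
#ifdef DEFINE_ACPI_GLOBALS
uint8_t ACPI_INIT_GLOBAL(example_public_flag, 0);
#endif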
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 4afa3d8e0ef..36192f142fb 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -62,6 +62,14 @@ u32 acpi_hw_get_mode(void);
/*
* hwregs - ACPI Register I/O
*/
+acpi_status
+acpi_hw_validate_register(struct acpi_generic_address *reg,
+ u8 max_bit_width, u64 *address);
+
+acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg);
+
+acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg);
+
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control);
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index e8db7a3143a..5db9f2916f7 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -461,9 +461,9 @@ void acpi_ex_acquire_global_lock(u32 rule);
void acpi_ex_release_global_lock(u32 rule);
-void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string);
+void acpi_ex_eisa_id_to_string(char *dest, acpi_integer compressed_id);
-void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string);
+void acpi_ex_integer_to_string(char *dest, acpi_integer value);
/*
* exregion - default op_region handlers
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index ee986edfa0d..81e64f47867 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -369,6 +369,19 @@ union acpi_predefined_info {
struct acpi_package_info3 ret_info3;
};
+/* Data block used during object validation */
+
+struct acpi_predefined_data {
+ char *pathname;
+ const union acpi_predefined_info *predefined;
+ u32 flags;
+ u8 node_flags;
+};
+
+/* Defines for Flags field above */
+
+#define ACPI_OBJECT_REPAIRED 1
+
/*
* Bitmapped return value types
* Note: the actual data types must be contiguous, a loop in nspredef.c
@@ -885,6 +898,9 @@ struct acpi_bit_register_info {
#define ACPI_OSI_WIN_XP_SP2 0x05
#define ACPI_OSI_WINSRV_2003_SP1 0x06
#define ACPI_OSI_WIN_VISTA 0x07
+#define ACPI_OSI_WINSRV_2008 0x08
+#define ACPI_OSI_WIN_VISTA_SP1 0x09
+#define ACPI_OSI_WIN_7 0x0A
#define ACPI_ALWAYS_ILLEGAL 0x00
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 91ac7d7b440..3acd9c6760e 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -340,6 +340,7 @@
*/
#define ACPI_ERROR_NAMESPACE(s, e) acpi_ns_report_error (AE_INFO, s, e);
#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ns_report_method_error (AE_INFO, s, n, p, e);
+#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#else
@@ -347,6 +348,7 @@
#define ACPI_ERROR_NAMESPACE(s, e)
#define ACPI_ERROR_METHOD(s, n, p, e)
+#define ACPI_WARN_PREDEFINED(plist)
#endif /* ACPI_NO_ERROR_MESSAGES */
/*
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 94cdc2b8cb9..09a2764c734 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -73,6 +73,14 @@
#define ACPI_NS_WALK_UNLOCK 0x01
#define ACPI_NS_WALK_TEMP_NODES 0x02
+/* Object is not a package element */
+
+#define ACPI_NOT_PACKAGE_ELEMENT ACPI_UINT32_MAX
+
+/* Always emit warning message, not dependent on node flags */
+
+#define ACPI_WARN_ALWAYS 0
+
/*
* nsinit - Namespace initialization
*/
@@ -144,6 +152,8 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name);
void acpi_ns_delete_node(struct acpi_namespace_node *node);
+void acpi_ns_remove_node(struct acpi_namespace_node *node);
+
void
acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_handle);
@@ -186,6 +196,8 @@ acpi_ns_dump_objects(acpi_object_type type,
*/
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
+void acpi_ns_exec_module_code_list(void);
+
/*
* nspredef - Support for predefined/reserved names
*/
@@ -260,6 +272,19 @@ acpi_ns_get_attached_data(struct acpi_namespace_node *node,
acpi_object_handler handler, void **data);
/*
+ * nsrepair - return object repair for predefined methods/objects
+ */
+acpi_status
+acpi_ns_repair_object(struct acpi_predefined_data *data,
+ u32 expected_btypes,
+ u32 package_index,
+ union acpi_operand_object **return_object_ptr);
+
+acpi_status
+acpi_ns_repair_package_list(struct acpi_predefined_data *data,
+ union acpi_operand_object **obj_desc_ptr);
+
+/*
* nssearch - Namespace searching and entry
*/
acpi_status
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index eb6f038b03d..b39d682a214 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -98,6 +98,7 @@
#define AOPOBJ_SETUP_COMPLETE 0x10
#define AOPOBJ_SINGLE_DATUM 0x20
#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
+#define AOPOBJ_MODULE_LEVEL 0x80
/******************************************************************************
*
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 23ee0fbf561..22881e8ce22 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -62,6 +62,8 @@
#define ACPI_PARSE_DEFERRED_OP 0x0100
#define ACPI_PARSE_DISASSEMBLE 0x0200
+#define ACPI_PARSE_MODULE_LEVEL 0x0400
+
/******************************************************************************
*
* Parser interfaces
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 63f656ae360..cd80d1dd195 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -64,8 +64,8 @@
* (Used for _PRW)
*
*
- * 2) PTYPE2 packages contain a variable number of sub-packages. Each of the
- * different types describe the contents of each of the sub-packages.
+ * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
+ * of the different types describes the contents of each of the sub-packages.
*
* ACPI_PTYPE2: Each subpackage contains 1 or 2 object types:
* object type
@@ -91,6 +91,9 @@
* ACPI_PTYPE2_MIN: Each subpackage has a variable but minimum length
* (Used for _HPX)
*
+ * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
+ * (Used for _ART, _FPS)
+ *
*****************************************************************************/
enum acpi_return_package_types {
@@ -101,9 +104,11 @@ enum acpi_return_package_types {
ACPI_PTYPE2_COUNT = 5,
ACPI_PTYPE2_PKG_COUNT = 6,
ACPI_PTYPE2_FIXED = 7,
- ACPI_PTYPE2_MIN = 8
+ ACPI_PTYPE2_MIN = 8,
+ ACPI_PTYPE2_REV_FIXED = 9
};
+#ifdef ACPI_CREATE_PREDEFINED_TABLE
/*
* Predefined method/object information table.
*
@@ -136,239 +141,384 @@ enum acpi_return_package_types {
* is saved here (rather than in a separate table) in order to minimize the
* overall size of the stored data.
*/
-static const union acpi_predefined_info predefined_names[] = {
- {.info = {"_AC0", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC1", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC2", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC3", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC4", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC5", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC6", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC7", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC8", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AC9", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_ADR", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_AL0", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL1", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL2", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL3", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL4", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL5", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL6", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL7", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL8", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_AL9", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_ALC", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_ALI", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_ALP", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_ALR", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2, 0, 0, 0}}, /* variable (Pkgs) each 2 (Ints) */
- {.info = {"_ALT", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_BBN", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_BCL", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0}}, /* variable (Ints) */
- {.info = {"_BCM", 1, 0}},
- {.info = {"_BDN", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_BFS", 1, 0}},
- {.info = {"_BIF", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER,
- 9,
- ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER, 4, 0}}, /* fixed (9 Int),(4 Str) */
- {.info = {"_BLT", 3, 0}},
- {.info = {"_BMC", 1, 0}},
- {.info = {"_BMD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0}}, /* fixed (5 Int) */
- {.info = {"_BQC", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_BST", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0}}, /* fixed (4 Int) */
- {.info = {"_BTM", 1, ACPI_RTYPE_INTEGER}},
- {.info = {"_BTP", 1, 0}},
- {.info = {"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* see PCI firmware spec 3.0 */
- {.info = {"_CID", 0,
- ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}},
- {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0, 0, 0}}, /* variable (Ints/Strs) */
- {.info = {"_CRS", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_CRT", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_CSD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0}}, /* variable (1 Int(n), n-1 Int) */
- {.info = {"_CST", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2_PKG_COUNT,
- ACPI_RTYPE_BUFFER, 1,
- ACPI_RTYPE_INTEGER, 3, 0}}, /* variable (1 Int(n), n Pkg (1 Buf/3 Int) */
- {.info = {"_DCK", 1, ACPI_RTYPE_INTEGER}},
- {.info = {"_DCS", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
- {.info = {"_DDN", 0, ACPI_RTYPE_STRING}},
- {.info = {"_DGS", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_DIS", 0, 0}},
- {.info = {"_DMA", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_DOD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0}}, /* variable (Ints) */
- {.info = {"_DOS", 1, 0}},
- {.info = {"_DSM", 4, ACPI_RTYPE_ALL}}, /* Must return a type, but it can be of any type */
- {.info = {"_DSS", 1, 0}},
- {.info = {"_DSW", 3, 0}},
- {.info = {"_EC_", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_EDL", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_EJ0", 1, 0}},
- {.info = {"_EJ1", 1, 0}},
- {.info = {"_EJ2", 1, 0}},
- {.info = {"_EJ3", 1, 0}},
- {.info = {"_EJ4", 1, 0}},
- {.info = {"_EJD", 0, ACPI_RTYPE_STRING}},
- {.info = {"_FDE", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_FDI", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, 0, 0, 0}}, /* fixed (16 Int) */
- {.info = {"_FDM", 1, 0}},
- {.info = {"_FIX", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0}}, /* variable (Ints) */
- {.info = {"_GLK", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_GPD", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
- {.info = {"_GSB", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_GTF", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_GTM", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_GTS", 1, 0}},
- {.info = {"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
- {.info = {"_HOT", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_HPP", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0}}, /* fixed (4 Int) */
+static const union acpi_predefined_info predefined_names[] =
+{
+ {{"_AC0", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC1", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC2", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC3", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC4", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC5", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC6", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC7", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC8", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AC9", 0, ACPI_RTYPE_INTEGER}},
+ {{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL4", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL5", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL6", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL7", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL8", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_AL9", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_ALC", 0, ACPI_RTYPE_INTEGER}},
+ {{"_ALI", 0, ACPI_RTYPE_INTEGER}},
+ {{"_ALP", 0, ACPI_RTYPE_INTEGER}},
+ {{"_ALR", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 (Ints) */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2,0}, 0,0}},
+
+ {{"_ALT", 0, ACPI_RTYPE_INTEGER}},
+ {{"_ART", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */
+ {{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER},
+ 11, 0}},
+
+ {{"_BBN", 0, ACPI_RTYPE_INTEGER}},
+ {{"_BCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+
+ {{"_BCM", 1, 0}},
+ {{"_BCT", 1, ACPI_RTYPE_INTEGER}},
+ {{"_BDN", 0, ACPI_RTYPE_INTEGER}},
+ {{"_BFS", 1, 0}},
+ {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4,0}},
+
+ {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
+ 0}},
+
+ {{"_BLT", 3, 0}},
+ {{"_BMA", 1, ACPI_RTYPE_INTEGER}},
+ {{"_BMC", 1, 0}},
+ {{"_BMD", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (5 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+
+ {{"_BMS", 1, ACPI_RTYPE_INTEGER}},
+ {{"_BQC", 0, ACPI_RTYPE_INTEGER}},
+ {{"_BST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
+
+ {{"_BTM", 1, ACPI_RTYPE_INTEGER}},
+ {{"_BTP", 1, 0}},
+ {{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */
+ {{"_CDM", 0, ACPI_RTYPE_INTEGER}},
+ {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
+
+ {{"_CRS", 0, ACPI_RTYPE_BUFFER}},
+ {{"_CRT", 0, ACPI_RTYPE_INTEGER}},
+ {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
+ {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+
+ {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
+ {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
+
+ {{"_DCK", 1, ACPI_RTYPE_INTEGER}},
+ {{"_DCS", 0, ACPI_RTYPE_INTEGER}},
+ {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
+ {{"_DDN", 0, ACPI_RTYPE_STRING}},
+ {{"_DGS", 0, ACPI_RTYPE_INTEGER}},
+ {{"_DIS", 0, 0}},
+ {{"_DMA", 0, ACPI_RTYPE_BUFFER}},
+ {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+
+ {{"_DOS", 1, 0}},
+ {{"_DSM", 4, ACPI_RTYPE_ALL}}, /* Must return a type, but it can be of any type */
+ {{"_DSS", 1, 0}},
+ {{"_DSW", 3, 0}},
+ {{"_DTI", 1, 0}},
+ {{"_EC_", 0, ACPI_RTYPE_INTEGER}},
+ {{"_EDL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs)*/
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_EJ0", 1, 0}},
+ {{"_EJ1", 1, 0}},
+ {{"_EJ2", 1, 0}},
+ {{"_EJ3", 1, 0}},
+ {{"_EJ4", 1, 0}},
+ {{"_EJD", 0, ACPI_RTYPE_STRING}},
+ {{"_FDE", 0, ACPI_RTYPE_BUFFER}},
+ {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
+
+ {{"_FDM", 1, 0}},
+ {{"_FIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0}, 0, 0}},
+
+ {{"_FIX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+
+ {{"_FPS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(rev), n Pkg (5 Int) */
+ {{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0}, 0, 0}},
+
+ {{"_FSL", 1, 0}},
+ {{"_FST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+ {{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GHL", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GLK", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GPD", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
+ {{"_GSB", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GTF", 0, ACPI_RTYPE_BUFFER}},
+ {{"_GTM", 0, ACPI_RTYPE_BUFFER}},
+ {{"_GTS", 1, 0}},
+ {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
+ {{"_HOT", 0, ACPI_RTYPE_INTEGER}},
+ {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
/*
- * For _HPX, a single package is returned, containing a variable number of sub-packages.
- * Each sub-package contains a PCI record setting. There are several different type of
- * record settings, of different lengths, but all elements of all settings are Integers.
+ * For _HPX, a single package is returned, containing a Variable-length number
+ * of sub-packages. Each sub-package contains a PCI record setting.
+ * There are several different types of record settings, of different
+ * lengths, but all elements of all settings are Integers.
*/
- {.info = {"_HPX", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5, 0, 0, 0}}, /* variable (Pkgs) each (var Ints) */
- {.info = {"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* see IPMI spec */
- {.info = {"_INI", 0, 0}},
- {.info = {"_IRC", 0, 0}},
- {.info = {"_LCK", 1, 0}},
- {.info = {"_LID", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_MAT", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_MLS", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2, ACPI_RTYPE_STRING, 2, 0, 0, 0}}, /* variable (Pkgs) each (2 Str) */
- {.info = {"_MSG", 1, 0}},
- {.info = {"_OFF", 0, 0}},
- {.info = {"_ON_", 0, 0}},
- {.info = {"_OS_", 0, ACPI_RTYPE_STRING}},
- {.info = {"_OSC", 4, ACPI_RTYPE_BUFFER}},
- {.info = {"_OST", 3, 0}},
- {.info = {"_PCL", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_PCT", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0}}, /* fixed (2 Buf) */
- {.info = {"_PDC", 1, 0}},
- {.info = {"_PIC", 1, 0}},
- {.info = {"_PLD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0, 0, 0, 0}}, /* variable (Bufs) */
- {.info = {"_PPC", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_PPE", 0, ACPI_RTYPE_INTEGER}}, /* see dig64 spec */
- {.info = {"_PR0", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_PR1", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_PR2", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_PRS", 0, ACPI_RTYPE_BUFFER}},
+ {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
+ {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+
+ {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
+ {{"_INI", 0, 0}},
+ {{"_IRC", 0, 0}},
+ {{"_LCK", 1, 0}},
+ {{"_LID", 0, ACPI_RTYPE_INTEGER}},
+ {{"_MAT", 0, ACPI_RTYPE_BUFFER}},
+ {{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}},
+
+ {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (2 Str) */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 2,0}, 0,0}},
+
+ {{"_MSG", 1, 0}},
+ {{"_MSM", 4, ACPI_RTYPE_INTEGER}},
+ {{"_NTT", 0, ACPI_RTYPE_INTEGER}},
+ {{"_OFF", 0, 0}},
+ {{"_ON_", 0, 0}},
+ {{"_OS_", 0, ACPI_RTYPE_STRING}},
+ {{"_OSC", 4, ACPI_RTYPE_BUFFER}},
+ {{"_OST", 3, 0}},
+ {{"_PAI", 1, ACPI_RTYPE_INTEGER}},
+ {{"_PCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_PCT", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}},
+
+ {{"_PDC", 1, 0}},
+ {{"_PDL", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PIC", 1, 0}},
+ {{"_PIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int),(3 Str) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, ACPI_RTYPE_STRING}, 3, 0}},
+
+ {{"_PLD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Bufs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0,0}, 0,0}},
+
+ {{"_PMC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (11 Int),(3 Str) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11, ACPI_RTYPE_STRING}, 3,
+ 0}},
+
+ {{"_PMD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
+ {{"_PMM", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PPC", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PPE", 0, ACPI_RTYPE_INTEGER}}, /* See dig64 spec */
+ {{"_PR0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_PR1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_PR2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
+ {{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
+ {{"_PRS", 0, ACPI_RTYPE_BUFFER}},
/*
- * For _PRT, many BIOSs reverse the 2nd and 3rd Package elements. This bug is so prevalent that there
- * is code in the ACPICA Resource Manager to detect this and switch them back. For now, do not allow
- * and issue a warning. To allow this and eliminate the warning, add the ACPI_RTYPE_REFERENCE
- * type to the 2nd element (index 1) in the statement below.
+ * For _PRT, many BIOSs reverse the 3rd and 4th Package elements (Source
+ * and source_index). This bug is so prevalent that there is code in the
+ * ACPICA Resource Manager to detect this and switch them back. For now,
+ * do not allow this and issue a warning. To allow this and eliminate the
+ * warning, add the ACPI_RTYPE_REFERENCE type to the 4th element (index 3)
+ * in the statement below.
*/
- {.info = {"_PRT", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2_FIXED, 4,
- ACPI_RTYPE_INTEGER,
- ACPI_RTYPE_INTEGER,
- ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE, ACPI_RTYPE_INTEGER}}, /* variable (Pkgs) each (4): Int,Int,Int/Ref,Int */
-
- {.info = {"_PRW", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_OPTION, 2,
- ACPI_RTYPE_INTEGER |
- ACPI_RTYPE_PACKAGE,
- ACPI_RTYPE_INTEGER, ACPI_RTYPE_REFERENCE, 0}}, /* variable (Pkgs) each: Pkg/Int,Int,[variable Refs] (Pkg is Ref/Int) */
-
- {.info = {"_PS0", 0, 0}},
- {.info = {"_PS1", 0, 0}},
- {.info = {"_PS2", 0, 0}},
- {.info = {"_PS3", 0, 0}},
- {.info = {"_PSC", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_PSD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0}}, /* variable (Pkgs) each (5 Int) with count */
- {.info = {"_PSL", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_PSR", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_PSS", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6, 0, 0, 0}}, /* variable (Pkgs) each (6 Int) */
- {.info = {"_PSV", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_PSW", 1, 0}},
- {.info = {"_PTC", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0}}, /* fixed (2 Buf) */
- {.info = {"_PTS", 1, 0}},
- {.info = {"_PXM", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_REG", 2, 0}},
- {.info = {"_REV", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_RMV", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_ROM", 2, ACPI_RTYPE_BUFFER}},
- {.info = {"_RTV", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */
+ {{{ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,ACPI_RTYPE_INTEGER},
+ ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE,
+ ACPI_RTYPE_INTEGER}},
+
+ {{"_PRW", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */
+ {{{ACPI_PTYPE1_OPTION, 2, ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE,
+ ACPI_RTYPE_INTEGER}, ACPI_RTYPE_REFERENCE,0}},
+
+ {{"_PS0", 0, 0}},
+ {{"_PS1", 0, 0}},
+ {{"_PS2", 0, 0}},
+ {{"_PS3", 0, 0}},
+ {{"_PSC", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
+ {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
+
+ {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_PSR", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (6 Int) */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6,0}, 0,0}},
+
+ {{"_PSV", 0, ACPI_RTYPE_INTEGER}},
+ {{"_PSW", 1, 0}},
+ {{"_PTC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}},
+
+ {{"_PTP", 2, ACPI_RTYPE_INTEGER}},
+ {{"_PTS", 1, 0}},
+ {{"_PUR", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0}, 0, 0}},
+
+ {{"_PXM", 0, ACPI_RTYPE_INTEGER}},
+ {{"_REG", 2, 0}},
+ {{"_REV", 0, ACPI_RTYPE_INTEGER}},
+ {{"_RMV", 0, ACPI_RTYPE_INTEGER}},
+ {{"_ROM", 2, ACPI_RTYPE_BUFFER}},
+ {{"_RTV", 0, ACPI_RTYPE_INTEGER}},
/*
- * For _S0_ through _S5_, the ACPI spec defines a return Package containing 1 Integer,
- * but most DSDTs have it wrong - 2,3, or 4 integers. Allow this by making the objects "variable length",
- * but all elements must be Integers.
+ * For _S0_ through _S5_, the ACPI spec defines a return Package
+ * containing 1 Integer, but most DSDTs have it wrong - 2, 3, or 4 integers.
+ * Allow this by making the objects "Variable-length", but all elements
+ * must be Integers.
*/
- {.info = {"_S0_", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0}}, /* fixed (1 Int) */
- {.info = {"_S1_", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0}}, /* fixed (1 Int) */
- {.info = {"_S2_", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0}}, /* fixed (1 Int) */
- {.info = {"_S3_", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0}}, /* fixed (1 Int) */
- {.info = {"_S4_", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0}}, /* fixed (1 Int) */
- {.info = {"_S5_", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0}}, /* fixed (1 Int) */
-
- {.info = {"_S1D", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S2D", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S3D", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S4D", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S0W", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S1W", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S2W", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S3W", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_S4W", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_SBS", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_SCP", 0x13, 0}}, /* Acpi 1.0 allowed 1 arg. Acpi 3.0 expanded to 3 args. Allow both. */
- /* Note: the 3-arg definition may be removed for ACPI 4.0 */
- {.info = {"_SDD", 1, 0}},
- {.info = {"_SEG", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_SLI", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_SPD", 1, ACPI_RTYPE_INTEGER}},
- {.info = {"_SRS", 1, 0}},
- {.info = {"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* see IPMI spec */
- {.info = {"_SST", 1, 0}},
- {.info = {"_STA", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_STM", 3, 0}},
- {.info = {"_STR", 0, ACPI_RTYPE_BUFFER}},
- {.info = {"_SUN", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_SWS", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TC1", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TC2", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TMP", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TPC", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TPT", 1, 0}},
- {.info = {"_TRT", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2,
- ACPI_RTYPE_INTEGER, 6, 0}}, /* variable (Pkgs) each 2_ref/6_int */
- {.info = {"_TSD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 5, 0, 0, 0}}, /* variable (Pkgs) each 5_int with count */
- {.info = {"_TSP", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TSS", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5, 0, 0, 0}}, /* variable (Pkgs) each 5_int */
- {.info = {"_TST", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_TTS", 1, 0}},
- {.info = {"_TZD", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0}}, /* variable (Refs) */
- {.info = {"_TZM", 0, ACPI_RTYPE_REFERENCE}},
- {.info = {"_TZP", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_UID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
- {.info = {"_UPC", 0, ACPI_RTYPE_PACKAGE}}, {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0}}, /* fixed (4 Int) */
- {.info = {"_UPD", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_UPP", 0, ACPI_RTYPE_INTEGER}},
- {.info = {"_VPO", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S0_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+
+ {{"_S1_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+
+ {{"_S2_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+
+ {{"_S3_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+
+ {{"_S4_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+
+ {{"_S5_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+
+ {{"_S1D", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S2D", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S3D", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S4D", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S0W", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S1W", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S2W", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S3W", 0, ACPI_RTYPE_INTEGER}},
+ {{"_S4W", 0, ACPI_RTYPE_INTEGER}},
+ {{"_SBS", 0, ACPI_RTYPE_INTEGER}},
+ {{"_SCP", 0x13, 0}}, /* Acpi 1.0 allowed 1 arg. Acpi 3.0 expanded to 3 args. Allow both. */
+ /* Note: the 3-arg definition may be removed for ACPI 4.0 */
+ {{"_SDD", 1, 0}},
+ {{"_SEG", 0, ACPI_RTYPE_INTEGER}},
+ {{"_SHL", 1, ACPI_RTYPE_INTEGER}},
+ {{"_SLI", 0, ACPI_RTYPE_BUFFER}},
+ {{"_SPD", 1, ACPI_RTYPE_INTEGER}},
+ {{"_SRS", 1, 0}},
+ {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
+ {{"_SST", 1, 0}},
+ {{"_STA", 0, ACPI_RTYPE_INTEGER}},
+ {{"_STM", 3, 0}},
+ {{"_STP", 2, ACPI_RTYPE_INTEGER}},
+ {{"_STR", 0, ACPI_RTYPE_BUFFER}},
+ {{"_STV", 2, ACPI_RTYPE_INTEGER}},
+ {{"_SUN", 0, ACPI_RTYPE_INTEGER}},
+ {{"_SWS", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TC1", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TC2", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TIP", 1, ACPI_RTYPE_INTEGER}},
+ {{"_TIV", 1, ACPI_RTYPE_INTEGER}},
+ {{"_TMP", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TPC", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TPT", 1, 0}},
+ {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2_ref/6_int */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}},
+
+ {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5_int with count */
+ {{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+
+ {{"_TSP", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5_int */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+
+ {{"_TST", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TTS", 1, 0}},
+ {{"_TZD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+
+ {{"_TZM", 0, ACPI_RTYPE_REFERENCE}},
+ {{"_TZP", 0, ACPI_RTYPE_INTEGER}},
+ {{"_UID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
+ {{"_UPC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
+
+ {{"_UPD", 0, ACPI_RTYPE_INTEGER}},
+ {{"_UPP", 0, ACPI_RTYPE_INTEGER}},
+ {{"_VPO", 0, ACPI_RTYPE_INTEGER}},
/* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */
- {.info = {"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
- {.ret_info = {ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0}}, /* fixed (2 Int), but is optional */
- {.ret_info = {0, 0, 0, 0, 0, 0}} /* Table terminator */
+ {{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
+
+ {{{0,0,0,0}, 0,0}} /* Table terminator */
};
#if 0
/* Not implemented */
-{
-"_WDG", 0, ACPI_RTYPE_BUFFER}, /* MS Extension */
+ {{"_WDG", 0, ACPI_RTYPE_BUFFER}}, /* MS Extension */
+ {{"_WED", 1, ACPI_RTYPE_PACKAGE}}, /* MS Extension */
-{
-"_WED", 1, ACPI_RTYPE_PACKAGE}, /* MS Extension */
+ /* This is an internally implemented control method, no need to check */
+ {{"_OSI", 1, ACPI_RTYPE_INTEGER}},
- /* This is an internally implemented control method, no need to check */
-{
-"_OSI", 1, ACPI_RTYPE_INTEGER},
+ /* TBD: */
+
+ _PRT - currently ignore reversed entries. attempt to fix here?
+ think about possibly fixing package elements like _BIF, etc.
+#endif
- /* TBD: */
- _PRT - currently ignore reversed entries.attempt to fix here ?
- think about code that attempts to fix package elements like _BIF, etc.
#endif
#endif
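To make the paired-row layout of predefined_names easier to follow, here is a small standalone mimic of the scheme and of how a lookup has to skip the companion return-package row. Type, field, and function names below (name_info, find_entry, the RTYPE_* values) are simplified stand-ins of my own, not ACPICA's; the real consumer of this table is the nspredef.c validation code.

#include <stdio.h>
#include <string.h>

#define RTYPE_INTEGER 0x01              /* stand-in for ACPI_RTYPE_INTEGER */
#define RTYPE_PACKAGE 0x08              /* stand-in for ACPI_RTYPE_PACKAGE */

struct name_info {
        char name[5];                   /* 4-char ACPI name + terminator */
        unsigned char param_count;
        unsigned char expected_btypes;
};

/*
 * Package-returning names occupy two consecutive rows: the primary row and
 * a companion row describing the return package (details elided here).
 * Everything else takes one row; a zeroed row terminates the table.
 */
static const struct name_info table[] = {
        { "_ADR", 0, RTYPE_INTEGER },
        { "_BCL", 0, RTYPE_PACKAGE },   /* primary row */
        { "RET?", 0, 0 },               /* companion return-info row, skipped below */
        { "_BQC", 0, RTYPE_INTEGER },
        { "", 0, 0 }                    /* table terminator */
};

static const struct name_info *find_entry(const char *name)
{
        const struct name_info *entry = table;

        while (entry->name[0]) {
                if (!strncmp(name, entry->name, 4))
                        return entry;
                if (entry->expected_btypes & RTYPE_PACKAGE)
                        entry++;        /* skip the companion row */
                entry++;
        }
        return NULL;
}

int main(void)
{
        const struct name_info *e = find_entry("_BQC");

        if (e)
                printf("%s expects %u argument(s)\n",
                       e->name, (unsigned)e->param_count);
        return 0;
}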
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 897810ba0cc..863a264b829 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -324,26 +324,30 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
acpi_status
acpi_ut_evaluate_numeric_object(char *object_name,
struct acpi_namespace_node *device_node,
- acpi_integer * address);
+ acpi_integer *value);
acpi_status
-acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpica_device_id *hid);
+acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 *status_flags);
acpi_status
-acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
- struct acpi_compatible_id_list **return_cid_list);
+acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
+ const char **method_names,
+ u8 method_count, u8 *out_values);
+/*
+ * utids - device ID support
+ */
acpi_status
-acpi_ut_execute_STA(struct acpi_namespace_node *device_node,
- u32 * status_flags);
+acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id **return_id);
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpica_device_id *uid);
+ struct acpica_device_id **return_id);
acpi_status
-acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest);
+acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id_list **return_cid_list);
/*
* utlock - reader/writer locks
@@ -445,6 +449,8 @@ acpi_ut_short_divide(acpi_integer in_dividend,
*/
const char *acpi_ut_validate_exception(acpi_status status);
+u8 acpi_ut_is_pci_root_bridge(char *id);
+
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
@@ -469,6 +475,12 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
acpi_status
acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+ u32 line_number,
+ char *pathname,
+ u8 node_flags, const char *format, ...);
+
/* Values for Base above (16=Hex, 10=Decimal) */
#define ACPI_ANY_BASE 0
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 067f967eb38..4940249f252 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -404,6 +404,7 @@ typedef enum {
REGION_SMBUS,
REGION_CMOS,
REGION_PCI_BAR,
+ REGION_IPMI,
REGION_DATA_TABLE, /* Internal use only */
REGION_FIXED_HW = 0x7F
} AML_REGION_TYPES;
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 53e27bc5a73..54a225e56a6 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -123,9 +123,12 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
ACPI_NS_ERROR_IF_FOUND;
- /* Mark node temporary if we are executing a method */
-
- if (walk_state->method_node) {
+ /*
+ * Mark node temporary if we are executing a normal control
+ * method. (Don't mark if this is a module-level code method)
+ */
+ if (walk_state->method_node &&
+ !(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
@@ -456,9 +459,12 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
ACPI_NS_ERROR_IF_FOUND;
- /* Mark node(s) temporary if we are executing a method */
-
- if (walk_state->method_node) {
+ /*
+ * Mark node(s) temporary if we are executing a normal control
+ * method. (Don't mark if this is a module-level code method)
+ */
+ if (walk_state->method_node &&
+ !(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 14b8b8ed802..567a4899a01 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -578,10 +578,15 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
}
/*
- * Delete any namespace objects created anywhere within
- * the namespace by the execution of this method
+ * Delete any namespace objects created anywhere within the
+ * namespace by the execution of this method, unless this method
+ * is a module-level executable code method, in which case we
+ * want to make the objects permanent.
*/
- acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
+ if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+ acpi_ns_delete_namespace_by_owner(method_desc->method.
+ owner_id);
+ }
}
/* Decrement the thread count on the method */
@@ -622,7 +627,9 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
/* No more threads, we can free the owner_id */
- acpi_ut_release_owner_id(&method_desc->method.owner_id);
+ if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+ acpi_ut_release_owner_id(&method_desc->method.owner_id);
+ }
}
return_VOID;
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 22b1a3ce2c9..7d077bb2f52 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -433,10 +433,10 @@ acpi_ds_method_data_get_value(u8 type,
case ACPI_REFCLASS_LOCAL:
- ACPI_ERROR((AE_INFO,
- "Uninitialized Local[%d] at node %p",
- index, node));
-
+ /*
+ * No error message for this case; it will be trapped again later to
+ * detect and ignore cases of Store(local_x,local_x)
+ */
return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
default:
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 02e6caad4a7..507e1f0bbdf 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -482,14 +482,27 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
if (arg) {
/*
* num_elements was exhausted, but there are remaining elements in the
- * package_list.
+ * package_list. Truncate the package to num_elements.
*
* Note: technically, this is an error, from ACPI spec: "It is an error
* for NumElements to be less than the number of elements in the
- * PackageList". However, for now, we just print an error message and
- * no exception is returned.
+ * PackageList". However, we just print an error message and do
+ * not return an exception. This provides Windows compatibility. Some
+ * BIOSs will alter the num_elements on the fly, creating this type
+ * of ill-formed package object.
*/
while (arg) {
+ /*
+ * We must delete any package elements that were created earlier
+ * and are not going to be used because of the package truncation.
+ */
+ if (arg->common.node) {
+ acpi_ut_remove_reference(ACPI_CAST_PTR
+ (union
+ acpi_operand_object,
+ arg->common.node));
+ arg->common.node = NULL;
+ }
/* Find out how many elements there really are */
@@ -498,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_WARNING((AE_INFO,
- "Package List length (%X) larger than NumElements count (%X), truncated\n",
+ "Package List length (0x%X) larger than NumElements count (0x%X), truncated\n",
i, element_count));
} else if (i < element_count) {
/*
@@ -506,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* Note: this is not an error, the package is padded out with NULLs.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (%X) smaller than NumElements count (%X), padded with null elements\n",
+ "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n",
i, element_count));
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 3023ceaa8d5..6de3a99d4cd 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -581,21 +581,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
(walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
(!(walk_state->op_info->flags & AML_NAMED))) {
-#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
- if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
- (walk_state->op_info->class == AML_CLASS_CONTROL)) {
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Begin/EXEC: %s (fl %8.8X)\n",
- walk_state->op_info->name,
- walk_state->op_info->flags));
-
- /* Executing a type1 or type2 opcode outside of a method */
-
- status =
- acpi_ds_exec_begin_op(walk_state, out_op);
- return_ACPI_STATUS(status);
- }
-#endif
return_ACPI_STATUS(AE_OK);
}
@@ -768,7 +753,13 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* Execution mode, node cannot already exist, node is temporary */
- flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
+ flags |= ACPI_NS_ERROR_IF_FOUND;
+
+ if (!
+ (walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
+ flags |= ACPI_NS_TEMPORARY;
+ }
}
/* Add new entry or lookup existing entry */
@@ -851,24 +842,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
/* Check if opcode had an associated namespace object */
if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
- /* No namespace object. Executable opcode? */
-
- if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
- (walk_state->op_info->class == AML_CLASS_CONTROL)) {
- ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "End/EXEC: %s (fl %8.8X)\n",
- walk_state->op_info->name,
- walk_state->op_info->flags));
-
- /* Executing a type1 or type2 opcode outside of a method */
-
- status = acpi_ds_exec_end_op(walk_state);
- return_ACPI_STATUS(status);
- }
-#endif
-#endif
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b9d8ee69ca6..afacf4416c7 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -424,8 +424,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
/* Read the Status Register */
status =
- acpi_read(&status_reg,
- &gpe_register_info->status_address);
+ acpi_hw_read(&status_reg,
+ &gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
@@ -433,8 +433,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
/* Read the Enable Register */
status =
- acpi_read(&enable_reg,
- &gpe_register_info->enable_address);
+ acpi_hw_read(&enable_reg,
+ &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 7b346363942..a60aaa7635f 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -843,14 +843,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
/* Disable all GPEs within this register */
- status = acpi_write(0x00, &this_register->enable_address);
+ status = acpi_hw_write(0x00, &this_register->enable_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
/* Clear any pending GPE events within this register */
- status = acpi_write(0xFF, &this_register->status_address);
+ status = acpi_hw_write(0xFF, &this_register->status_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 284a7becbe9..cf29c495302 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -50,8 +50,6 @@
ACPI_MODULE_NAME("evrgnini")
/* Local prototypes */
-static u8 acpi_ev_match_pci_root_bridge(char *id);
-
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
/*******************************************************************************
@@ -332,37 +330,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/*******************************************************************************
*
- * FUNCTION: acpi_ev_match_pci_root_bridge
- *
- * PARAMETERS: Id - The HID/CID in string format
- *
- * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
- *
- * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
- *
- ******************************************************************************/
-
-static u8 acpi_ev_match_pci_root_bridge(char *id)
-{
-
- /*
- * Check if this is a PCI root.
- * ACPI 3.0+: check for a PCI Express root also.
- */
- if (!(ACPI_STRNCMP(id,
- PCI_ROOT_HID_STRING,
- sizeof(PCI_ROOT_HID_STRING))) ||
- !(ACPI_STRNCMP(id,
- PCI_EXPRESS_ROOT_HID_STRING,
- sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
- return (TRUE);
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_is_pci_root_bridge
*
* PARAMETERS: Node - Device node being examined
@@ -377,9 +344,10 @@ static u8 acpi_ev_match_pci_root_bridge(char *id)
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
- struct acpica_device_id hid;
- struct acpi_compatible_id_list *cid;
+ struct acpica_device_id *hid;
+ struct acpica_device_id_list *cid;
u32 i;
+ u8 match;
/* Get the _HID and check for a PCI Root Bridge */
@@ -388,7 +356,10 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
return (FALSE);
}
- if (acpi_ev_match_pci_root_bridge(hid.value)) {
+ match = acpi_ut_is_pci_root_bridge(hid->string);
+ ACPI_FREE(hid);
+
+ if (match) {
return (TRUE);
}
@@ -402,7 +373,7 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
/* Check all _CIDs in the returned list */
for (i = 0; i < cid->count; i++) {
- if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
+ if (acpi_ut_is_pci_root_bridge(cid->ids[i].string)) {
ACPI_FREE(cid);
return (TRUE);
}
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 277fd609611..24afef81af3 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -110,8 +110,15 @@ acpi_ex_add_table(u32 table_index,
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(obj_desc);
*ddb_handle = NULL;
+ return_ACPI_STATUS(status);
}
+ /* Execute any module-level code that was found in the table */
+
+ acpi_ex_exit_interpreter();
+ acpi_ns_exec_module_code_list();
+ acpi_ex_enter_interpreter();
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index ec524614e70..de3446372dd 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -418,9 +418,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_REFERENCE:
acpi_ex_out_string("Class Name",
- (char *)
- acpi_ut_get_reference_name
- (obj_desc));
+ ACPI_CAST_PTR(char,
+ acpi_ut_get_reference_name
+ (obj_desc)));
acpi_ex_dump_reference_obj(obj_desc);
break;
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 546dcdd8678..0b33d6c887b 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -72,6 +72,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
union acpi_operand_object *buffer_desc;
acpi_size length;
void *buffer;
+ u32 function;
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
@@ -97,13 +98,27 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
}
} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_SMBUS)) {
+ ACPI_ADR_SPACE_SMBUS
+ || obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus read. We must create a buffer to hold the data
- * and directly access the region handler.
+ * This is an SMBus or IPMI read. We must create a buffer to hold
+ * the data and then directly access the region handler.
+ *
+ * Note: The SMBus protocol value is passed in the upper 16 bits of Function
*/
- buffer_desc =
- acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
+ if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_SMBUS) {
+ length = ACPI_SMBUS_BUFFER_SIZE;
+ function =
+ ACPI_READ | (obj_desc->field.attribute << 16);
+ } else { /* IPMI */
+
+ length = ACPI_IPMI_BUFFER_SIZE;
+ function = ACPI_READ;
+ }
+
+ buffer_desc = acpi_ut_create_buffer_object(length);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -112,16 +127,13 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
- /*
- * Perform the read.
- * Note: Smbus protocol value is passed in upper 16-bits of Function
- */
+ /* Call the region handler for the read */
+
status = acpi_ex_access_region(obj_desc, 0,
ACPI_CAST_PTR(acpi_integer,
buffer_desc->
buffer.pointer),
- ACPI_READ | (obj_desc->field.
- attribute << 16));
+ function);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
goto exit;
}
@@ -212,6 +224,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
u32 length;
void *buffer;
union acpi_operand_object *buffer_desc;
+ u32 function;
ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
@@ -234,39 +247,56 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
}
} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_SMBUS)) {
+ ACPI_ADR_SPACE_SMBUS
+ || obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus write. We will bypass the entire field mechanism
- * and handoff the buffer directly to the handler.
+ * This is an SMBus or IPMI write. We will bypass the entire field
+ * mechanism and hand off the buffer directly to the handler. For
+ * these address spaces, the buffer is bi-directional; on a write,
+ * any return data is placed in the same buffer.
+ *
+ * Source must be a buffer of sufficient size:
+ * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
*
- * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
+ * Note: SMBus protocol type is passed in upper 16-bits of Function
*/
if (source_desc->common.type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO,
- "SMBus write requires Buffer, found type %s",
+ "SMBus or IPMI write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
- if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
+ if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_SMBUS) {
+ length = ACPI_SMBUS_BUFFER_SIZE;
+ function =
+ ACPI_WRITE | (obj_desc->field.attribute << 16);
+ } else { /* IPMI */
+
+ length = ACPI_IPMI_BUFFER_SIZE;
+ function = ACPI_WRITE;
+ }
+
+ if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus write requires Buffer of length %X, found length %X",
- ACPI_SMBUS_BUFFER_SIZE,
- source_desc->buffer.length));
+ "SMBus or IPMI write requires Buffer of length %X, found length %X",
+ length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
}
- buffer_desc =
- acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
+ /* Create the bi-directional buffer */
+
+ buffer_desc = acpi_ut_create_buffer_object(length);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
buffer = buffer_desc->buffer.pointer;
- ACPI_MEMCPY(buffer, source_desc->buffer.pointer,
- ACPI_SMBUS_BUFFER_SIZE);
+ ACPI_MEMCPY(buffer, source_desc->buffer.pointer, length);
/* Lock entire transaction if requested */
@@ -275,12 +305,10 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
/*
* Perform the write (returns status and perhaps data in the
* same buffer)
- * Note: SMBus protocol type is passed in upper 16-bits of Function.
*/
status = acpi_ex_access_region(obj_desc, 0,
(acpi_integer *) buffer,
- ACPI_WRITE | (obj_desc->field.
- attribute << 16));
+ function);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
*result_desc = buffer_desc;
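
The read and write paths above collapse the transfer into a single Function word: the access direction in the low bits and, for SMBus, the protocol taken from the field's Attribute byte in the upper 16 bits; IPMI passes only the direction. A stand-alone sketch of that packing follows; the ACPI_READ/ACPI_WRITE values here are illustrative stand-ins for the real header definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real definitions live in the ACPICA headers */
#define ACPI_READ   0
#define ACPI_WRITE  1

/* Pack the access direction and the SMBus protocol (the field's Attribute
 * byte) into one Function word, as the exfield.c hunks do: protocol in the
 * upper 16 bits, direction in the low bits. IPMI passes just the direction. */
static uint32_t region_function(uint32_t direction, uint8_t protocol)
{
	return direction | ((uint32_t)protocol << 16);
}

int main(void)
{
	/* 0x0A as an example protocol byte taken from the field's Attribute */
	uint32_t func = region_function(ACPI_READ, 0x0A);

	printf("function = 0x%08X (protocol 0x%02X, direction %u)\n",
	       (unsigned)func, (unsigned)((func >> 16) & 0xFF),
	       (unsigned)(func & 0xFFFF));
	return 0;
}
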
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 6687be167f5..d7b3b418fb4 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -120,12 +120,13 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus address space, it has a non-linear address space
+	 * Exit now for SMBus or IPMI address spaces; these are non-linear
* and the request cannot be directly validated
*/
- if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
+ if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
+ rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
- /* SMBus has a non-linear address space */
+ /* SMBus or IPMI has a non-linear address space */
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 87730e94413..7d41f99f705 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -358,50 +358,67 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
*
* FUNCTION: acpi_ex_eisa_id_to_string
*
- * PARAMETERS: numeric_id - EISA ID to be converted
+ * PARAMETERS: compressed_id - EISAID to be converted
* out_string - Where to put the converted string (8 bytes)
*
* RETURN: None
*
- * DESCRIPTION: Convert a numeric EISA ID to string representation
+ * DESCRIPTION: Convert a numeric EISAID to string representation. Return
+ * buffer must be large enough to hold the string. The string
+ * returned is always exactly of length ACPI_EISAID_STRING_SIZE
+ * (includes null terminator). The EISAID is always 32 bits.
*
******************************************************************************/
-void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string)
+void acpi_ex_eisa_id_to_string(char *out_string, acpi_integer compressed_id)
{
- u32 eisa_id;
+ u32 swapped_id;
ACPI_FUNCTION_ENTRY();
+ /* The EISAID should be a 32-bit integer */
+
+ if (compressed_id > ACPI_UINT32_MAX) {
+ ACPI_WARNING((AE_INFO,
+ "Expected EISAID is larger than 32 bits: 0x%8.8X%8.8X, truncating",
+ ACPI_FORMAT_UINT64(compressed_id)));
+ }
+
/* Swap ID to big-endian to get contiguous bits */
- eisa_id = acpi_ut_dword_byte_swap(numeric_id);
+ swapped_id = acpi_ut_dword_byte_swap((u32)compressed_id);
- out_string[0] = (char)('@' + (((unsigned long)eisa_id >> 26) & 0x1f));
- out_string[1] = (char)('@' + ((eisa_id >> 21) & 0x1f));
- out_string[2] = (char)('@' + ((eisa_id >> 16) & 0x1f));
- out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 12);
- out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 8);
- out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 4);
- out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 0);
+ /* First 3 bytes are uppercase letters. Next 4 bytes are hexadecimal */
+
+ out_string[0] =
+ (char)(0x40 + (((unsigned long)swapped_id >> 26) & 0x1F));
+ out_string[1] = (char)(0x40 + ((swapped_id >> 21) & 0x1F));
+ out_string[2] = (char)(0x40 + ((swapped_id >> 16) & 0x1F));
+ out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 12);
+ out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 8);
+ out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 4);
+ out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer)swapped_id, 0);
out_string[7] = 0;
}
/*******************************************************************************
*
- * FUNCTION: acpi_ex_unsigned_integer_to_string
+ * FUNCTION: acpi_ex_integer_to_string
*
- * PARAMETERS: Value - Value to be converted
- * out_string - Where to put the converted string (8 bytes)
+ * PARAMETERS: out_string - Where to put the converted string. At least
+ * 21 bytes are needed to hold the largest
+ * possible 64-bit integer.
+ * Value - Value to be converted
*
* RETURN: None, string
*
- * DESCRIPTION: Convert a number to string representation. Assumes string
- * buffer is large enough to hold the string.
+ * DESCRIPTION: Convert a 64-bit integer to decimal string representation.
+ * Assumes string buffer is large enough to hold the string. The
+ * largest string is (ACPI_MAX64_DECIMAL_DIGITS + 1).
*
******************************************************************************/
-void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string)
+void acpi_ex_integer_to_string(char *out_string, acpi_integer value)
{
u32 count;
u32 digits_needed;
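
The reworked acpi_ex_eisa_id_to_string byte-swaps the compressed 32-bit ID and then unpacks three 5-bit uppercase letters followed by four hex digits. The stand-alone sketch below performs the same transformation; 0x030AD041 is the compressed form of EISAID("PNP0A03"), the PCI root bridge ID, used here only as a worked example.

#include <stdint.h>
#include <stdio.h>

/* Decode a compressed 32-bit EISAID into its 7-character string form:
 * byte-swap to get contiguous bits, then three 5-bit uppercase letters
 * followed by four hex digits, as in acpi_ex_eisa_id_to_string above. */
static void eisa_id_to_string(uint32_t compressed_id, char out[8])
{
	uint32_t swapped = ((compressed_id & 0x000000FF) << 24) |
			   ((compressed_id & 0x0000FF00) << 8)  |
			   ((compressed_id & 0x00FF0000) >> 8)  |
			   ((compressed_id & 0xFF000000) >> 24);
	static const char hex[] = "0123456789ABCDEF";

	out[0] = (char)(0x40 + ((swapped >> 26) & 0x1F));
	out[1] = (char)(0x40 + ((swapped >> 21) & 0x1F));
	out[2] = (char)(0x40 + ((swapped >> 16) & 0x1F));
	out[3] = hex[(swapped >> 12) & 0xF];
	out[4] = hex[(swapped >> 8) & 0xF];
	out[5] = hex[(swapped >> 4) & 0xF];
	out[6] = hex[swapped & 0xF];
	out[7] = '\0';
}

int main(void)
{
	char id[8];

	eisa_id_to_string(0x030AD041, id);	/* prints "PNP0A03" */
	printf("%s\n", id);
	return 0;
}
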
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index d3b7e37c9ee..c28c41b3180 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -82,7 +82,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/* Get current value of the enable register that contains this GPE */
- status = acpi_read(&enable_mask, &gpe_register_info->enable_address);
+ status = acpi_hw_read(&enable_mask, &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -95,7 +95,7 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
/* Write the updated enable mask */
- status = acpi_write(enable_mask, &gpe_register_info->enable_address);
+ status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
return (status);
}
@@ -130,8 +130,8 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
/* Write the entire GPE (runtime) enable register */
- status = acpi_write(gpe_register_info->enable_for_run,
- &gpe_register_info->enable_address);
+ status = acpi_hw_write(gpe_register_info->enable_for_run,
+ &gpe_register_info->enable_address);
return (status);
}
@@ -163,8 +163,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
- status = acpi_write(register_bit,
- &gpe_event_info->register_info->status_address);
+ status = acpi_hw_write(register_bit,
+ &gpe_event_info->register_info->status_address);
return (status);
}
@@ -222,7 +222,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* GPE currently active (status bit == 1)? */
- status = acpi_read(&in_byte, &gpe_register_info->status_address);
+ status = acpi_hw_read(&in_byte, &gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
@@ -266,8 +266,8 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Disable all GPEs in this register */
status =
- acpi_write(0x00,
- &gpe_block->register_info[i].enable_address);
+ acpi_hw_write(0x00,
+ &gpe_block->register_info[i].enable_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -303,8 +303,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Clear status on all GPEs in this register */
status =
- acpi_write(0xFF,
- &gpe_block->register_info[i].status_address);
+ acpi_hw_write(0xFF,
+ &gpe_block->register_info[i].status_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -345,9 +345,9 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Enable all "runtime" GPEs in this register */
- status = acpi_write(gpe_block->register_info[i].enable_for_run,
- &gpe_block->register_info[i].
- enable_address);
+ status =
+ acpi_hw_write(gpe_block->register_info[i].enable_for_run,
+ &gpe_block->register_info[i].enable_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -387,9 +387,9 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Enable all "wake" GPEs in this register */
- status = acpi_write(gpe_block->register_info[i].enable_for_wake,
- &gpe_block->register_info[i].
- enable_address);
+ status =
+ acpi_hw_write(gpe_block->register_info[i].enable_for_wake,
+ &gpe_block->register_info[i].enable_address);
if (ACPI_FAILURE(status)) {
return (status);
}
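
The hunks above preserve two distinct GPE access patterns on top of the new acpi_hw_read/acpi_hw_write helpers: disabling one GPE is a read-modify-write of its enable register, while clearing status writes only the target bit (GPE status bits are write-one-to-clear in hardware). Below is a stand-alone sketch of both patterns over fake 8-bit registers; names and widths are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint8_t enable_reg = 0xFF;	/* stand-in for a GPE enable register */
static uint8_t status_reg;		/* stand-in for a GPE status register */

static void gpe_disable(unsigned int bit)
{
	uint8_t mask = enable_reg;	/* read current enables (acpi_hw_read) */

	mask &= (uint8_t)~(1u << bit);	/* clear only this GPE's enable bit */
	enable_reg = mask;		/* write back (acpi_hw_write) */
}

static void gpe_clear_status(unsigned int bit)
{
	/* Status bits are write-one-to-clear, so only the target bit is written;
	 * real hardware clears the bit, this fake register just records the write */
	status_reg = (uint8_t)(1u << bit);
}

int main(void)
{
	gpe_disable(2);
	gpe_clear_status(2);
	printf("enable=0x%02X, value written to status=0x%02X\n",
	       (unsigned)enable_reg, (unsigned)status_reg);
	return 0;
}
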
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 23d5505cb1f..15c9ed2be85 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -62,6 +62,184 @@ acpi_hw_write_multiple(u32 value,
struct acpi_generic_address *register_a,
struct acpi_generic_address *register_b);
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_validate_register
+ *
+ * PARAMETERS: Reg - GAS register structure
+ * max_bit_width - Max bit_width supported (32 or 64)
+ * Address - Pointer to where the gas->address
+ * is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Validate the contents of a GAS register. Checks the GAS
+ * pointer, Address, space_id, bit_width, and bit_offset.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_validate_register(struct acpi_generic_address *reg,
+ u8 max_bit_width, u64 *address)
+{
+
+ /* Must have a valid pointer to a GAS structure */
+
+ if (!reg) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Copy the target address. This handles possible alignment issues.
+ * Address must not be null. A null address also indicates an optional
+ * ACPI register that is not supported, so no error message.
+ */
+ ACPI_MOVE_64_TO_64(address, &reg->address);
+ if (!(*address)) {
+ return (AE_BAD_ADDRESS);
+ }
+
+	/* Validate the space_id */
+
+ if ((reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ ACPI_ERROR((AE_INFO,
+ "Unsupported address space: 0x%X", reg->space_id));
+ return (AE_SUPPORT);
+ }
+
+ /* Validate the bit_width */
+
+ if ((reg->bit_width != 8) &&
+ (reg->bit_width != 16) &&
+ (reg->bit_width != 32) && (reg->bit_width != max_bit_width)) {
+ ACPI_ERROR((AE_INFO,
+ "Unsupported register bit width: 0x%X",
+ reg->bit_width));
+ return (AE_SUPPORT);
+ }
+
+ /* Validate the bit_offset. Just a warning for now. */
+
+ if (reg->bit_offset != 0) {
+ ACPI_WARNING((AE_INFO,
+ "Unsupported register bit offset: 0x%X",
+ reg->bit_offset));
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_read
+ *
+ * PARAMETERS: Value - Where the value is returned
+ * Reg - GAS register structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read from either memory or IO space. This is a 32-bit max
+ * version of acpi_read, used internally since the overhead of
+ * 64-bit values is not needed.
+ *
+ * LIMITATIONS: <These limitations also apply to acpi_hw_write>
+ * bit_width must be exactly 8, 16, or 32.
+ *		  space_id must be system_memory or system_io.
+ * bit_offset and access_width are currently ignored, as there has
+ * not been a need to implement these.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
+{
+ u64 address;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(hw_read);
+
+ /* Validate contents of the GAS register */
+
+ status = acpi_hw_validate_register(reg, 32, &address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Initialize entire 32-bit return value to zero */
+
+ *value = 0;
+
+ /*
+ * Two address spaces supported: Memory or IO. PCI_Config is
+ * not supported here because the GAS structure is insufficient
+ */
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status = acpi_os_read_memory((acpi_physical_address)
+ address, value, reg->bit_width);
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_read_port((acpi_io_address)
+ address, value, reg->bit_width);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
+ *value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(reg->space_id)));
+
+ return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_write
+ *
+ * PARAMETERS: Value - Value to be written
+ * Reg - GAS register structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to either memory or IO space. This is a 32-bit max
+ * version of acpi_write, used internally since the overhead of
+ * 64-bit values is not needed.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
+{
+ u64 address;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(hw_write);
+
+ /* Validate contents of the GAS register */
+
+ status = acpi_hw_validate_register(reg, 32, &address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Two address spaces supported: Memory or IO. PCI_Config is
+ * not supported here because the GAS structure is insufficient
+ */
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ address, value, reg->bit_width);
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_write_port((acpi_io_address)
+ address, value, reg->bit_width);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
+ value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+ acpi_ut_get_region_name(reg->space_id)));
+
+ return (status);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_hw_clear_acpi_status
@@ -152,15 +330,16 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
ACPI_FUNCTION_TRACE(hw_write_pm1_control);
- status = acpi_write(pm1a_control, &acpi_gbl_FADT.xpm1a_control_block);
+ status =
+ acpi_hw_write(pm1a_control, &acpi_gbl_FADT.xpm1a_control_block);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (acpi_gbl_FADT.xpm1b_control_block.address) {
status =
- acpi_write(pm1b_control,
- &acpi_gbl_FADT.xpm1b_control_block);
+ acpi_hw_write(pm1b_control,
+ &acpi_gbl_FADT.xpm1b_control_block);
}
return_ACPI_STATUS(status);
}
@@ -218,12 +397,13 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
- status = acpi_read(&value, &acpi_gbl_FADT.xpm2_control_block);
+ status =
+ acpi_hw_read(&value, &acpi_gbl_FADT.xpm2_control_block);
break;
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
- status = acpi_read(&value, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(&value, &acpi_gbl_FADT.xpm_timer_block);
break;
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
@@ -340,7 +520,8 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
* as per the ACPI spec.
*/
status =
- acpi_read(&read_value, &acpi_gbl_FADT.xpm2_control_block);
+ acpi_hw_read(&read_value,
+ &acpi_gbl_FADT.xpm2_control_block);
if (ACPI_FAILURE(status)) {
goto exit;
}
@@ -350,12 +531,13 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
ACPI_INSERT_BITS(value, ACPI_PM2_CONTROL_PRESERVED_BITS,
read_value);
- status = acpi_write(value, &acpi_gbl_FADT.xpm2_control_block);
+ status =
+ acpi_hw_write(value, &acpi_gbl_FADT.xpm2_control_block);
break;
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
- status = acpi_write(value, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_write(value, &acpi_gbl_FADT.xpm_timer_block);
break;
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
@@ -401,7 +583,7 @@ acpi_hw_read_multiple(u32 *value,
/* The first register is always required */
- status = acpi_read(&value_a, register_a);
+ status = acpi_hw_read(&value_a, register_a);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -409,7 +591,7 @@ acpi_hw_read_multiple(u32 *value,
/* Second register is optional */
if (register_b->address) {
- status = acpi_read(&value_b, register_b);
+ status = acpi_hw_read(&value_b, register_b);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -452,7 +634,7 @@ acpi_hw_write_multiple(u32 value,
/* The first register is always required */
- status = acpi_write(value, register_a);
+ status = acpi_hw_write(value, register_a);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -470,7 +652,7 @@ acpi_hw_write_multiple(u32 value,
* and writes have no side effects"
*/
if (register_b->address) {
- status = acpi_write(value, register_b);
+ status = acpi_hw_write(value, register_b);
}
return (status);
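
acpi_hw_validate_register centralizes the GAS sanity checks the callers above depend on: a non-null descriptor, a non-zero address, memory or I/O space only, a supported bit width, and a warning for a non-zero bit offset. The stand-alone sketch below walks the same decision sequence with simplified stand-in types; the 0x408 port in the example is only an illustrative PM-timer-style address.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a Generic Address Structure; not the ACPICA type */
struct gas {
	uint8_t  space_id;	/* 0 = system memory, 1 = system I/O */
	uint8_t  bit_width;
	uint8_t  bit_offset;
	uint64_t address;
};

enum { OK, BAD_PARAMETER, BAD_ADDRESS, UNSUPPORTED };

static int validate_register(const struct gas *reg, uint8_t max_bit_width,
			     uint64_t *address)
{
	if (!reg)
		return BAD_PARAMETER;

	*address = reg->address;	/* take a local copy, as the patch does */
	if (!*address)
		return BAD_ADDRESS;	/* optional/absent register, no message */

	if (reg->space_id != 0 && reg->space_id != 1)
		return UNSUPPORTED;	/* only memory and I/O space allowed */

	if (reg->bit_width != 8 && reg->bit_width != 16 &&
	    reg->bit_width != 32 && reg->bit_width != max_bit_width)
		return UNSUPPORTED;

	if (reg->bit_offset != 0)	/* tolerated, but flagged */
		fprintf(stderr, "warning: unsupported bit offset %u\n",
			reg->bit_offset);
	return OK;
}

int main(void)
{
	struct gas pm_timer = { .space_id = 1, .bit_width = 32, .address = 0x408 };
	uint64_t addr;

	printf("validate: %d (address 0x%llx)\n",
	       validate_register(&pm_timer, 32, &addr),
	       (unsigned long long)addr);
	return 0;
}
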
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index b7f522c8f02..6b282e85d03 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -100,7 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks)
}
status =
- acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
+ acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
return_ACPI_STATUS(status);
}
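
acpi_get_timer now fetches the raw PM timer ticks through acpi_hw_read; converting two readings into elapsed time is left to the companion acpi_get_timer_duration interface. The sketch below shows that conversion under two assumptions: the ACPI-defined 3.579545 MHz timer rate and at most one wraparound between samples. It is a sketch, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545u	/* ACPI PM timer rate in Hz */

/* Convert two raw PM timer readings into elapsed microseconds, allowing for
 * one wraparound of either a 24-bit or a 32-bit timer. */
static uint32_t pm_timer_elapsed_us(uint32_t start, uint32_t end, int is_32bit)
{
	uint32_t delta;

	if (end >= start)
		delta = end - start;
	else if (is_32bit)
		delta = (0xFFFFFFFFu - start) + end + 1;
	else
		delta = (0x00FFFFFFu - start) + end + 1;	/* 24-bit timer */

	return (uint32_t)(((uint64_t)delta * 1000000u) / PM_TIMER_FREQUENCY);
}

int main(void)
{
	/* ~3579545 ticks should come out close to one second (1000000 us) */
	printf("%u us\n",
	       (unsigned)pm_timer_elapsed_us(0x100, 0x100 + 3579545u, 1));
	return 0;
}
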
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 9829979f2bd..647c7b6e675 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -78,9 +78,22 @@ acpi_status acpi_reset(void)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Write the reset value to the reset register */
+ if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ /*
+ * For I/O space, write directly to the OSL. This bypasses the port
+ * validation mechanism, which may block a valid write to the reset
+ * register.
+ */
+ status =
+ acpi_os_write_port((acpi_io_address) reset_reg->address,
+ acpi_gbl_FADT.reset_value,
+ reset_reg->bit_width);
+ } else {
+ /* Write the reset value to the reset register */
+
+ status = acpi_hw_write(acpi_gbl_FADT.reset_value, reset_reg);
+ }
- status = acpi_write(acpi_gbl_FADT.reset_value, reset_reg);
return_ACPI_STATUS(status);
}
@@ -97,67 +110,92 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
*
* DESCRIPTION: Read from either memory or IO space.
*
+ * LIMITATIONS: <These limitations also apply to acpi_write>
+ * bit_width must be exactly 8, 16, 32, or 64.
+ *		  space_id must be system_memory or system_io.
+ * bit_offset and access_width are currently ignored, as there has
+ * not been a need to implement these.
+ *
******************************************************************************/
-acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg)
+acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
+ u32 value;
u32 width;
u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_read);
- /*
- * Must have a valid pointer to a GAS structure, and a non-zero address
- * within.
- */
- if (!reg) {
+ if (!return_value) {
return (AE_BAD_PARAMETER);
}
- /* Get a local copy of the address. Handles possible alignment issues */
+ /* Validate contents of the GAS register. Allow 64-bit transfers */
- ACPI_MOVE_64_TO_64(&address, &reg->address);
- if (!address) {
- return (AE_BAD_ADDRESS);
+ status = acpi_hw_validate_register(reg, 64, &address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
}
- /* Supported widths are 8/16/32 */
-
width = reg->bit_width;
- if ((width != 8) && (width != 16) && (width != 32)) {
- return (AE_SUPPORT);
+ if (width == 64) {
+ width = 32; /* Break into two 32-bit transfers */
}
- /* Initialize entire 32-bit return value to zero */
+ /* Initialize entire 64-bit return value to zero */
- *value = 0;
+ *return_value = 0;
+ value = 0;
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- switch (reg->space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status = acpi_os_read_memory((acpi_physical_address)
+ address, &value, width);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ *return_value = value;
- status = acpi_os_read_memory((acpi_physical_address) address,
- value, width);
- break;
+ if (reg->bit_width == 64) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
+ /* Read the top 32 bits */
- status =
- acpi_hw_read_port((acpi_io_address) address, value, width);
- break;
+ status = acpi_os_read_memory((acpi_physical_address)
+ (address + 4), &value, 32);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ *return_value |= ((u64)value << 32);
+ }
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
- default:
- ACPI_ERROR((AE_INFO,
- "Unsupported address space: %X", reg->space_id));
- return (AE_BAD_PARAMETER);
+ status = acpi_hw_read_port((acpi_io_address)
+ address, &value, width);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ *return_value = value;
+
+ if (reg->bit_width == 64) {
+
+ /* Read the top 32 bits */
+
+ status = acpi_hw_read_port((acpi_io_address)
+ (address + 4), &value, 32);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ *return_value |= ((u64)value << 32);
+ }
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
- *value, width, ACPI_FORMAT_UINT64(address),
+ "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n",
+ ACPI_FORMAT_UINT64(*return_value), reg->bit_width,
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
@@ -169,7 +207,7 @@ ACPI_EXPORT_SYMBOL(acpi_read)
*
* FUNCTION: acpi_write
*
- * PARAMETERS: Value - To be written
+ * PARAMETERS: Value - Value to be written
* Reg - GAS register structure
*
* RETURN: Status
@@ -177,7 +215,7 @@ ACPI_EXPORT_SYMBOL(acpi_read)
* DESCRIPTION: Write to either memory or IO space.
*
******************************************************************************/
-acpi_status acpi_write(u32 value, struct acpi_generic_address *reg)
+acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
{
u32 width;
u64 address;
@@ -185,54 +223,61 @@ acpi_status acpi_write(u32 value, struct acpi_generic_address *reg)
ACPI_FUNCTION_NAME(acpi_write);
- /*
- * Must have a valid pointer to a GAS structure, and a non-zero address
- * within.
- */
- if (!reg) {
- return (AE_BAD_PARAMETER);
- }
-
- /* Get a local copy of the address. Handles possible alignment issues */
+ /* Validate contents of the GAS register. Allow 64-bit transfers */
- ACPI_MOVE_64_TO_64(&address, &reg->address);
- if (!address) {
- return (AE_BAD_ADDRESS);
+ status = acpi_hw_validate_register(reg, 64, &address);
+ if (ACPI_FAILURE(status)) {
+ return (status);
}
- /* Supported widths are 8/16/32 */
-
width = reg->bit_width;
- if ((width != 8) && (width != 16) && (width != 32)) {
- return (AE_SUPPORT);
+ if (width == 64) {
+ width = 32; /* Break into two 32-bit transfers */
}
/*
- * Two address spaces supported: Memory or IO.
- * PCI_Config is not supported here because the GAS struct is insufficient
+ * Two address spaces supported: Memory or IO. PCI_Config is
+ * not supported here because the GAS structure is insufficient
*/
- switch (reg->space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-
- status = acpi_os_write_memory((acpi_physical_address) address,
- value, width);
- break;
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ address, ACPI_LODWORD(value),
+ width);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
- case ACPI_ADR_SPACE_SYSTEM_IO:
+ if (reg->bit_width == 64) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ (address + 4),
+ ACPI_HIDWORD(value), 32);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
- status = acpi_hw_write_port((acpi_io_address) address, value,
+ status = acpi_hw_write_port((acpi_io_address)
+ address, ACPI_LODWORD(value),
width);
- break;
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
- default:
- ACPI_ERROR((AE_INFO,
- "Unsupported address space: %X", reg->space_id));
- return (AE_BAD_PARAMETER);
+ if (reg->bit_width == 64) {
+ status = acpi_hw_write_port((acpi_io_address)
+ (address + 4),
+ ACPI_HIDWORD(value), 32);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, width, ACPI_FORMAT_UINT64(address),
+ "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n",
+ ACPI_FORMAT_UINT64(value), reg->bit_width,
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
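
The widened acpi_read/acpi_write handle a 64-bit GAS by splitting the access into two 32-bit transfers, low dword at Address and high dword at Address + 4, then merging them in the ACPI_LODWORD/ACPI_HIDWORD style. Below is a stand-alone sketch of that split and merge, with a plain array standing in for memory or I/O space.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_space[2];		/* [0] = low dword, [1] = high dword */

static void write64_as_two_32(uint64_t value)
{
	fake_space[0] = (uint32_t)(value & 0xFFFFFFFFu);	/* low dword  */
	fake_space[1] = (uint32_t)(value >> 32);		/* high dword */
}

static uint64_t read64_as_two_32(void)
{
	uint64_t result = fake_space[0];

	result |= (uint64_t)fake_space[1] << 32;	/* merge the top 32 bits */
	return result;
}

int main(void)
{
	write64_as_two_32(0x123456789ABCDEF0ULL);
	printf("read back 0x%016llX\n",
	       (unsigned long long)read64_as_two_32());
	return 0;
}
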
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index efc971ab7d6..8a58a1b85aa 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -96,17 +96,68 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
*
* RETURN: None
*
- * DESCRIPTION: Delete a namespace node
+ * DESCRIPTION: Delete a namespace node. All node deletions must come through
+ * here. Detaches any attached objects, including any attached
+ * data. If a handler is associated with attached data, it is
+ * invoked before the node is deleted.
*
******************************************************************************/
void acpi_ns_delete_node(struct acpi_namespace_node *node)
{
+ union acpi_operand_object *obj_desc;
+
+ ACPI_FUNCTION_NAME(ns_delete_node);
+
+ /* Detach an object if there is one */
+
+ acpi_ns_detach_object(node);
+
+ /*
+ * Delete an attached data object if present (an object that was created
+ * and attached via acpi_attach_data). Note: After any normal object is
+ * detached above, the only possible remaining object is a data object.
+ */
+ obj_desc = node->object;
+ if (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {
+
+ /* Invoke the attached data deletion handler if present */
+
+ if (obj_desc->data.handler) {
+ obj_desc->data.handler(node, obj_desc->data.pointer);
+ }
+
+ acpi_ut_remove_reference(obj_desc);
+ }
+
+ /* Now we can delete the node */
+
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
+
+ ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Node %p, Remaining %X\n",
+ node, acpi_gbl_current_node_count));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_remove_node
+ *
+ * PARAMETERS: Node - Node to be removed/deleted
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Remove (unlink) and delete a namespace node
+ *
+ ******************************************************************************/
+
+void acpi_ns_remove_node(struct acpi_namespace_node *node)
+{
struct acpi_namespace_node *parent_node;
struct acpi_namespace_node *prev_node;
struct acpi_namespace_node *next_node;
- ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
+ ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node);
parent_node = acpi_ns_get_parent_node(node);
@@ -142,12 +193,9 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
}
}
- ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
-
- /* Detach an object if there is one, then delete the node */
+ /* Delete the node and any attached objects */
- acpi_ns_detach_object(node);
- (void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
+ acpi_ns_delete_node(node);
return_VOID;
}
@@ -273,25 +321,11 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
parent_node, child_node));
}
- /* Now we can free this child object */
-
- ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
-
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
- "Object %p, Remaining %X\n", child_node,
- acpi_gbl_current_node_count));
-
- /* Detach an object if there is one, then free the child node */
-
- acpi_ns_detach_object(child_node);
-
- /* Now we can delete the node */
-
- (void)acpi_os_release_object(acpi_gbl_namespace_cache,
- child_node);
-
- /* And move on to the next child in the list */
-
+ /*
+ * Delete this child node and move on to the next child in the list.
+ * No need to unlink the node since we are deleting the entire branch.
+ */
+ acpi_ns_delete_node(child_node);
child_node = next_node;
} while (!(flags & ANOBJ_END_OF_PEER_LIST));
@@ -433,7 +467,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
if (deletion_node) {
acpi_ns_delete_children(deletion_node);
- acpi_ns_delete_node(deletion_node);
+ acpi_ns_remove_node(deletion_node);
deletion_node = NULL;
}
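
acpi_ns_delete_node now owns the whole teardown sequence: detach the normal attached object, run the deletion handler of any remaining attached-data object (the kind installed with acpi_attach_data), then release the node, while acpi_ns_remove_node merely adds the unlink step. A stand-alone sketch of that ordering follows, with simplified stand-in types rather than the ACPICA structures.

#include <stdio.h>
#include <stdlib.h>

struct node {
	void *attached_object;
	void *attached_data;
	void (*data_handler)(struct node *node, void *data);
};

static void delete_node(struct node *node)
{
	node->attached_object = NULL;	/* "detach" the normal object first */

	/* If a data object remains, its handler runs before the node goes away */
	if (node->attached_data && node->data_handler)
		node->data_handler(node, node->attached_data);

	free(node);			/* finally release the node itself */
}

static void my_handler(struct node *node, void *data)
{
	printf("data handler called for node %p, data %p\n",
	       (void *)node, data);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));
	int cookie = 42;

	if (!n)
		return 1;
	n->attached_data = &cookie;
	n->data_handler = my_handler;
	delete_node(n);
	return 0;
}
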
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 41994fe7fbb..0fe87f1aef1 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -70,7 +70,6 @@ static acpi_status
acpi_ns_dump_one_device(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
- struct acpi_buffer buffer;
struct acpi_device_info *info;
acpi_status status;
u32 i;
@@ -80,17 +79,15 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_get_object_info(obj_handle, &buffer);
+ status = acpi_get_object_info(obj_handle, &info);
if (ACPI_SUCCESS(status)) {
- info = buffer.pointer;
for (i = 0; i < level; i++) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
" HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
- info->hardware_id.value,
+ info->hardware_id.string,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
ACPI_FREE(info);
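
The nsdumpdv.c change reflects the reworked acpi_get_object_info convention: rather than filling a caller-supplied acpi_buffer, the callee allocates one info block that the caller must free, and the hardware ID is exposed as a plain string. Below is a generic stand-alone sketch of that allocate-and-return pattern, with made-up types rather than the real acpi_device_info.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct device_info {
	char hardware_id[16];	/* a plain string, not a packed value */
	unsigned int current_status;
};

static int get_object_info(struct device_info **out)
{
	struct device_info *info = calloc(1, sizeof(*info));

	if (!info)
		return -1;
	strcpy(info->hardware_id, "PNP0A03");	/* example HID */
	info->current_status = 0x0F;
	*out = info;		/* ownership passes to the caller */
	return 0;
}

int main(void)
{
	struct device_info *info;

	if (get_object_info(&info) == 0) {
		printf("HID: %s, Status: %X\n",
		       info->hardware_id, info->current_status);
		free(info);	/* caller releases, like ACPI_FREE(info) above */
	}
	return 0;
}
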
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 8e7dec1176c..846d1132feb 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -50,6 +50,11 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nseval")
+/* Local prototypes */
+static void
+acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
+ struct acpi_evaluate_info *info);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_evaluate
@@ -76,6 +81,7 @@ ACPI_MODULE_NAME("nseval")
* MUTEX: Locks interpreter
*
******************************************************************************/
+
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
{
acpi_status status;
@@ -276,3 +282,134 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
*/
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_exec_module_code_list
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None. Exceptions during method execution are ignored, since
+ * we cannot abort a table load.
+ *
+ * DESCRIPTION: Execute all elements of the global module-level code list.
+ * Each element is executed as a single control method.
+ *
+ ******************************************************************************/
+
+void acpi_ns_exec_module_code_list(void)
+{
+ union acpi_operand_object *prev;
+ union acpi_operand_object *next;
+ struct acpi_evaluate_info *info;
+ u32 method_count = 0;
+
+ ACPI_FUNCTION_TRACE(ns_exec_module_code_list);
+
+ /* Exit now if the list is empty */
+
+ next = acpi_gbl_module_code_list;
+ if (!next) {
+ return_VOID;
+ }
+
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_VOID;
+ }
+
+ /* Walk the list, executing each "method" */
+
+ while (next) {
+ prev = next;
+ next = next->method.mutex;
+
+ /* Clear the link field and execute the method */
+
+ prev->method.mutex = NULL;
+ acpi_ns_exec_module_code(prev, info);
+ method_count++;
+
+ /* Delete the (temporary) method object */
+
+ acpi_ut_remove_reference(prev);
+ }
+
+ ACPI_INFO((AE_INFO,
+ "Executed %u blocks of module-level executable AML code",
+ method_count));
+
+ ACPI_FREE(info);
+ acpi_gbl_module_code_list = NULL;
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_exec_module_code
+ *
+ * PARAMETERS: method_obj - Object container for the module-level code
+ * Info - Info block for method evaluation
+ *
+ * RETURN: None. Exceptions during method execution are ignored, since
+ * we cannot abort a table load.
+ *
+ * DESCRIPTION: Execute a control method containing a block of module-level
+ * executable AML code. The control method is temporarily
+ * installed to the root node, then evaluated.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
+ struct acpi_evaluate_info *info)
+{
+ union acpi_operand_object *root_obj;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ns_exec_module_code);
+
+ /* Initialize the evaluation information block */
+
+ ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
+ info->prefix_node = acpi_gbl_root_node;
+
+ /*
+ * Get the currently attached root object. Add a reference, because the
+ * ref count will be decreased when the method object is installed to
+ * the root node.
+ */
+ root_obj = acpi_ns_get_attached_object(acpi_gbl_root_node);
+ acpi_ut_add_reference(root_obj);
+
+ /* Install the method (module-level code) in the root node */
+
+ status = acpi_ns_attach_object(acpi_gbl_root_node, method_obj,
+ ACPI_TYPE_METHOD);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Execute the root node as a control method */
+
+ status = acpi_ns_evaluate(info);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n",
+ method_obj->method.aml_start));
+
+ /* Detach the temporary method object */
+
+ acpi_ns_detach_object(acpi_gbl_root_node);
+
+ /* Restore the original root object */
+
+ status =
+ acpi_ns_attach_object(acpi_gbl_root_node, root_obj,
+ ACPI_TYPE_DEVICE);
+
+ exit:
+ acpi_ut_remove_reference(root_obj);
+ return_VOID;
+}
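
acpi_ns_exec_module_code_list walks a singly linked list of temporary method objects chained through an otherwise-unused field (method.mutex in the patch), clearing each link before execution and dropping the object afterwards. The stand-alone sketch below mimics that walk with simplified stand-in types.

#include <stdio.h>
#include <stdlib.h>

struct method_obj {
	struct method_obj *link;	/* plays the role of method.mutex */
	int block_id;
};

static void exec_module_code(struct method_obj *m)
{
	printf("executing module-level block %d\n", m->block_id);
}

static void exec_module_code_list(struct method_obj *head)
{
	struct method_obj *prev;
	struct method_obj *next = head;
	unsigned int count = 0;

	while (next) {
		prev = next;
		next = next->link;

		prev->link = NULL;	/* clear the link before execution */
		exec_module_code(prev);
		count++;
		free(prev);		/* drop the temporary method object */
	}
	printf("executed %u blocks\n", count);
}

int main(void)
{
	struct method_obj *b = malloc(sizeof(*b));
	struct method_obj *a = malloc(sizeof(*a));

	if (!a || !b)
		return 1;
	b->block_id = 2; b->link = NULL;
	a->block_id = 1; a->link = b;
	exec_module_code_list(a);
	return 0;
}
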
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 2adfcf329e1..1d5b360eb25 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -170,6 +170,21 @@ acpi_status acpi_ns_initialize_devices(void)
goto error_exit;
}
+ /*
+ * Execute the "global" _INI method that may appear at the root. This
+ * support is provided for Windows compatibility (Vista+) and is not
+ * part of the ACPI specification.
+ */
+ info.evaluate_info->prefix_node = acpi_gbl_root_node;
+ info.evaluate_info->pathname = METHOD_NAME__INI;
+ info.evaluate_info->parameters = NULL;
+ info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info.evaluate_info);
+ if (ACPI_SUCCESS(status)) {
+ info.num_INI++;
+ }
+
/* Walk namespace to execute all _INIs on present devices */
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index dcd7a6adbbb..a7234e60e98 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -270,8 +270,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
/* Now delete the starting object, and we are done */
- acpi_ns_delete_node(child_handle);
-
+ acpi_ns_remove_node(child_handle);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 7f8e066b12a..f8427afeebd 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -42,6 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define ACPI_CREATE_PREDEFINED_TABLE
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -72,30 +74,31 @@ ACPI_MODULE_NAME("nspredef")
******************************************************************************/
/* Local prototypes */
static acpi_status
-acpi_ns_check_package(char *pathname,
- union acpi_operand_object **return_object_ptr,
- const union acpi_predefined_info *predefined);
+acpi_ns_check_package(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
+acpi_ns_check_package_list(struct acpi_predefined_data *data,
+ const union acpi_predefined_info *package,
+ union acpi_operand_object **elements, u32 count);
static acpi_status
-acpi_ns_check_package_elements(char *pathname,
+acpi_ns_check_package_elements(struct acpi_predefined_data *data,
union acpi_operand_object **elements,
u8 type1,
u32 count1,
u8 type2, u32 count2, u32 start_index);
static acpi_status
-acpi_ns_check_object_type(char *pathname,
+acpi_ns_check_object_type(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr,
u32 expected_btypes, u32 package_index);
static acpi_status
-acpi_ns_check_reference(char *pathname,
+acpi_ns_check_reference(struct acpi_predefined_data *data,
union acpi_operand_object *return_object);
-static acpi_status
-acpi_ns_repair_object(u32 expected_btypes,
- u32 package_index,
- union acpi_operand_object **return_object_ptr);
+static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes);
/*
* Names for the types that can be returned by the predefined objects.
@@ -109,13 +112,13 @@ static const char *acpi_rtype_names[] = {
"/Reference",
};
-#define ACPI_NOT_PACKAGE ACPI_UINT32_MAX
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_check_predefined_names
*
* PARAMETERS: Node - Namespace node for the method/object
+ * user_param_count - Number of parameters actually passed
+ * return_status - Status from the object evaluation
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -135,12 +138,13 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
acpi_status status = AE_OK;
const union acpi_predefined_info *predefined;
char *pathname;
+ struct acpi_predefined_data *data;
/* Match the name for this method/object against the predefined list */
predefined = acpi_ns_check_for_predefined_name(node);
- /* Get the full pathname to the object, for use in error messages */
+ /* Get the full pathname to the object, for use in warning messages */
pathname = acpi_ns_get_external_pathname(node);
if (!pathname) {
@@ -158,28 +162,17 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
/* If not a predefined name, we cannot validate the return object */
if (!predefined) {
- goto exit;
- }
-
- /* If the method failed, we cannot validate the return object */
-
- if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) {
- goto exit;
+ goto cleanup;
}
/*
- * Only validate the return value on the first successful evaluation of
- * the method. This ensures that any warnings will only be emitted during
- * the very first evaluation of the method/object.
+ * If the method failed or did not actually return an object, we cannot
+ * validate the return object
*/
- if (node->flags & ANOBJ_EVALUATED) {
- goto exit;
+ if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) {
+ goto cleanup;
}
- /* Mark the node as having been successfully evaluated */
-
- node->flags |= ANOBJ_EVALUATED;
-
/*
* If there is no return value, check if we require a return value for
* this predefined name. Either one return value is expected, or none,
@@ -190,46 +183,67 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
if (!return_object) {
if ((predefined->info.expected_btypes) &&
(!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
- ACPI_ERROR((AE_INFO,
- "%s: Missing expected return value",
- pathname));
+ ACPI_WARN_PREDEFINED((AE_INFO, pathname,
+ ACPI_WARN_ALWAYS,
+ "Missing expected return value"));
status = AE_AML_NO_RETURN_VALUE;
}
- goto exit;
+ goto cleanup;
}
/*
- * We have a return value, but if one wasn't expected, just exit, this is
- * not a problem
+ * 1) We have a return value, but if one wasn't expected, just exit, this is
+ * not a problem. For example, if the "Implicit Return" feature is
+ * enabled, methods will always return a value.
*
- * For example, if the "Implicit Return" feature is enabled, methods will
- * always return a value
+ * 2) If the return value can be of any type, then we cannot perform any
+ * validation, exit.
*/
- if (!predefined->info.expected_btypes) {
- goto exit;
+ if ((!predefined->info.expected_btypes) ||
+ (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
+ goto cleanup;
}
+ /* Create the parameter data block for object validation */
+
+ data = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_predefined_data));
+ if (!data) {
+ goto cleanup;
+ }
+ data->predefined = predefined;
+ data->node_flags = node->flags;
+ data->pathname = pathname;
+
/*
* Check that the type of the return object is what is expected for
* this predefined name
*/
- status = acpi_ns_check_object_type(pathname, return_object_ptr,
+ status = acpi_ns_check_object_type(data, return_object_ptr,
predefined->info.expected_btypes,
- ACPI_NOT_PACKAGE);
+ ACPI_NOT_PACKAGE_ELEMENT);
if (ACPI_FAILURE(status)) {
- goto exit;
+ goto check_validation_status;
}
/* For returned Package objects, check the type of all sub-objects */
if (return_object->common.type == ACPI_TYPE_PACKAGE) {
- status =
- acpi_ns_check_package(pathname, return_object_ptr,
- predefined);
+ status = acpi_ns_check_package(data, return_object_ptr);
+ }
+
+check_validation_status:
+ /*
+ * If the object validation failed or if we successfully repaired one
+ * or more objects, mark the parent node to suppress further warning
+ * messages during the next evaluation of the same method/object.
+ */
+ if (ACPI_FAILURE(status) || (data->flags & ACPI_OBJECT_REPAIRED)) {
+ node->flags |= ANOBJ_EVALUATED;
}
+ ACPI_FREE(data);
- exit:
+cleanup:
ACPI_FREE(pathname);
return (status);
}
@@ -268,64 +282,58 @@ acpi_ns_check_parameter_count(char *pathname,
param_count = node->object->method.param_count;
}
- /* Argument count check for non-predefined methods/objects */
-
if (!predefined) {
/*
+ * Check the parameter count for non-predefined methods/objects.
+ *
* Warning if too few or too many arguments have been passed by the
* caller. An incorrect number of arguments may not cause the method
* to fail. However, the method will fail if there are too few
* arguments and the method attempts to use one of the missing ones.
*/
if (user_param_count < param_count) {
- ACPI_WARNING((AE_INFO,
- "%s: Insufficient arguments - needs %d, found %d",
- pathname, param_count, user_param_count));
+ ACPI_WARN_PREDEFINED((AE_INFO, pathname,
+ ACPI_WARN_ALWAYS,
+ "Insufficient arguments - needs %u, found %u",
+ param_count, user_param_count));
} else if (user_param_count > param_count) {
- ACPI_WARNING((AE_INFO,
- "%s: Excess arguments - needs %d, found %d",
- pathname, param_count, user_param_count));
+ ACPI_WARN_PREDEFINED((AE_INFO, pathname,
+ ACPI_WARN_ALWAYS,
+ "Excess arguments - needs %u, found %u",
+ param_count, user_param_count));
}
return;
}
- /* Allow two different legal argument counts (_SCP, etc.) */
-
+ /*
+ * Validate the user-supplied parameter count.
+ * Allow two different legal argument counts (_SCP, etc.)
+ */
required_params_current = predefined->info.param_count & 0x0F;
required_params_old = predefined->info.param_count >> 4;
if (user_param_count != ACPI_UINT32_MAX) {
-
- /* Validate the user-supplied parameter count */
-
if ((user_param_count != required_params_current) &&
(user_param_count != required_params_old)) {
- ACPI_WARNING((AE_INFO,
- "%s: Parameter count mismatch - "
- "caller passed %d, ACPI requires %d",
- pathname, user_param_count,
- required_params_current));
+ ACPI_WARN_PREDEFINED((AE_INFO, pathname,
+ ACPI_WARN_ALWAYS,
+ "Parameter count mismatch - "
+ "caller passed %u, ACPI requires %u",
+ user_param_count,
+ required_params_current));
}
}
/*
- * Only validate the argument count on the first successful evaluation of
- * the method. This ensures that any warnings will only be emitted during
- * the very first evaluation of the method/object.
- */
- if (node->flags & ANOBJ_EVALUATED) {
- return;
- }
-
- /*
* Check that the ASL-defined parameter count is what is expected for
- * this predefined name.
+ * this predefined name (parameter count as defined by the ACPI
+ * specification)
*/
if ((param_count != required_params_current) &&
(param_count != required_params_old)) {
- ACPI_WARNING((AE_INFO,
- "%s: Parameter count mismatch - ASL declared %d, ACPI requires %d",
- pathname, param_count, required_params_current));
+ ACPI_WARN_PREDEFINED((AE_INFO, pathname, node->flags,
+ "Parameter count mismatch - ASL declared %u, ACPI requires %u",
+ param_count, required_params_current));
}
}
@@ -358,9 +366,6 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
this_name = predefined_names;
while (this_name->info.name[0]) {
if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
-
- /* Return pointer to this table entry */
-
return (this_name);
}
@@ -375,17 +380,16 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
this_name++;
}
- return (NULL);
+ return (NULL); /* Not found */
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_check_package
*
- * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * PARAMETERS: Data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
- * Predefined - Pointer to entry in predefined name table
*
* RETURN: Status
*
@@ -395,30 +399,26 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
******************************************************************************/
static acpi_status
-acpi_ns_check_package(char *pathname,
- union acpi_operand_object **return_object_ptr,
- const union acpi_predefined_info *predefined)
+acpi_ns_check_package(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
{
union acpi_operand_object *return_object = *return_object_ptr;
const union acpi_predefined_info *package;
- union acpi_operand_object *sub_package;
union acpi_operand_object **elements;
- union acpi_operand_object **sub_elements;
- acpi_status status;
+ acpi_status status = AE_OK;
u32 expected_count;
u32 count;
u32 i;
- u32 j;
ACPI_FUNCTION_NAME(ns_check_package);
/* The package info for this name is in the next table entry */
- package = predefined + 1;
+ package = data->predefined + 1;
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"%s Validating return Package of Type %X, Count %X\n",
- pathname, package->ret_info.type,
+ data->pathname, package->ret_info.type,
return_object->package.count));
/* Extract package count and elements array */
@@ -429,9 +429,8 @@ acpi_ns_check_package(char *pathname,
/* The package must have at least one element, else invalid */
if (!count) {
- ACPI_WARNING((AE_INFO,
- "%s: Return Package has no elements (empty)",
- pathname));
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package has no elements (empty)"));
return (AE_AML_OPERAND_VALUE);
}
@@ -456,15 +455,16 @@ acpi_ns_check_package(char *pathname,
if (count < expected_count) {
goto package_too_small;
} else if (count > expected_count) {
- ACPI_WARNING((AE_INFO,
- "%s: Return Package is larger than needed - "
- "found %u, expected %u", pathname, count,
- expected_count));
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ data->node_flags,
+ "Return Package is larger than needed - "
+ "found %u, expected %u", count,
+ expected_count));
}
/* Validate all elements of the returned package */
- status = acpi_ns_check_package_elements(pathname, elements,
+ status = acpi_ns_check_package_elements(data, elements,
package->ret_info.
object_type1,
package->ret_info.
@@ -473,9 +473,6 @@ acpi_ns_check_package(char *pathname,
object_type2,
package->ret_info.
count2, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
break;
case ACPI_PTYPE1_VAR:
@@ -485,7 +482,7 @@ acpi_ns_check_package(char *pathname,
* elements must be of the same type
*/
for (i = 0; i < count; i++) {
- status = acpi_ns_check_object_type(pathname, elements,
+ status = acpi_ns_check_object_type(data, elements,
package->ret_info.
object_type1, i);
if (ACPI_FAILURE(status)) {
@@ -517,8 +514,7 @@ acpi_ns_check_package(char *pathname,
/* These are the required package elements (0, 1, or 2) */
status =
- acpi_ns_check_object_type(pathname,
- elements,
+ acpi_ns_check_object_type(data, elements,
package->
ret_info3.
object_type[i],
@@ -530,8 +526,7 @@ acpi_ns_check_package(char *pathname,
/* These are the optional package elements */
status =
- acpi_ns_check_object_type(pathname,
- elements,
+ acpi_ns_check_object_type(data, elements,
package->
ret_info3.
tail_object_type,
@@ -544,11 +539,30 @@ acpi_ns_check_package(char *pathname,
}
break;
+ case ACPI_PTYPE2_REV_FIXED:
+
+ /* First element is the (Integer) revision */
+
+ status = acpi_ns_check_object_type(data, elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ elements++;
+ count--;
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
case ACPI_PTYPE2_PKG_COUNT:
/* First element is the (Integer) count of sub-packages to follow */
- status = acpi_ns_check_object_type(pathname, elements,
+ status = acpi_ns_check_object_type(data, elements,
ACPI_RTYPE_INTEGER, 0);
if (ACPI_FAILURE(status)) {
return (status);
@@ -566,9 +580,11 @@ acpi_ns_check_package(char *pathname,
count = expected_count;
elements++;
- /* Now we can walk the sub-packages */
+ /* Examine the sub-packages */
- /*lint -fallthrough */
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
case ACPI_PTYPE2:
case ACPI_PTYPE2_FIXED:
@@ -576,176 +592,240 @@ acpi_ns_check_package(char *pathname,
case ACPI_PTYPE2_COUNT:
/*
- * These types all return a single package that consists of a variable
- * number of sub-packages
+ * These types all return a single Package that consists of a
+ * variable number of sub-Packages.
+ *
+ * First, ensure that the first element is a sub-Package. If not,
+ * the BIOS may have incorrectly returned the object as a single
+ * package instead of a Package of Packages (a common error if
+ * there is only one entry). We may be able to repair this by
+ * wrapping the returned Package with a new outer Package.
*/
- for (i = 0; i < count; i++) {
- sub_package = *elements;
- sub_elements = sub_package->package.elements;
+ if ((*elements)->common.type != ACPI_TYPE_PACKAGE) {
- /* Each sub-object must be of type Package */
+ /* Create the new outer package and populate it */
status =
- acpi_ns_check_object_type(pathname, &sub_package,
- ACPI_RTYPE_PACKAGE, i);
+ acpi_ns_repair_package_list(data,
+ return_object_ptr);
if (ACPI_FAILURE(status)) {
return (status);
}
- /* Examine the different types of sub-packages */
+ /* Update locals to point to the new package (of 1 element) */
- switch (package->ret_info.type) {
- case ACPI_PTYPE2:
- case ACPI_PTYPE2_PKG_COUNT:
+ return_object = *return_object_ptr;
+ elements = return_object->package.elements;
+ count = 1;
+ }
- /* Each subpackage has a fixed number of elements */
+ /* Examine the sub-packages */
- expected_count =
- package->ret_info.count1 +
- package->ret_info.count2;
- if (sub_package->package.count !=
- expected_count) {
- count = sub_package->package.count;
- goto package_too_small;
- }
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
- status =
- acpi_ns_check_package_elements(pathname,
- sub_elements,
- package->
- ret_info.
- object_type1,
- package->
- ret_info.
- count1,
- package->
- ret_info.
- object_type2,
- package->
- ret_info.
- count2, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
+ default:
- case ACPI_PTYPE2_FIXED:
+ /* Should not get here if predefined info table is correct */
- /* Each sub-package has a fixed length */
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Invalid internal return type in table entry: %X",
+ package->ret_info.type));
- expected_count = package->ret_info2.count;
- if (sub_package->package.count < expected_count) {
- count = sub_package->package.count;
- goto package_too_small;
- }
+ return (AE_AML_INTERNAL);
+ }
- /* Check the type of each sub-package element */
+ return (status);
- for (j = 0; j < expected_count; j++) {
- status =
- acpi_ns_check_object_type(pathname,
- &sub_elements[j],
- package->ret_info2.object_type[j], j);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- break;
+package_too_small:
- case ACPI_PTYPE2_MIN:
+ /* Error exit for the case with an incorrect package count */
- /* Each sub-package has a variable but minimum length */
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package is too small - found %u elements, expected %u",
+ count, expected_count));
- expected_count = package->ret_info.count1;
- if (sub_package->package.count < expected_count) {
- count = sub_package->package.count;
- goto package_too_small;
- }
+ return (AE_AML_OPERAND_VALUE);
+}
- /* Check the type of each sub-package element */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_list
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * Package - Pointer to package-specific info for method
+ * Elements - Element list of parent package. All elements
+ * of this list should be of type Package.
+ * Count - Count of subpackages
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Examine a list of subpackages
+ *
+ ******************************************************************************/
- status =
- acpi_ns_check_package_elements(pathname,
- sub_elements,
- package->
- ret_info.
- object_type1,
- sub_package->
- package.
- count, 0, 0,
- 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
+static acpi_status
+acpi_ns_check_package_list(struct acpi_predefined_data *data,
+ const union acpi_predefined_info *package,
+ union acpi_operand_object **elements, u32 count)
+{
+ union acpi_operand_object *sub_package;
+ union acpi_operand_object **sub_elements;
+ acpi_status status;
+ u32 expected_count;
+ u32 i;
+ u32 j;
- case ACPI_PTYPE2_COUNT:
+ /* Validate each sub-Package in the parent Package */
- /* First element is the (Integer) count of elements to follow */
+ for (i = 0; i < count; i++) {
+ sub_package = *elements;
+ sub_elements = sub_package->package.elements;
- status =
- acpi_ns_check_object_type(pathname,
- sub_elements,
- ACPI_RTYPE_INTEGER,
- 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ /* Each sub-object must be of type Package */
- /* Make sure package is large enough for the Count */
+ status = acpi_ns_check_object_type(data, &sub_package,
+ ACPI_RTYPE_PACKAGE, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
- expected_count =
- (u32) (*sub_elements)->integer.value;
- if (sub_package->package.count < expected_count) {
- count = sub_package->package.count;
- goto package_too_small;
- }
+ /* Examine the different types of expected sub-packages */
+
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_PKG_COUNT:
+ case ACPI_PTYPE2_REV_FIXED:
+
+ /* Each subpackage has a fixed number of elements */
+
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
- /* Check the type of each sub-package element */
+ case ACPI_PTYPE2_FIXED:
+ /* Each sub-package has a fixed length */
+
+ expected_count = package->ret_info2.count;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ for (j = 0; j < expected_count; j++) {
status =
- acpi_ns_check_package_elements(pathname,
- (sub_elements
- + 1),
- package->
- ret_info.
- object_type1,
- (expected_count
- - 1), 0, 0,
- 1);
+ acpi_ns_check_object_type(data,
+ &sub_elements[j],
+ package->
+ ret_info2.
+ object_type[j],
+ j);
if (ACPI_FAILURE(status)) {
return (status);
}
- break;
+ }
+ break;
+
+ case ACPI_PTYPE2_MIN:
- default:
- break;
+ /* Each sub-package has a variable but minimum length */
+
+ expected_count = package->ret_info.count1;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
}
- elements++;
- }
- break;
+ /* Check the type of each sub-package element */
- default:
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ sub_package->package.
+ count, 0, 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
- /* Should not get here if predefined info table is correct */
+ case ACPI_PTYPE2_COUNT:
+
+ /*
+ * First element is the (Integer) count of elements, including
+ * the count field.
+ */
+ status = acpi_ns_check_object_type(data, sub_elements,
+ ACPI_RTYPE_INTEGER,
+ 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
- ACPI_WARNING((AE_INFO,
- "%s: Invalid internal return type in table entry: %X",
- pathname, package->ret_info.type));
+ /*
+			 * Make sure package is large enough for the Count and is
+			 * as large as the minimum size
+ */
+ expected_count = (u32)(*sub_elements)->integer.value;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+ if (sub_package->package.count <
+ package->ret_info.count1) {
+ expected_count = package->ret_info.count1;
+ goto package_too_small;
+ }
- return (AE_AML_INTERNAL);
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(data,
+ (sub_elements + 1),
+ package->ret_info.
+ object_type1,
+ (expected_count - 1),
+ 0, 0, 1);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ default: /* Should not get here, type was validated by caller */
+
+ return (AE_AML_INTERNAL);
+ }
+
+ elements++;
}
return (AE_OK);
- package_too_small:
+package_too_small:
- /* Error exit for the case with an incorrect package count */
+ /* The sub-package count was smaller than required */
- ACPI_WARNING((AE_INFO, "%s: Return Package is too small - "
- "found %u, expected %u", pathname, count,
- expected_count));
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+ i, sub_package->package.count, expected_count));
return (AE_AML_OPERAND_VALUE);
}
@@ -754,7 +834,7 @@ acpi_ns_check_package(char *pathname,
*
* FUNCTION: acpi_ns_check_package_elements
*
- * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * PARAMETERS: Data - Pointer to validation data structure
* Elements - Pointer to the package elements array
* Type1 - Object type for first group
* Count1 - Count for first group
@@ -770,7 +850,7 @@ acpi_ns_check_package(char *pathname,
******************************************************************************/
static acpi_status
-acpi_ns_check_package_elements(char *pathname,
+acpi_ns_check_package_elements(struct acpi_predefined_data *data,
union acpi_operand_object **elements,
u8 type1,
u32 count1,
@@ -786,7 +866,7 @@ acpi_ns_check_package_elements(char *pathname,
* The second group can have a count of zero.
*/
for (i = 0; i < count1; i++) {
- status = acpi_ns_check_object_type(pathname, this_element,
+ status = acpi_ns_check_object_type(data, this_element,
type1, i + start_index);
if (ACPI_FAILURE(status)) {
return (status);
@@ -795,7 +875,7 @@ acpi_ns_check_package_elements(char *pathname,
}
for (i = 0; i < count2; i++) {
- status = acpi_ns_check_object_type(pathname, this_element,
+ status = acpi_ns_check_object_type(data, this_element,
type2,
(i + count1 + start_index));
if (ACPI_FAILURE(status)) {
@@ -811,12 +891,13 @@ acpi_ns_check_package_elements(char *pathname,
*
* FUNCTION: acpi_ns_check_object_type
*
- * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * PARAMETERS: Data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
* expected_btypes - Bitmap of expected return type(s)
* package_index - Index of object within parent package (if
- * applicable - ACPI_NOT_PACKAGE otherwise)
+ * applicable - ACPI_NOT_PACKAGE_ELEMENT
+ * otherwise)
*
* RETURN: Status
*
@@ -826,7 +907,7 @@ acpi_ns_check_package_elements(char *pathname,
******************************************************************************/
static acpi_status
-acpi_ns_check_object_type(char *pathname,
+acpi_ns_check_object_type(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr,
u32 expected_btypes, u32 package_index)
{
@@ -834,9 +915,6 @@ acpi_ns_check_object_type(char *pathname,
acpi_status status = AE_OK;
u32 return_btype;
char type_buffer[48]; /* Room for 5 types */
- u32 this_rtype;
- u32 i;
- u32 j;
/*
* If we get a NULL return_object here, it is a NULL package element,
@@ -849,10 +927,11 @@ acpi_ns_check_object_type(char *pathname,
/* A Namespace node should not get here, but make sure */
if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
- ACPI_WARNING((AE_INFO,
- "%s: Invalid return type - Found a Namespace node [%4.4s] type %s",
- pathname, return_object->node.name.ascii,
- acpi_ut_get_type_name(return_object->node.type)));
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Invalid return type - Found a Namespace node [%4.4s] type %s",
+ return_object->node.name.ascii,
+ acpi_ut_get_type_name(return_object->node.
+ type)));
return (AE_AML_OPERAND_TYPE);
}
@@ -897,10 +976,11 @@ acpi_ns_check_object_type(char *pathname,
/* Type mismatch -- attempt repair of the returned object */
- status = acpi_ns_repair_object(expected_btypes, package_index,
+ status = acpi_ns_repair_object(data, expected_btypes,
+ package_index,
return_object_ptr);
if (ACPI_SUCCESS(status)) {
- return (status);
+ return (AE_OK); /* Repair was successful */
}
goto type_error_exit;
}
@@ -908,7 +988,7 @@ acpi_ns_check_object_type(char *pathname,
/* For reference objects, check that the reference type is correct */
if (return_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
- status = acpi_ns_check_reference(pathname, return_object);
+ status = acpi_ns_check_reference(data, return_object);
}
return (status);
@@ -917,33 +997,19 @@ acpi_ns_check_object_type(char *pathname,
/* Create a string with all expected types for this predefined object */
- j = 1;
- type_buffer[0] = 0;
- this_rtype = ACPI_RTYPE_INTEGER;
-
- for (i = 0; i < ACPI_NUM_RTYPES; i++) {
-
- /* If one of the expected types, concatenate the name of this type */
-
- if (expected_btypes & this_rtype) {
- ACPI_STRCAT(type_buffer, &acpi_rtype_names[i][j]);
- j = 0; /* Use name separator from now on */
- }
- this_rtype <<= 1; /* Next Rtype */
- }
+ acpi_ns_get_expected_types(type_buffer, expected_btypes);
- if (package_index == ACPI_NOT_PACKAGE) {
- ACPI_WARNING((AE_INFO,
- "%s: Return type mismatch - found %s, expected %s",
- pathname,
- acpi_ut_get_object_type_name(return_object),
- type_buffer));
+ if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return type mismatch - found %s, expected %s",
+ acpi_ut_get_object_type_name
+ (return_object), type_buffer));
} else {
- ACPI_WARNING((AE_INFO,
- "%s: Return Package type mismatch at index %u - "
- "found %s, expected %s", pathname, package_index,
- acpi_ut_get_object_type_name(return_object),
- type_buffer));
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package type mismatch at index %u - "
+ "found %s, expected %s", package_index,
+ acpi_ut_get_object_type_name
+ (return_object), type_buffer));
}
return (AE_AML_OPERAND_TYPE);
@@ -953,7 +1019,7 @@ acpi_ns_check_object_type(char *pathname,
*
* FUNCTION: acpi_ns_check_reference
*
- * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * PARAMETERS: Data - Pointer to validation data structure
* return_object - Object returned from the evaluation of a
* method or object
*
@@ -966,7 +1032,7 @@ acpi_ns_check_object_type(char *pathname,
******************************************************************************/
static acpi_status
-acpi_ns_check_reference(char *pathname,
+acpi_ns_check_reference(struct acpi_predefined_data *data,
union acpi_operand_object *return_object)
{
@@ -979,94 +1045,46 @@ acpi_ns_check_reference(char *pathname,
return (AE_OK);
}
- ACPI_WARNING((AE_INFO,
- "%s: Return type mismatch - "
- "unexpected reference object type [%s] %2.2X",
- pathname, acpi_ut_get_reference_name(return_object),
- return_object->reference.class));
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return type mismatch - unexpected reference object type [%s] %2.2X",
+ acpi_ut_get_reference_name(return_object),
+ return_object->reference.class));
return (AE_AML_OPERAND_TYPE);
}
/*******************************************************************************
*
- * FUNCTION: acpi_ns_repair_object
+ * FUNCTION: acpi_ns_get_expected_types
*
- * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
- * package_index - Used to determine if target is in a package
- * return_object_ptr - Pointer to the object returned from the
- * evaluation of a method or object
+ * PARAMETERS: Buffer - Pointer to where the string is returned
+ * expected_btypes - Bitmap of expected return type(s)
*
- * RETURN: Status. AE_OK if repair was successful.
+ * RETURN: Buffer is populated with type names.
*
- * DESCRIPTION: Attempt to repair/convert a return object of a type that was
- * not expected.
+ * DESCRIPTION: Translate the expected types bitmap into a string of ascii
+ * names of expected types, for use in warning messages.
*
******************************************************************************/
-static acpi_status
-acpi_ns_repair_object(u32 expected_btypes,
- u32 package_index,
- union acpi_operand_object **return_object_ptr)
+static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes)
{
- union acpi_operand_object *return_object = *return_object_ptr;
- union acpi_operand_object *new_object;
- acpi_size length;
-
- switch (return_object->common.type) {
- case ACPI_TYPE_BUFFER:
-
- if (!(expected_btypes & ACPI_RTYPE_STRING)) {
- return (AE_AML_OPERAND_TYPE);
- }
-
- /*
- * Have a Buffer, expected a String, convert. Use a to_string
- * conversion, no transform performed on the buffer data. The best
- * example of this is the _BIF method, where the string data from
- * the battery is often (incorrectly) returned as buffer object(s).
- */
- length = 0;
- while ((length < return_object->buffer.length) &&
- (return_object->buffer.pointer[length])) {
- length++;
- }
-
- /* Allocate a new string object */
-
- new_object = acpi_ut_create_string_object(length);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
+ u32 this_rtype;
+ u32 i;
+ u32 j;
- /*
- * Copy the raw buffer data with no transform. String is already NULL
- * terminated at Length+1.
- */
- ACPI_MEMCPY(new_object->string.pointer,
- return_object->buffer.pointer, length);
+ j = 1;
+ buffer[0] = 0;
+ this_rtype = ACPI_RTYPE_INTEGER;
- /* Install the new return object */
+ for (i = 0; i < ACPI_NUM_RTYPES; i++) {
- acpi_ut_remove_reference(return_object);
- *return_object_ptr = new_object;
+ /* If one of the expected types, concatenate the name of this type */
- /*
- * If the object is a package element, we need to:
- * 1. Decrement the reference count of the orignal object, it was
- * incremented when building the package
- * 2. Increment the reference count of the new object, it will be
- * decremented when releasing the package
- */
- if (package_index != ACPI_NOT_PACKAGE) {
- acpi_ut_remove_reference(return_object);
- acpi_ut_add_reference(new_object);
+ if (expected_btypes & this_rtype) {
+ ACPI_STRCAT(buffer, &acpi_rtype_names[i][j]);
+ j = 0; /* Use name separator from now on */
}
- return (AE_OK);
-
- default:
- break;
+ this_rtype <<= 1; /* Next Rtype */
}
-
- return (AE_AML_OPERAND_TYPE);
}
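The acpi_ns_get_expected_types() helper above builds a '/'-separated type-name string by indexing past the leading separator of the first matching name only. A minimal standalone sketch of that indexing trick, using illustrative names rather than the real ACPICA type table:

#include <stdio.h>
#include <string.h>

#define RTYPE_INTEGER   0x01
#define RTYPE_STRING    0x02
#define RTYPE_BUFFER    0x04
#define RTYPE_PACKAGE   0x08
#define NUM_RTYPES      4

/* First character of each name acts as the separator */
static const char *rtype_names[NUM_RTYPES] = {
	"/Integer", "/String", "/Buffer", "/Package"
};

static void get_expected_types(char *buffer, unsigned int expected_btypes)
{
	unsigned int this_rtype = RTYPE_INTEGER;
	unsigned int skip_separator = 1;
	unsigned int i;

	buffer[0] = 0;
	for (i = 0; i < NUM_RTYPES; i++) {
		if (expected_btypes & this_rtype) {
			/* Skip the leading '/' for the first name only */
			strcat(buffer, &rtype_names[i][skip_separator]);
			skip_separator = 0;
		}
		this_rtype <<= 1;	/* Next return type bit */
	}
}

int main(void)
{
	char buffer[48];

	get_expected_types(buffer, RTYPE_INTEGER | RTYPE_BUFFER);
	printf("%s\n", buffer);		/* Prints: Integer/Buffer */
	return 0;
}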
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
new file mode 100644
index 00000000000..db2b2a99c3a
--- /dev/null
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * Module Name: nsrepair - Repair for objects returned by predefined methods
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2009, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acpredef.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsrepair")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_repair_object
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * expected_btypes - Object types expected
+ * package_index - Index of object within parent package (if
+ * applicable - ACPI_NOT_PACKAGE_ELEMENT
+ * otherwise)
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if repair was successful.
+ *
+ * DESCRIPTION: Attempt to repair/convert a return object of a type that was
+ * not expected.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_repair_object(struct acpi_predefined_data *data,
+ u32 expected_btypes,
+ u32 package_index,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object *new_object;
+ acpi_size length;
+
+ switch (return_object->common.type) {
+ case ACPI_TYPE_BUFFER:
+
+ /* Does the method/object legally return a string? */
+
+ if (!(expected_btypes & ACPI_RTYPE_STRING)) {
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Have a Buffer, expected a String, convert. Use a to_string
+ * conversion, no transform performed on the buffer data. The best
+ * example of this is the _BIF method, where the string data from
+ * the battery is often (incorrectly) returned as buffer object(s).
+ */
+ length = 0;
+ while ((length < return_object->buffer.length) &&
+ (return_object->buffer.pointer[length])) {
+ length++;
+ }
+
+ /* Allocate a new string object */
+
+ new_object = acpi_ut_create_string_object(length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ /*
+ * Copy the raw buffer data with no transform. String is already NULL
+ * terminated at Length+1.
+ */
+ ACPI_MEMCPY(new_object->string.pointer,
+ return_object->buffer.pointer, length);
+
+ /*
+ * If the original object is a package element, we need to:
+ * 1. Set the reference count of the new object to match the
+ * reference count of the old object.
+ * 2. Decrement the reference count of the original object.
+ */
+ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ new_object->common.reference_count =
+ return_object->common.reference_count;
+
+ if (return_object->common.reference_count > 1) {
+ return_object->common.reference_count--;
+ }
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ data->node_flags,
+ "Converted Buffer to expected String at index %u",
+ package_index));
+ } else {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ data->node_flags,
+ "Converted Buffer to expected String"));
+ }
+
+ /* Delete old object, install the new return object */
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_object;
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+
+ default:
+ break;
+ }
+
+ return (AE_AML_OPERAND_TYPE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_repair_package_list
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * obj_desc_ptr - Pointer to the object to repair. The new
+ * package object is returned here,
+ * overwriting the old object.
+ *
+ * RETURN: Status, new object in *obj_desc_ptr
+ *
+ * DESCRIPTION: Repair a common problem with objects that are defined to return
+ *              a variable-length Package of Packages. If only one sub-Package
+ *              is required, some BIOS code mistakenly declares a single flat
+ *              Package instead of a Package with one sub-Package. This
+ * function attempts to repair this error by wrapping a Package
+ * object around the original Package, creating the correct
+ * Package with one sub-Package.
+ *
+ * Names that can be repaired in this manner include:
+ *              _ALR, _CSD, _HPX, _MLS, _PRT, _PSS, _TRT, _TSS
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_repair_package_list(struct acpi_predefined_data *data,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_operand_object *pkg_obj_desc;
+
+ /*
+ * Create the new outer package and populate it. The new package will
+ * have a single element, the lone subpackage.
+ */
+ pkg_obj_desc = acpi_ut_create_package_object(1);
+ if (!pkg_obj_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ pkg_obj_desc->package.elements[0] = *obj_desc_ptr;
+
+ /* Return the new object in the object pointer */
+
+ *obj_desc_ptr = pkg_obj_desc;
+ data->flags |= ACPI_OBJECT_REPAIRED;
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Incorrectly formed Package, attempting repair"));
+
+ return (AE_OK);
+}
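The Buffer-to-String repair in acpi_ns_repair_object() measures the string portion of the buffer and copies it untransformed into a new, NUL-terminated string object. A hedged standalone sketch of the same idea in plain libc terms (the real code works on acpi_operand_object unions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *buffer_to_string(const unsigned char *buf, size_t buf_len)
{
	size_t length = 0;
	char *str;

	/* Stop at the first NUL byte or at the end of the buffer */
	while (length < buf_len && buf[length]) {
		length++;
	}

	str = calloc(length + 1, 1);	/* Implicit NUL terminator */
	if (!str) {
		return NULL;
	}

	memcpy(str, buf, length);	/* Raw copy, no transform */
	return str;
}

int main(void)
{
	/* e.g. a _BIF battery string incorrectly returned as a Buffer */
	const unsigned char bif_model[8] = { 'B', 'A', 'T', '1', 0, 0, 0, 0 };
	char *s = buffer_to_string(bif_model, sizeof(bif_model));

	printf("%s\n", s ? s : "(alloc failed)");
	free(s);
	return 0;
}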
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 78277ed0833..ea55ab4f984 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -88,7 +88,8 @@ acpi_ns_report_error(const char *module_name,
/* There is a non-ascii character in the name */
- ACPI_MOVE_32_TO_32(&bad_name, internal_name);
+ ACPI_MOVE_32_TO_32(&bad_name,
+ ACPI_CAST_PTR(u32, internal_name));
acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
} else {
/* Convert path to external format */
@@ -836,7 +837,7 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
acpi_status status;
char *internal_path;
- ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));
if (!pathname) {
*return_node = prefix_node;
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index daf4ad37896..4929dbdbc8f 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -535,10 +535,11 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
acpi_status status;
struct acpi_namespace_node *node;
u32 flags;
- struct acpica_device_id hid;
- struct acpi_compatible_id_list *cid;
+ struct acpica_device_id *hid;
+ struct acpica_device_id_list *cid;
u32 i;
- int found;
+ u8 found;
+ int no_match;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -582,10 +583,14 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
return (AE_CTRL_DEPTH);
}
- if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
-
- /* Get the list of Compatible IDs */
+ no_match = ACPI_STRCMP(hid->string, info->hid);
+ ACPI_FREE(hid);
+ if (no_match) {
+ /*
+ * HID does not match, attempt match within the
+ * list of Compatible IDs (CIDs)
+ */
status = acpi_ut_execute_CID(node, &cid);
if (status == AE_NOT_FOUND) {
return (AE_OK);
@@ -597,10 +602,8 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
found = 0;
for (i = 0; i < cid->count; i++) {
- if (ACPI_STRNCMP(cid->id[i].value, info->hid,
- sizeof(struct
- acpi_compatible_id)) ==
- 0) {
+ if (ACPI_STRCMP(cid->ids[i].string, info->hid)
+ == 0) {
found = 1;
break;
}
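The reworked acpi_ns_get_device_callback() compares the requested ID against the _HID string first and falls back to the _CID list only on a mismatch. A minimal sketch of that matching order using plain strings in place of acpica_device_id:

#include <stdio.h>
#include <string.h>

static int device_matches(const char *wanted, const char *hid,
			  const char **cids, int cid_count)
{
	int i;

	if (strcmp(hid, wanted) == 0) {
		return 1;
	}

	/* HID did not match; try each Compatible ID */
	for (i = 0; i < cid_count; i++) {
		if (strcmp(cids[i], wanted) == 0) {
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	const char *cids[] = { "PNP0A08", "PNP0A03" };

	printf("%d\n", device_matches("PNP0A03", "ACPI0016", cids, 2)); /* 1 */
	return 0;
}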
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index f23593d6add..ddc84af6336 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -51,6 +51,11 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfname")
+/* Local prototypes */
+static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
+ struct acpica_device_id *source,
+ char *string_area);
+
/******************************************************************************
*
* FUNCTION: acpi_get_handle
@@ -68,6 +73,7 @@ ACPI_MODULE_NAME("nsxfname")
* namespace handle.
*
******************************************************************************/
+
acpi_status
acpi_get_handle(acpi_handle parent,
acpi_string pathname, acpi_handle * ret_handle)
@@ -210,10 +216,38 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
/******************************************************************************
*
+ * FUNCTION: acpi_ns_copy_device_id
+ *
+ * PARAMETERS: Dest - Pointer to the destination DEVICE_ID
+ * Source - Pointer to the source DEVICE_ID
+ * string_area - Pointer to where to copy the dest string
+ *
+ * RETURN: Pointer to the next string area
+ *
+ * DESCRIPTION: Copy a single DEVICE_ID, including the string data.
+ *
+ ******************************************************************************/
+static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
+ struct acpica_device_id *source,
+ char *string_area)
+{
+ /* Create the destination DEVICE_ID */
+
+ dest->string = string_area;
+ dest->length = source->length;
+
+ /* Copy actual string and return a pointer to the next string area */
+
+ ACPI_MEMCPY(string_area, source->string, source->length);
+ return (string_area + source->length);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_get_object_info
*
- * PARAMETERS: Handle - Object Handle
- * Buffer - Where the info is returned
+ * PARAMETERS: Handle - Object Handle
+ * return_buffer - Where the info is returned
*
* RETURN: Status
*
@@ -221,33 +255,37 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
* namespace node and possibly by running several standard
* control methods (Such as in the case of a device.)
*
+ * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA,
+ * _ADR, _sx_w, and _sx_d methods.
+ *
+ * Note: Allocates the return buffer, must be freed by the caller.
+ *
******************************************************************************/
+
acpi_status
-acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
+acpi_get_object_info(acpi_handle handle,
+ struct acpi_device_info **return_buffer)
{
- acpi_status status;
struct acpi_namespace_node *node;
struct acpi_device_info *info;
- struct acpi_device_info *return_info;
- struct acpi_compatible_id_list *cid_list = NULL;
- acpi_size size;
+ struct acpica_device_id_list *cid_list = NULL;
+ struct acpica_device_id *hid = NULL;
+ struct acpica_device_id *uid = NULL;
+ char *next_id_string;
+ acpi_object_type type;
+ acpi_name name;
+ u8 param_count = 0;
+ u8 valid = 0;
+ u32 info_size;
+ u32 i;
+ acpi_status status;
/* Parameter validation */
- if (!handle || !buffer) {
+ if (!handle || !return_buffer) {
return (AE_BAD_PARAMETER);
}
- status = acpi_ut_validate_buffer(buffer);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
- if (!info) {
- return (AE_NO_MEMORY);
- }
-
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto cleanup;
@@ -256,66 +294,91 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
node = acpi_ns_map_handle_to_node(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- status = AE_BAD_PARAMETER;
- goto cleanup;
+ return (AE_BAD_PARAMETER);
}
- /* Init return structure */
-
- size = sizeof(struct acpi_device_info);
+ /* Get the namespace node data while the namespace is locked */
- info->type = node->type;
- info->name = node->name.integer;
- info->valid = 0;
+ info_size = sizeof(struct acpi_device_info);
+ type = node->type;
+ name = node->name.integer;
if (node->type == ACPI_TYPE_METHOD) {
- info->param_count = node->object->method.param_count;
+ param_count = node->object->method.param_count;
}
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return (status);
}
- /* If not a device, we are all done */
-
- if (info->type == ACPI_TYPE_DEVICE) {
+ if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
/*
- * Get extra info for ACPI Devices objects only:
- * Run the Device _HID, _UID, _CID, _STA, _ADR and _sx_d methods.
+ * Get extra info for ACPI Device/Processor objects only:
+ * Run the Device _HID, _UID, and _CID methods.
*
* Note: none of these methods are required, so they may or may
- * not be present for this device. The Info->Valid bitfield is used
- * to indicate which methods were found and ran successfully.
+ * not be present for this device. The Info->Valid bitfield is used
+ * to indicate which methods were found and run successfully.
*/
/* Execute the Device._HID method */
- status = acpi_ut_execute_HID(node, &info->hardware_id);
+ status = acpi_ut_execute_HID(node, &hid);
if (ACPI_SUCCESS(status)) {
- info->valid |= ACPI_VALID_HID;
+ info_size += hid->length;
+ valid |= ACPI_VALID_HID;
}
/* Execute the Device._UID method */
- status = acpi_ut_execute_UID(node, &info->unique_id);
+ status = acpi_ut_execute_UID(node, &uid);
if (ACPI_SUCCESS(status)) {
- info->valid |= ACPI_VALID_UID;
+ info_size += uid->length;
+ valid |= ACPI_VALID_UID;
}
/* Execute the Device._CID method */
status = acpi_ut_execute_CID(node, &cid_list);
if (ACPI_SUCCESS(status)) {
- size += cid_list->size;
- info->valid |= ACPI_VALID_CID;
+
+ /* Add size of CID strings and CID pointer array */
+
+ info_size +=
+ (cid_list->list_size -
+ sizeof(struct acpica_device_id_list));
+ valid |= ACPI_VALID_CID;
}
+ }
+
+ /*
+ * Now that we have the variable-length data, we can allocate the
+ * return buffer
+ */
+ info = ACPI_ALLOCATE_ZEROED(info_size);
+ if (!info) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Get the fixed-length data */
+
+ if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
+ /*
+ * Get extra info for ACPI Device/Processor objects only:
+		 * Run the _STA, _ADR, _sx_w, and _sx_d methods.
+ *
+ * Note: none of these methods are required, so they may or may
+ * not be present for this device. The Info->Valid bitfield is used
+ * to indicate which methods were found and run successfully.
+ */
/* Execute the Device._STA method */
status = acpi_ut_execute_STA(node, &info->current_status);
if (ACPI_SUCCESS(status)) {
- info->valid |= ACPI_VALID_STA;
+ valid |= ACPI_VALID_STA;
}
/* Execute the Device._ADR method */
@@ -323,36 +386,100 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, node,
&info->address);
if (ACPI_SUCCESS(status)) {
- info->valid |= ACPI_VALID_ADR;
+ valid |= ACPI_VALID_ADR;
+ }
+
+ /* Execute the Device._sx_w methods */
+
+ status = acpi_ut_execute_power_methods(node,
+ acpi_gbl_lowest_dstate_names,
+ ACPI_NUM_sx_w_METHODS,
+ info->lowest_dstates);
+ if (ACPI_SUCCESS(status)) {
+ valid |= ACPI_VALID_SXWS;
}
/* Execute the Device._sx_d methods */
- status = acpi_ut_execute_sxds(node, info->highest_dstates);
+ status = acpi_ut_execute_power_methods(node,
+ acpi_gbl_highest_dstate_names,
+ ACPI_NUM_sx_d_METHODS,
+ info->highest_dstates);
if (ACPI_SUCCESS(status)) {
- info->valid |= ACPI_VALID_SXDS;
+ valid |= ACPI_VALID_SXDS;
}
}
- /* Validate/Allocate/Clear caller buffer */
+ /*
+ * Create a pointer to the string area of the return buffer.
+ * Point to the end of the base struct acpi_device_info structure.
+ */
+ next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids);
+ if (cid_list) {
- status = acpi_ut_initialize_buffer(buffer, size);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
+ /* Point past the CID DEVICE_ID array */
+
+ next_id_string +=
+ ((acpi_size) cid_list->count *
+ sizeof(struct acpica_device_id));
}
- /* Populate the return buffer */
+ /*
+ * Copy the HID, UID, and CIDs to the return buffer. The variable-length
+ * strings are copied to the reserved area at the end of the buffer.
+ *
+ * For HID and CID, check if the ID is a PCI Root Bridge.
+ */
+ if (hid) {
+ next_id_string = acpi_ns_copy_device_id(&info->hardware_id,
+ hid, next_id_string);
+
+ if (acpi_ut_is_pci_root_bridge(hid->string)) {
+ info->flags |= ACPI_PCI_ROOT_BRIDGE;
+ }
+ }
- return_info = buffer->pointer;
- ACPI_MEMCPY(return_info, info, sizeof(struct acpi_device_info));
+ if (uid) {
+ next_id_string = acpi_ns_copy_device_id(&info->unique_id,
+ uid, next_id_string);
+ }
if (cid_list) {
- ACPI_MEMCPY(&return_info->compatibility_id, cid_list,
- cid_list->size);
+ info->compatible_id_list.count = cid_list->count;
+ info->compatible_id_list.list_size = cid_list->list_size;
+
+ /* Copy each CID */
+
+ for (i = 0; i < cid_list->count; i++) {
+ next_id_string =
+ acpi_ns_copy_device_id(&info->compatible_id_list.
+ ids[i], &cid_list->ids[i],
+ next_id_string);
+
+ if (acpi_ut_is_pci_root_bridge(cid_list->ids[i].string)) {
+ info->flags |= ACPI_PCI_ROOT_BRIDGE;
+ }
+ }
}
+ /* Copy the fixed-length data */
+
+ info->info_size = info_size;
+ info->type = type;
+ info->name = name;
+ info->param_count = param_count;
+ info->valid = valid;
+
+ *return_buffer = info;
+ status = AE_OK;
+
cleanup:
- ACPI_FREE(info);
+ if (hid) {
+ ACPI_FREE(hid);
+ }
+ if (uid) {
+ ACPI_FREE(uid);
+ }
if (cid_list) {
ACPI_FREE(cid_list);
}
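acpi_get_object_info() now sizes the variable-length IDs first, then makes a single allocation whose fixed-length header is followed by a string area that the ID descriptors point into. A simplified sketch of that layout pattern; the structures here are stand-ins, not struct acpi_device_info:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct id {
	char *string;
	unsigned int length;	/* Includes the NUL terminator */
};

struct info {
	unsigned int info_size;
	struct id hardware_id;
	/* String area follows the fixed-length part */
};

static char *copy_id(struct id *dest, const char *src, char *string_area)
{
	dest->length = (unsigned int)strlen(src) + 1;
	dest->string = string_area;
	memcpy(string_area, src, dest->length);
	return string_area + dest->length;	/* Next free string slot */
}

int main(void)
{
	const char *hid = "PNP0A03";
	unsigned int size = sizeof(struct info) + (unsigned int)strlen(hid) + 1;
	struct info *info = calloc(1, size);
	char *next;

	if (!info) {
		return 1;
	}

	info->info_size = size;
	next = copy_id(&info->hardware_id, hid, (char *)(info + 1));
	(void)next;	/* Further IDs would be packed here */

	printf("HID = %s (total %u bytes)\n", info->hardware_id.string,
	       info->info_size);
	free(info);
	return 0;
}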
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index c5f6ce19a40..cd7995b3aed 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -86,6 +86,9 @@ static acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status);
+static void
+acpi_ps_link_module_code(u8 *aml_start, u32 aml_length, acpi_owner_id owner_id);
+
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_aml_opcode
@@ -390,6 +393,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
{
acpi_status status = AE_OK;
union acpi_parse_object *arg = NULL;
+ const struct acpi_opcode_info *op_info;
ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
@@ -449,13 +453,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
INCREMENT_ARG_LIST(walk_state->arg_types);
}
- /* Special processing for certain opcodes */
-
- /* TBD (remove): Temporary mechanism to disable this code if needed */
-
-#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
-
- if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
+ /*
+ * Handle executable code at "module-level". This refers to
+ * executable opcodes that appear outside of any control method.
+ */
+ if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2) &&
((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
/*
* We want to skip If/Else/While constructs during Pass1 because we
@@ -469,6 +471,23 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
case AML_ELSE_OP:
case AML_WHILE_OP:
+ /*
+ * Currently supported module-level opcodes are:
+ * IF/ELSE/WHILE. These appear to be the most common,
+ * and easiest to support since they open an AML
+ * package.
+ */
+ if (walk_state->pass_number ==
+ ACPI_IMODE_LOAD_PASS1) {
+ acpi_ps_link_module_code(aml_op_start,
+ walk_state->
+ parser_state.
+ pkg_end -
+ aml_op_start,
+ walk_state->
+ owner_id);
+ }
+
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Pass1: Skipping an If/Else/While body\n"));
@@ -480,10 +499,34 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
break;
default:
+ /*
+ * Check for an unsupported executable opcode at module
+ * level. We must be in PASS1, the parent must be a SCOPE,
+			 * the opcode class must be EXECUTE, and the opcode must
+ * not be an argument to another opcode.
+ */
+ if ((walk_state->pass_number ==
+ ACPI_IMODE_LOAD_PASS1)
+ && (op->common.parent->common.aml_opcode ==
+ AML_SCOPE_OP)) {
+ op_info =
+ acpi_ps_get_opcode_info(op->common.
+ aml_opcode);
+ if ((op_info->class ==
+ AML_CLASS_EXECUTE) && (!arg)) {
+ ACPI_WARNING((AE_INFO,
+ "Detected an unsupported executable opcode "
+ "at module-level: [0x%.4X] at table offset 0x%.4X",
+ op->common.aml_opcode,
+ (u32)((aml_op_start - walk_state->parser_state.aml_start)
+ + sizeof(struct acpi_table_header))));
+ }
+ }
break;
}
}
-#endif
+
+ /* Special processing for certain opcodes */
switch (op->common.aml_opcode) {
case AML_METHOD_OP:
@@ -553,6 +596,66 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
+ * FUNCTION: acpi_ps_link_module_code
+ *
+ * PARAMETERS: aml_start - Pointer to the AML
+ * aml_length - Length of executable AML
+ * owner_id - owner_id of module level code
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Wrap the module-level code with a method object and link the
+ * object to the global list. Note, the mutex field of the method
+ * object is used to link multiple module-level code objects.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ps_link_module_code(u8 *aml_start, u32 aml_length, acpi_owner_id owner_id)
+{
+ union acpi_operand_object *prev;
+ union acpi_operand_object *next;
+ union acpi_operand_object *method_obj;
+
+ /* Get the tail of the list */
+
+ prev = next = acpi_gbl_module_code_list;
+ while (next) {
+ prev = next;
+ next = next->method.mutex;
+ }
+
+ /*
+ * Insert the module level code into the list. Merge it if it is
+ * adjacent to the previous element.
+ */
+ if (!prev ||
+ ((prev->method.aml_start + prev->method.aml_length) != aml_start)) {
+
+ /* Create, initialize, and link a new temporary method object */
+
+ method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
+ if (!method_obj) {
+ return;
+ }
+
+ method_obj->method.aml_start = aml_start;
+ method_obj->method.aml_length = aml_length;
+ method_obj->method.owner_id = owner_id;
+ method_obj->method.flags |= AOPOBJ_MODULE_LEVEL;
+
+ if (!prev) {
+ acpi_gbl_module_code_list = method_obj;
+ } else {
+ prev->method.mutex = method_obj;
+ }
+ } else {
+ prev->method.aml_length += aml_length;
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ps_complete_op
*
* PARAMETERS: walk_state - Current state
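acpi_ps_link_module_code() walks to the tail of the module-level code list and either extends the previous range (when the new AML chunk is contiguous) or links a new method object. A standalone sketch of that append-or-merge pattern, with a plain list node instead of the method.mutex link:

#include <stdio.h>
#include <stdlib.h>

struct code_range {
	const unsigned char *start;
	unsigned long length;
	struct code_range *next;
};

static struct code_range *range_list;

static void link_module_code(const unsigned char *start, unsigned long length)
{
	struct code_range *prev = NULL;
	struct code_range *next = range_list;
	struct code_range *node;

	/* Find the tail of the list */
	while (next) {
		prev = next;
		next = next->next;
	}

	/* Merge if the new chunk starts exactly where the last one ends */
	if (prev && (prev->start + prev->length == start)) {
		prev->length += length;
		return;
	}

	node = calloc(1, sizeof(*node));
	if (!node) {
		return;
	}
	node->start = start;
	node->length = length;

	if (!prev) {
		range_list = node;
	} else {
		prev->next = node;
	}
}

int main(void)
{
	static const unsigned char aml[64];

	link_module_code(aml, 16);
	link_module_code(aml + 16, 8);	/* Adjacent: merged into one range */
	link_module_code(aml + 40, 4);	/* Gap: new node appended */

	printf("first range length = %lu\n", range_list->length);	/* 24 */
	return 0;
}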
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index ff06032c0f0..dd9731c29a7 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -280,6 +280,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
goto cleanup;
}
+ if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) {
+ walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
+ }
+
/* Invoke an internal method if necessary */
if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index ef7d2c2d8f0..1f15497f00d 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -49,6 +49,12 @@
ACPI_MODULE_NAME("tbutils")
/* Local prototypes */
+static void acpi_tb_fix_string(char *string, acpi_size length);
+
+static void
+acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
+ struct acpi_table_header *header);
+
static acpi_physical_address
acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
@@ -161,6 +167,59 @@ u8 acpi_tb_tables_loaded(void)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_fix_string
+ *
+ * PARAMETERS: String - String to be repaired
+ * Length - Maximum length
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Replace every non-printable or non-ascii byte in the string
+ * with a question mark '?'.
+ *
+ ******************************************************************************/
+
+static void acpi_tb_fix_string(char *string, acpi_size length)
+{
+
+ while (length && *string) {
+ if (!ACPI_IS_PRINT(*string)) {
+ *string = '?';
+ }
+ string++;
+ length--;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_cleanup_table_header
+ *
+ * PARAMETERS: out_header - Where the cleaned header is returned
+ * Header - Input ACPI table header
+ *
+ * RETURN: Returns the cleaned header in out_header
+ *
+ * DESCRIPTION: Copy the table header and ensure that all "string" fields in
+ * the header consist of printable characters.
+ *
+ ******************************************************************************/
+
+static void
+acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
+ struct acpi_table_header *header)
+{
+
+ ACPI_MEMCPY(out_header, header, sizeof(struct acpi_table_header));
+
+ acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
+ acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_print_table_header
*
* PARAMETERS: Address - Table physical address
@@ -176,6 +235,7 @@ void
acpi_tb_print_table_header(acpi_physical_address address,
struct acpi_table_header *header)
{
+ struct acpi_table_header local_header;
/*
* The reason that the Address is cast to a void pointer is so that we
@@ -192,6 +252,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* RSDP has no common fields */
+ ACPI_MEMCPY(local_header.oem_id,
+ ACPI_CAST_PTR(struct acpi_table_rsdp,
+ header)->oem_id, ACPI_OEM_ID_SIZE);
+ acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
+
ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
ACPI_CAST_PTR (void, address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
@@ -200,18 +265,21 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->length : 20,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
- ACPI_CAST_PTR(struct acpi_table_rsdp,
- header)->oem_id));
+ local_header.oem_id));
} else {
/* Standard ACPI table with full common header */
+ acpi_tb_cleanup_table_header(&local_header, header);
+
ACPI_INFO((AE_INFO,
"%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
- header->signature, ACPI_CAST_PTR (void, address),
- header->length, header->revision, header->oem_id,
- header->oem_table_id, header->oem_revision,
- header->asl_compiler_id,
- header->asl_compiler_revision));
+ local_header.signature, ACPI_CAST_PTR(void, address),
+ local_header.length, local_header.revision,
+ local_header.oem_id, local_header.oem_table_id,
+ local_header.oem_revision,
+ local_header.asl_compiler_id,
+ local_header.asl_compiler_revision));
+
}
}
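acpi_tb_fix_string() simply bounds-checks and replaces non-printable bytes with '?'. An equivalent libc-only sketch:

#include <ctype.h>
#include <stdio.h>

static void fix_string(char *string, size_t length)
{
	while (length && *string) {
		if (!isprint((unsigned char)*string)) {
			*string = '?';
		}
		string++;
		length--;
	}
}

int main(void)
{
	char oem_id[7] = { 'O', 'E', 'M', 0x01, 'I', 'D', 0 };

	fix_string(oem_id, 6);		/* ACPI_OEM_ID_SIZE is 6 */
	printf("%s\n", oem_id);		/* Prints: OEM?ID */
	return 0;
}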
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index bc171031508..96e26e70c63 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -215,6 +215,12 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Region %p\n", object));
+ /* Invalidate the region address/length via the host OS */
+
+ acpi_os_invalidate_address(object->region.space_id,
+ object->region.address,
+ (acpi_size) object->region.length);
+
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
/*
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 006b16c2601..5d54e36ab45 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -44,19 +44,10 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("uteval")
-/* Local prototypes */
-static void
-acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length);
-
-static acpi_status
-acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
- struct acpi_compatible_id *one_cid);
-
/*
* Strings supported by the _OSI predefined (internal) method.
*
@@ -78,6 +69,9 @@ static struct acpi_interface_info acpi_interfaces_supported[] = {
{"Windows 2001 SP2", ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
{"Windows 2001.1 SP1", ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
{"Windows 2006", ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */
+ {"Windows 2006.1", ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
+ {"Windows 2006 SP1", ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
+ {"Windows 2009", ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */
/* Feature Group Strings */
@@ -213,7 +207,7 @@ acpi_status acpi_osi_invalidate(char *interface)
* RETURN: Status
*
* DESCRIPTION: Evaluates a namespace object and verifies the type of the
- * return object. Common code that simplifies accessing objects
+ * return object. Common code that simplifies accessing objects
* that have required return objects of fixed types.
*
* NOTE: Internal function, no parameter validation
@@ -298,7 +292,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) {
/*
- * We received a return object, but one was not expected. This can
+ * We received a return object, but one was not expected. This can
* happen frequently if the "implicit return" feature is enabled.
* Just delete the return object and return AE_OK.
*/
@@ -340,12 +334,12 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
*
* PARAMETERS: object_name - Object name to be evaluated
* device_node - Node for the device
- * Address - Where the value is returned
+ * Value - Where the value is returned
*
* RETURN: Status
*
* DESCRIPTION: Evaluates a numeric namespace object for a selected device
- * and stores result in *Address.
+ * and stores result in *Value.
*
* NOTE: Internal function, no parameter validation
*
@@ -354,7 +348,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
acpi_status
acpi_ut_evaluate_numeric_object(char *object_name,
struct acpi_namespace_node *device_node,
- acpi_integer * address)
+ acpi_integer *value)
{
union acpi_operand_object *obj_desc;
acpi_status status;
@@ -369,295 +363,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
/* Get the returned Integer */
- *address = obj_desc->integer.value;
-
- /* On exit, we must delete the return object */
-
- acpi_ut_remove_reference(obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_copy_id_string
- *
- * PARAMETERS: Destination - Where to copy the string
- * Source - Source string
- * max_length - Length of the destination buffer
- *
- * RETURN: None
- *
- * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
- * Performs removal of a leading asterisk if present -- workaround
- * for a known issue on a bunch of machines.
- *
- ******************************************************************************/
-
-static void
-acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length)
-{
-
- /*
- * Workaround for ID strings that have a leading asterisk. This construct
- * is not allowed by the ACPI specification (ID strings must be
- * alphanumeric), but enough existing machines have this embedded in their
- * ID strings that the following code is useful.
- */
- if (*source == '*') {
- source++;
- }
-
- /* Do the actual copy */
-
- ACPI_STRNCPY(destination, source, max_length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_execute_HID
- *
- * PARAMETERS: device_node - Node for the device
- * Hid - Where the HID is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Executes the _HID control method that returns the hardware
- * ID of the device.
- *
- * NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpica_device_id *hid)
-{
- union acpi_operand_object *obj_desc;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ut_execute_HID);
-
- status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
- ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
- &obj_desc);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Convert the Numeric HID to string */
-
- acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
- hid->value);
- } else {
- /* Copy the String HID from the returned object */
-
- acpi_ut_copy_id_string(hid->value, obj_desc->string.pointer,
- sizeof(hid->value));
- }
-
- /* On exit, we must delete the return object */
-
- acpi_ut_remove_reference(obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_translate_one_cid
- *
- * PARAMETERS: obj_desc - _CID object, must be integer or string
- * one_cid - Where the CID string is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Return a numeric or string _CID value as a string.
- * (Compatible ID)
- *
- * NOTE: Assumes a maximum _CID string length of
- * ACPI_MAX_CID_LENGTH.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
- struct acpi_compatible_id *one_cid)
-{
-
- switch (obj_desc->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Convert the Numeric CID to string */
-
- acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
- one_cid->value);
- return (AE_OK);
-
- case ACPI_TYPE_STRING:
-
- if (obj_desc->string.length > ACPI_MAX_CID_LENGTH) {
- return (AE_AML_STRING_LIMIT);
- }
-
- /* Copy the String CID from the returned object */
-
- acpi_ut_copy_id_string(one_cid->value, obj_desc->string.pointer,
- ACPI_MAX_CID_LENGTH);
- return (AE_OK);
-
- default:
-
- return (AE_TYPE);
- }
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_execute_CID
- *
- * PARAMETERS: device_node - Node for the device
- * return_cid_list - Where the CID list is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Executes the _CID control method that returns one or more
- * compatible hardware IDs for the device.
- *
- * NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
- struct acpi_compatible_id_list ** return_cid_list)
-{
- union acpi_operand_object *obj_desc;
- acpi_status status;
- u32 count;
- u32 size;
- struct acpi_compatible_id_list *cid_list;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ut_execute_CID);
-
- /* Evaluate the _CID method for this device */
-
- status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
- ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
- | ACPI_BTYPE_PACKAGE, &obj_desc);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Get the number of _CIDs returned */
-
- count = 1;
- if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
- count = obj_desc->package.count;
- }
-
- /* Allocate a worst-case buffer for the _CIDs */
-
- size = (((count - 1) * sizeof(struct acpi_compatible_id)) +
- sizeof(struct acpi_compatible_id_list));
-
- cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
- if (!cid_list) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Init CID list */
-
- cid_list->count = count;
- cid_list->size = size;
-
- /*
- * A _CID can return either a single compatible ID or a package of
- * compatible IDs. Each compatible ID can be one of the following:
- * 1) Integer (32 bit compressed EISA ID) or
- * 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
- */
-
- /* The _CID object can be either a single CID or a package (list) of CIDs */
-
- if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
- /* Translate each package element */
-
- for (i = 0; i < count; i++) {
- status =
- acpi_ut_translate_one_cid(obj_desc->package.
- elements[i],
- &cid_list->id[i]);
- if (ACPI_FAILURE(status)) {
- break;
- }
- }
- } else {
- /* Only one CID, translate to a string */
-
- status = acpi_ut_translate_one_cid(obj_desc, cid_list->id);
- }
-
- /* Cleanup on error */
-
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(cid_list);
- } else {
- *return_cid_list = cid_list;
- }
-
- /* On exit, we must delete the _CID return object */
-
- acpi_ut_remove_reference(obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_execute_UID
- *
- * PARAMETERS: device_node - Node for the device
- * Uid - Where the UID is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Executes the _UID control method that returns the hardware
- * ID of the device.
- *
- * NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpica_device_id *uid)
-{
- union acpi_operand_object *obj_desc;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ut_execute_UID);
-
- status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
- ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
- &obj_desc);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Convert the Numeric UID to string */
-
- acpi_ex_unsigned_integer_to_string(obj_desc->integer.value,
- uid->value);
- } else {
- /* Copy the String UID from the returned object */
-
- acpi_ut_copy_id_string(uid->value, obj_desc->string.pointer,
- sizeof(uid->value));
- }
+ *value = obj_desc->integer.value;
/* On exit, we must delete the return object */
@@ -716,60 +422,64 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_execute_Sxds
+ * FUNCTION: acpi_ut_execute_power_methods
*
* PARAMETERS: device_node - Node for the device
- * Flags - Where the status flags are returned
+ * method_names - Array of power method names
+ * method_count - Number of methods to execute
+ * out_values - Where the power method values are returned
*
- * RETURN: Status
+ * RETURN: Status, out_values
*
- * DESCRIPTION: Executes _STA for selected device and stores results in
- * *Flags.
+ * DESCRIPTION: Executes the specified power methods for the device and returns
+ * the result(s).
*
* NOTE: Internal function, no parameter validation
*
- ******************************************************************************/
+******************************************************************************/
acpi_status
-acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest)
+acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
+ const char **method_names,
+ u8 method_count, u8 *out_values)
{
union acpi_operand_object *obj_desc;
acpi_status status;
+ acpi_status final_status = AE_NOT_FOUND;
u32 i;
- ACPI_FUNCTION_TRACE(ut_execute_sxds);
+ ACPI_FUNCTION_TRACE(ut_execute_power_methods);
- for (i = 0; i < 4; i++) {
- highest[i] = 0xFF;
+ for (i = 0; i < method_count; i++) {
+ /*
+ * Execute the power method (_sx_d or _sx_w). The only allowable
+ * return type is an Integer.
+ */
status = acpi_ut_evaluate_object(device_node,
ACPI_CAST_PTR(char,
- acpi_gbl_highest_dstate_names
- [i]),
+ method_names[i]),
ACPI_BTYPE_INTEGER, &obj_desc);
- if (ACPI_FAILURE(status)) {
- if (status != AE_NOT_FOUND) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%s on Device %4.4s, %s\n",
- ACPI_CAST_PTR(char,
- acpi_gbl_highest_dstate_names
- [i]),
- acpi_ut_get_node_name
- (device_node),
- acpi_format_exception
- (status)));
-
- return_ACPI_STATUS(status);
- }
- } else {
- /* Extract the Dstate value */
-
- highest[i] = (u8) obj_desc->integer.value;
+ if (ACPI_SUCCESS(status)) {
+ out_values[i] = (u8)obj_desc->integer.value;
/* Delete the return object */
acpi_ut_remove_reference(obj_desc);
+ final_status = AE_OK; /* At least one value is valid */
+ continue;
}
+
+ out_values[i] = ACPI_UINT8_MAX;
+ if (status == AE_NOT_FOUND) {
+ continue; /* Ignore if not found */
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Failed %s on Device %4.4s, %s\n",
+ ACPI_CAST_PTR(char, method_names[i]),
+ acpi_ut_get_node_name(device_node),
+ acpi_format_exception(status)));
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(final_status);
}
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 59e46f257c0..3f2c68f4e95 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -90,7 +90,15 @@ const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
"\\_S5_"
};
-const char *acpi_gbl_highest_dstate_names[4] = {
+const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS] = {
+ "_S0W",
+ "_S1W",
+ "_S2W",
+ "_S3W",
+ "_S4W"
+};
+
+const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
"_S1D",
"_S2D",
"_S3D",
@@ -351,6 +359,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
+ "IPMI",
"DataTable"
};
@@ -798,6 +807,7 @@ acpi_status acpi_ut_init_globals(void)
/* Namespace */
+ acpi_gbl_module_code_list = NULL;
acpi_gbl_root_node = NULL;
acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
new file mode 100644
index 00000000000..52eaae40455
--- /dev/null
+++ b/drivers/acpi/acpica/utids.c
@@ -0,0 +1,382 @@
+/******************************************************************************
+ *
+ * Module Name: utids - support for device IDs - HID, UID, CID
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2009, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utids")
+
+/* Local prototypes */
+static void acpi_ut_copy_id_string(char *destination, char *source);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_copy_id_string
+ *
+ * PARAMETERS: Destination - Where to copy the string
+ * Source - Source string
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
+ * Performs removal of a leading asterisk if present -- workaround
+ * for a known issue on a bunch of machines.
+ *
+ ******************************************************************************/
+
+static void acpi_ut_copy_id_string(char *destination, char *source)
+{
+
+ /*
+ * Workaround for ID strings that have a leading asterisk. This construct
+ * is not allowed by the ACPI specification (ID strings must be
+ * alphanumeric), but enough existing machines have this embedded in their
+ * ID strings that the following code is useful.
+ */
+ if (*source == '*') {
+ source++;
+ }
+
+ /* Do the actual copy */
+
+ ACPI_STRCPY(destination, source);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_HID
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_id - Where the string HID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _HID control method that returns the hardware
+ * ID of the device. The HID is either a 32-bit encoded EISAID
+ * Integer or a String. A string is always returned. An EISAID
+ * is converted to a string.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id **return_id)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpica_device_id *hid;
+ u32 length;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_HID);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
+ ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the size of the String to be returned, includes null terminator */
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+ length = ACPI_EISAID_STRING_SIZE;
+ } else {
+ length = obj_desc->string.length + 1;
+ }
+
+ /* Allocate a buffer for the HID */
+
+ hid =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) +
+ (acpi_size) length);
+ if (!hid) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Area for the string starts after DEVICE_ID struct */
+
+ hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id));
+
+ /* Convert EISAID to a string or simply copy existing string */
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+ acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
+ } else {
+ acpi_ut_copy_id_string(hid->string, obj_desc->string.pointer);
+ }
+
+ hid->length = length;
+ *return_id = hid;
+
+cleanup:
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_UID
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_id - Where the string UID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _UID control method that returns the unique
+ * ID of the device. The UID is either a 64-bit Integer (NOT an
+ * EISAID) or a string. Always returns a string. A 64-bit integer
+ * is converted to a decimal string.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id **return_id)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpica_device_id *uid;
+ u32 length;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_UID);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
+ ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
+ &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the size of the String to be returned, includes null terminator */
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+ length = ACPI_MAX64_DECIMAL_DIGITS + 1;
+ } else {
+ length = obj_desc->string.length + 1;
+ }
+
+ /* Allocate a buffer for the UID */
+
+ uid =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) +
+ (acpi_size) length);
+ if (!uid) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Area for the string starts after DEVICE_ID struct */
+
+ uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id));
+
+ /* Convert an Integer to string, or just copy an existing string */
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+ acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
+ } else {
+ acpi_ut_copy_id_string(uid->string, obj_desc->string.pointer);
+ }
+
+ uid->length = length;
+ *return_id = uid;
+
+cleanup:
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_execute_CID
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_cid_list - Where the CID list is returned
+ *
+ * RETURN: Status, list of CID strings
+ *
+ * DESCRIPTION: Executes the _CID control method that returns one or more
+ * compatible hardware IDs for the device.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ * A _CID method can return either a single compatible ID or a package of
+ * compatible IDs. Each compatible ID can be one of the following:
+ * 1) Integer (32 bit compressed EISA ID) or
+ * 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
+ *
+ * The Integer CIDs are converted to string format by this function.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
+ struct acpica_device_id_list **return_cid_list)
+{
+ union acpi_operand_object **cid_objects;
+ union acpi_operand_object *obj_desc;
+ struct acpica_device_id_list *cid_list;
+ char *next_id_string;
+ u32 string_area_size;
+ u32 length;
+ u32 cid_list_size;
+ acpi_status status;
+ u32 count;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_execute_CID);
+
+ /* Evaluate the _CID method for this device */
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
+ ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
+ | ACPI_BTYPE_PACKAGE, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Get the count and size of the returned _CIDs. _CID can return either
+ * a Package of Integers/Strings or a single Integer or String.
+ * Note: This section also validates that all CID elements are of the
+ * correct type (Integer or String).
+ */
+ if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+ count = obj_desc->package.count;
+ cid_objects = obj_desc->package.elements;
+ } else { /* Single Integer or String CID */
+
+ count = 1;
+ cid_objects = &obj_desc;
+ }
+
+ string_area_size = 0;
+ for (i = 0; i < count; i++) {
+
+ /* String lengths include null terminator */
+
+ switch (cid_objects[i]->common.type) {
+ case ACPI_TYPE_INTEGER:
+ string_area_size += ACPI_EISAID_STRING_SIZE;
+ break;
+
+ case ACPI_TYPE_STRING:
+ string_area_size += cid_objects[i]->string.length + 1;
+ break;
+
+ default:
+ status = AE_TYPE;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Now that we know the length of the CIDs, allocate return buffer:
+ * 1) Size of the base structure +
+ * 2) Size of the CID DEVICE_ID array +
+ * 3) Size of the actual CID strings
+ */
+ cid_list_size = sizeof(struct acpica_device_id_list) +
+ ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size;
+
+ cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size);
+ if (!cid_list) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Area for CID strings starts after the CID DEVICE_ID array */
+
+ next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
+ ((acpi_size) count * sizeof(struct acpica_device_id));
+
+ /* Copy/convert the CIDs to the return buffer */
+
+ for (i = 0; i < count; i++) {
+ if (cid_objects[i]->common.type == ACPI_TYPE_INTEGER) {
+
+ /* Convert the Integer (EISAID) CID to a string */
+
+ acpi_ex_eisa_id_to_string(next_id_string,
+ cid_objects[i]->integer.
+ value);
+ length = ACPI_EISAID_STRING_SIZE;
+ } else { /* ACPI_TYPE_STRING */
+
+ /* Copy the String CID from the returned object */
+
+ acpi_ut_copy_id_string(next_id_string,
+ cid_objects[i]->string.pointer);
+ length = cid_objects[i]->string.length + 1;
+ }
+
+ cid_list->ids[i].string = next_id_string;
+ cid_list->ids[i].length = length;
+ next_id_string += length;
+ }
+
+ /* Finish the CID list */
+
+ cid_list->count = count;
+ cid_list->list_size = cid_list_size;
+ *return_cid_list = cid_list;
+
+cleanup:
+
+ /* On exit, we must delete the _CID return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
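
The three helpers in the new utids.c all hand back the ID in a single allocation: a struct acpica_device_id header followed immediately by the string area, so the caller releases both with one free. A standalone sketch of that layout, assuming plain malloc/free in place of ACPI_ALLOCATE_ZEROED and a simplified device_id type:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mirrors the shape of struct acpica_device_id: length + string pointer. */
    struct device_id {
            unsigned int length;
            char *string;
    };

    static struct device_id *make_id(const char *source)
    {
            size_t length = strlen(source) + 1;     /* include NUL, as in the hunk */
            struct device_id *id;

            /* One allocation: the header, then the string area right after it. */
            id = calloc(1, sizeof(*id) + length);
            if (!id)
                    return NULL;

            id->string = (char *)(id + 1);          /* area starts after the struct */
            /* Leading '*' workaround, as in acpi_ut_copy_id_string(). */
            if (*source == '*')
                    source++;
            strcpy(id->string, source);
            id->length = (unsigned int)length;
            return id;
    }

    int main(void)
    {
            struct device_id *hid = make_id("*PNP0C0A");

            if (!hid)
                    return 1;
            printf("HID = %s (length %u)\n", hid->string, hid->length);
            free(hid);      /* a single free releases header and string together */
            return 0;
    }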
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index a54ca84eb36..9d0919ebf7b 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -99,33 +99,19 @@ static void acpi_ut_terminate(void)
*
* FUNCTION: acpi_ut_subsystem_shutdown
*
- * PARAMETERS: none
+ * PARAMETERS: None
*
- * RETURN: none
+ * RETURN: None
*
- * DESCRIPTION: Shutdown the various subsystems. Don't delete the mutex
- * objects here -- because the AML debugger may be still running.
+ * DESCRIPTION: Shutdown the various components. Do not delete the mutex
+ * objects here, because the AML debugger may still be running.
*
******************************************************************************/
void acpi_ut_subsystem_shutdown(void)
{
-
ACPI_FUNCTION_TRACE(ut_subsystem_shutdown);
- /* Just exit if subsystem is already shutdown */
-
- if (acpi_gbl_shutdown) {
- ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
- return_VOID;
- }
-
- /* Subsystem appears active, go ahead and shut it down */
-
- acpi_gbl_shutdown = TRUE;
- acpi_gbl_startup_flags = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
-
#ifndef ACPI_ASL_COMPILER
/* Close the acpi_event Handling */
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index fbe782348b0..61f6315fce9 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -50,6 +50,11 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmisc")
+/*
+ * Common suffix for messages
+ */
+#define ACPI_COMMON_MSG_SUFFIX \
+ acpi_os_printf(" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
/*******************************************************************************
*
* FUNCTION: acpi_ut_validate_exception
@@ -120,6 +125,34 @@ const char *acpi_ut_validate_exception(acpi_status status)
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_is_pci_root_bridge
+ *
+ * PARAMETERS: Id - The HID/CID in string format
+ *
+ * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_is_pci_root_bridge(char *id)
+{
+
+ /*
+ * Check if this is a PCI root bridge.
+ * ACPI 3.0+: check for a PCI Express root also.
+ */
+ if (!(ACPI_STRCMP(id,
+ PCI_ROOT_HID_STRING)) ||
+ !(ACPI_STRCMP(id, PCI_EXPRESS_ROOT_HID_STRING))) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_is_aml_table
*
* PARAMETERS: Table - An ACPI table
@@ -1037,8 +1070,7 @@ acpi_error(const char *module_name, u32 line_number, const char *format, ...)
va_start(args, format);
acpi_os_vprintf(format, args);
- acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
- line_number);
+ ACPI_COMMON_MSG_SUFFIX;
va_end(args);
}
@@ -1052,8 +1084,7 @@ acpi_exception(const char *module_name,
va_start(args, format);
acpi_os_vprintf(format, args);
- acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
- line_number);
+ ACPI_COMMON_MSG_SUFFIX;
va_end(args);
}
@@ -1066,8 +1097,7 @@ acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
va_start(args, format);
acpi_os_vprintf(format, args);
- acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
- line_number);
+ ACPI_COMMON_MSG_SUFFIX;
va_end(args);
}
@@ -1088,3 +1118,46 @@ ACPI_EXPORT_SYMBOL(acpi_error)
ACPI_EXPORT_SYMBOL(acpi_exception)
ACPI_EXPORT_SYMBOL(acpi_warning)
ACPI_EXPORT_SYMBOL(acpi_info)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_predefined_warning
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * Pathname - Full pathname to the node
+ * node_flags - From Namespace node for the method/object
+ * Format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Warnings for the predefined validation module. Messages are
+ * only emitted the first time a problem with a particular
+ * method/object is detected. This prevents a flood of error
+ * messages for methods that are repeatedly evaluated.
+ *
+******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_ut_predefined_warning(const char *module_name,
+ u32 line_number,
+ char *pathname,
+ u8 node_flags, const char *format, ...)
+{
+ va_list args;
+
+ /*
+ * Warning messages for this method/object will be disabled after the
+ * first time a validation fails or an object is successfully repaired.
+ */
+ if (node_flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
+ acpi_os_printf("ACPI Warning for %s: ", pathname);
+
+ va_start(args, format);
+ acpi_os_vprintf(format, args);
+ ACPI_COMMON_MSG_SUFFIX;
+ va_end(args);
+}
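
acpi_ut_is_pci_root_bridge() reduces to two string comparisons against the conventional PCI and PCI Express root bridge IDs. A standalone illustration of the same check; the "PNP0A03"/"PNP0A08" values are assumed here to match PCI_ROOT_HID_STRING and PCI_EXPRESS_ROOT_HID_STRING and should be verified against the ACPICA headers:

    #include <stdio.h>
    #include <string.h>

    /* Conventional PCI / PCI Express root bridge _HID values (assumed). */
    #define PCI_ROOT_HID            "PNP0A03"
    #define PCI_EXPRESS_ROOT_HID    "PNP0A08"

    static int is_pci_root_bridge(const char *id)
    {
            return !strcmp(id, PCI_ROOT_HID) || !strcmp(id, PCI_EXPRESS_ROOT_HID);
    }

    int main(void)
    {
            const char *ids[] = { "PNP0A03", "PNP0A08", "PNP0C0A" };

            for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    printf("%s -> %s\n", ids[i],
                           is_pci_root_bridge(ids[i]) ? "root bridge"
                                                      : "not a root bridge");
            return 0;
    }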
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 078a22728c6..b1f5f680bc7 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -251,6 +251,16 @@ acpi_status acpi_initialize_objects(u32 flags)
}
/*
+ * Execute any module-level code that was detected during the table load
+ * phase. Although illegal since ACPI 2.0, there are many machines that
+ * contain this type of code. Each block of detected executable AML code
+ * outside of any control method is wrapped with a temporary control
+ * method object and placed on a global list. The methods on this list
+ * are executed below.
+ */
+ acpi_ns_exec_module_code_list();
+
+ /*
* Initialize the objects that remain uninitialized. This runs the
* executable AML that may be part of the declaration of these objects:
* operation_regions, buffer_fields, Buffers, and Packages.
@@ -318,7 +328,7 @@ ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
*
* RETURN: Status
*
- * DESCRIPTION: Shutdown the ACPI subsystem. Release all resources.
+ * DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
*
******************************************************************************/
acpi_status acpi_terminate(void)
@@ -327,6 +337,19 @@ acpi_status acpi_terminate(void)
ACPI_FUNCTION_TRACE(acpi_terminate);
+ /* Just exit if subsystem is already shutdown */
+
+ if (acpi_gbl_shutdown) {
+ ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Subsystem appears active, go ahead and shut it down */
+
+ acpi_gbl_shutdown = TRUE;
+ acpi_gbl_startup_flags = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
+
/* Terminate the AML Debugger if present */
ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE);
@@ -353,6 +376,7 @@ acpi_status acpi_terminate(void)
}
ACPI_EXPORT_SYMBOL(acpi_terminate)
+
#ifndef ACPI_ASL_COMPILER
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 58b4517ce71..3f4602b8f28 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/async.h>
+#include <linux/dmi.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
@@ -45,6 +46,8 @@
#include <linux/power_supply.h>
#endif
+#define PREFIX "ACPI: "
+
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CLASS "battery"
@@ -85,6 +88,10 @@ static const struct acpi_device_id battery_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, battery_device_ids);
+/* For buggy DSDTs that report negative 16-bit values for either charging
+ * or discharging current and/or report 0 as 65536 due to bad math.
+ */
+#define QUIRK_SIGNED16_CURRENT 0x0001
struct acpi_battery {
struct mutex lock;
@@ -112,6 +119,7 @@ struct acpi_battery {
int state;
int power_unit;
u8 alarm_present;
+ long quirks;
};
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
@@ -390,6 +398,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
state_offsets, ARRAY_SIZE(state_offsets));
battery->update_time = jiffies;
kfree(buffer.pointer);
+
+ if ((battery->quirks & QUIRK_SIGNED16_CURRENT) &&
+ battery->rate_now != -1)
+ battery->rate_now = abs((s16)battery->rate_now);
+
return result;
}
@@ -495,6 +508,14 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
}
#endif
+static void acpi_battery_quirks(struct acpi_battery *battery)
+{
+ battery->quirks = 0;
+ if (dmi_name_in_vendors("Acer") && battery->power_unit) {
+ battery->quirks |= QUIRK_SIGNED16_CURRENT;
+ }
+}
+
static int acpi_battery_update(struct acpi_battery *battery)
{
int result, old_present = acpi_battery_present(battery);
@@ -513,6 +534,7 @@ static int acpi_battery_update(struct acpi_battery *battery)
result = acpi_battery_get_info(battery);
if (result)
return result;
+ acpi_battery_quirks(battery);
acpi_battery_init_alarm(battery);
}
#ifdef CONFIG_ACPI_SYSFS_POWER
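
The QUIRK_SIGNED16_CURRENT fixup reinterprets the reported rate as a signed 16-bit value and takes its absolute value, so firmware that reports a current of 65535 (-1 as s16) ends up as 1, and 65086 becomes 450. A standalone check of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Mirrors: battery->rate_now = abs((s16)battery->rate_now); */
    static int fix_rate(int rate_now)
    {
            return abs((int16_t)rate_now);
    }

    int main(void)
    {
            int samples[] = { 65535, 65086, 450, 0 };

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("raw %5d -> fixed %d\n", samples[i], fix_rate(samples[i]));
            /* 65535 -> 1, 65086 -> 450, 450 -> 450, 0 -> 0 */
            return 0;
    }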
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 0c4ca4d318b..e56b2a7b53d 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -34,6 +34,8 @@
#include <acpi/acpi_bus.h>
#include <linux/dmi.h>
+#include "internal.h"
+
enum acpi_blacklist_predicates {
all_versions,
less_than_or_equal,
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 2876fc70c3a..74119152435 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <linux/dmi.h>
#include "internal.h"
@@ -93,36 +94,33 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
EXPORT_SYMBOL(acpi_bus_get_device);
-int acpi_bus_get_status(struct acpi_device *device)
+acpi_status acpi_bus_get_status_handle(acpi_handle handle,
+ unsigned long long *sta)
{
- acpi_status status = AE_OK;
- unsigned long long sta = 0;
-
+ acpi_status status;
- if (!device)
- return -EINVAL;
+ status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
+ if (ACPI_SUCCESS(status))
+ return AE_OK;
- /*
- * Evaluate _STA if present.
- */
- if (device->flags.dynamic_status) {
- status =
- acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status))
- return -ENODEV;
- STRUCT_TO_INT(device->status) = (int)sta;
+ if (status == AE_NOT_FOUND) {
+ *sta = ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
+ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
+ return AE_OK;
}
+ return status;
+}
- /*
- * According to ACPI spec some device can be present and functional
- * even if the parent is not present but functional.
- * In such conditions the child device should not inherit the status
- * from the parent.
- */
- else
- STRUCT_TO_INT(device->status) =
- ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
- ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
+int acpi_bus_get_status(struct acpi_device *device)
+{
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_bus_get_status_handle(device->handle, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ STRUCT_TO_INT(device->status) = (int) sta;
if (device->status.functional && !device->status.present) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
@@ -134,14 +132,12 @@ int acpi_bus_get_status(struct acpi_device *device)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
device->pnp.bus_id,
(u32) STRUCT_TO_INT(device->status)));
-
return 0;
}
-
EXPORT_SYMBOL(acpi_bus_get_status);
void acpi_bus_private_data_handler(acpi_handle handle,
- u32 function, void *context)
+ void *context)
{
return;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 9195deba9d9..9335b87c517 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -33,6 +33,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_INFO "info"
#define ACPI_BUTTON_FILE_STATE "state"
@@ -113,6 +115,9 @@ static const struct file_operations acpi_button_state_fops = {
.release = single_release,
};
+static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
+static struct acpi_device *lid_device;
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
@@ -229,11 +234,38 @@ static int acpi_button_remove_fs(struct acpi_device *device)
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
+int acpi_lid_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_register);
+
+int acpi_lid_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
+}
+EXPORT_SYMBOL(acpi_lid_notifier_unregister);
+
+int acpi_lid_open(void)
+{
+ acpi_status status;
+ unsigned long long state;
+
+ status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
+ &state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return !!state;
+}
+EXPORT_SYMBOL(acpi_lid_open);
+
static int acpi_lid_send_state(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
unsigned long long state;
acpi_status status;
+ int ret;
status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
if (ACPI_FAILURE(status))
@@ -242,7 +274,12 @@ static int acpi_lid_send_state(struct acpi_device *device)
/* input layer checks if event is redundant */
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
- return 0;
+
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+ if (ret == NOTIFY_DONE)
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+ device);
+ return ret;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
@@ -364,8 +401,14 @@ static int acpi_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_remove_fs;
- if (button->type == ACPI_BUTTON_TYPE_LID)
+ if (button->type == ACPI_BUTTON_TYPE_LID) {
acpi_lid_send_state(device);
+ /*
+ * This assumes there is only one lid device; if there are
+ * more, we only care about the last one registered.
+ */
+ lid_device = device;
+ }
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
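
The lid notifier chain added above gives other drivers a way to track lid transitions without polling _LID themselves. A sketch of a hypothetical consumer module, assuming the prototypes for acpi_lid_notifier_register()/acpi_lid_open() exported here are visible to the caller:

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/acpi.h>

    /* Called on every lid transition; 'state' is the _LID value (1 = open). */
    static int example_lid_event(struct notifier_block *nb, unsigned long state,
                                 void *data)
    {
            pr_info("example: lid is now %s\n", state ? "open" : "closed");
            return NOTIFY_OK;
    }

    static struct notifier_block example_lid_nb = {
            .notifier_call = example_lid_event,
    };

    static int __init example_init(void)
    {
            int ret = acpi_lid_notifier_register(&example_lid_nb);

            if (ret)
                    return ret;
            /* Query the current state once at load time. */
            pr_info("example: lid initially %d\n", acpi_lid_open());
            return 0;
    }

    static void __exit example_exit(void)
    {
            acpi_lid_notifier_unregister(&example_lid_nb);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");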
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 332fe4b2170..6c9ee68e46f 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -28,6 +28,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
ACPI_MODULE_NAME("cm_sbs");
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_BATTERY_CLASS "battery"
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index fe0cdf83641..642bb305cb6 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -35,6 +35,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/container.h>
+#define PREFIX "ACPI: "
+
#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device"
#define ACPI_CONTAINER_CLASS "container"
@@ -200,20 +202,17 @@ container_walk_namespace_cb(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
char *hid = NULL;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device_info *info;
acpi_status status;
int *action = context;
-
- status = acpi_get_object_info(handle, &buffer);
- if (ACPI_FAILURE(status) || !buffer.pointer) {
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status)) {
return AE_OK;
}
- info = buffer.pointer;
if (info->valid & ACPI_VALID_HID)
- hid = info->hardware_id.value;
+ hid = info->hardware_id.string;
if (hid == NULL) {
goto end;
@@ -240,7 +239,7 @@ container_walk_namespace_cb(acpi_handle handle,
}
end:
- kfree(buffer.pointer);
+ kfree(info);
return AE_OK;
}
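
The container.c change reflects the new acpi_get_object_info() convention used throughout this patch: the function allocates the info block itself, the hardware ID is a plain string, and the caller frees the block. A sketch of a caller under that convention (a fragment, not a complete module; example_get_hid() is hypothetical):

    #include <linux/slab.h>
    #include <acpi/acpi_bus.h>

    /* Returns a kstrdup()ed copy of the _HID, or NULL; caller kfree()s it. */
    static char *example_get_hid(acpi_handle handle)
    {
            struct acpi_device_info *info;
            char *hid = NULL;
            acpi_status status;

            status = acpi_get_object_info(handle, &info);
            if (ACPI_FAILURE(status))
                    return NULL;

            if (info->valid & ACPI_VALID_HID)
                    hid = kstrdup(info->hardware_id.string, GFP_KERNEL);

            kfree(info);            /* one allocation, freed directly by the caller */
            return hid;
    }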
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index a8287be0870..8a690c3b8e2 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -3,6 +3,7 @@
*/
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -201,72 +202,54 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
-static int
-acpi_system_read_debug(char *page,
- char **start, off_t off, int count, int *eof, void *data)
+static int acpi_system_debug_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int size = 0;
unsigned int i;
- if (off != 0)
- goto end;
+ seq_printf(m, "%-25s\tHex SET\n", "Description");
- p += sprintf(p, "%-25s\tHex SET\n", "Description");
-
- switch ((unsigned long)data) {
+ switch ((unsigned long)m->private) {
case 0:
for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
- p += sprintf(p, "%-25s\t0x%08lX [%c]\n",
+ seq_printf(m, "%-25s\t0x%08lX [%c]\n",
acpi_debug_layers[i].name,
acpi_debug_layers[i].value,
(acpi_dbg_layer & acpi_debug_layers[i].
value) ? '*' : ' ');
}
- p += sprintf(p, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
+ seq_printf(m, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
ACPI_ALL_DRIVERS,
(acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
ACPI_ALL_DRIVERS) ==
0 ? ' ' : '-');
- p += sprintf(p,
+ seq_printf(m,
"--\ndebug_layer = 0x%08X (* = enabled, - = partial)\n",
acpi_dbg_layer);
break;
case 1:
for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
- p += sprintf(p, "%-25s\t0x%08lX [%c]\n",
+ seq_printf(m, "%-25s\t0x%08lX [%c]\n",
acpi_debug_levels[i].name,
acpi_debug_levels[i].value,
(acpi_dbg_level & acpi_debug_levels[i].
value) ? '*' : ' ');
}
- p += sprintf(p, "--\ndebug_level = 0x%08X (* = enabled)\n",
+ seq_printf(m, "--\ndebug_level = 0x%08X (* = enabled)\n",
acpi_dbg_level);
break;
- default:
- p += sprintf(p, "Invalid debug option\n");
- break;
}
+ return 0;
+}
- end:
- size = (p - page);
- if (size <= off + count)
- *eof = 1;
- *start = page + off;
- size -= off;
- if (size > count)
- size = count;
- if (size < 0)
- size = 0;
-
- return size;
+static int acpi_system_debug_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_system_debug_proc_show, PDE(inode)->data);
}
-static int
-acpi_system_write_debug(struct file *file,
+static ssize_t acpi_system_debug_proc_write(struct file *file,
const char __user * buffer,
- unsigned long count, void *data)
+ size_t count, loff_t *pos)
{
char debug_string[12] = { '\0' };
@@ -279,7 +262,7 @@ acpi_system_write_debug(struct file *file,
debug_string[count] = '\0';
- switch ((unsigned long)data) {
+ switch ((unsigned long)PDE(file->f_path.dentry->d_inode)->data) {
case 0:
acpi_dbg_layer = simple_strtoul(debug_string, NULL, 0);
break;
@@ -292,6 +275,15 @@ acpi_system_write_debug(struct file *file,
return count;
}
+
+static const struct file_operations acpi_system_debug_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = acpi_system_debug_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = acpi_system_debug_proc_write,
+};
#endif
int __init acpi_debug_init(void)
@@ -303,24 +295,18 @@ int __init acpi_debug_init(void)
/* 'debug_layer' [R/W] */
name = ACPI_SYSTEM_FILE_DEBUG_LAYER;
- entry =
- create_proc_read_entry(name, S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, acpi_system_read_debug,
- (void *)0);
- if (entry)
- entry->write_proc = acpi_system_write_debug;
- else
+ entry = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, &acpi_system_debug_proc_fops,
+ (void *)0);
+ if (!entry)
goto Error;
/* 'debug_level' [R/W] */
name = ACPI_SYSTEM_FILE_DEBUG_LEVEL;
- entry =
- create_proc_read_entry(name, S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, acpi_system_read_debug,
- (void *)1);
- if (entry)
- entry->write_proc = acpi_system_write_debug;
- else
+ entry = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR,
+ acpi_root_dir, &acpi_system_debug_proc_fops,
+ (void *)1);
+ if (!entry)
goto Error;
Done:
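
The debug.c conversion follows the standard seq_file recipe: a show() callback that prints the whole file, single_open() wired through an open() callback, and proc_create_data() carrying a per-entry cookie. A minimal sketch of that recipe against the same 2.6.31-era proc API (names and the /proc entry are hypothetical):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* show() callback: emit the whole virtual file via seq_printf(). */
    static int example_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "flag value = 0x%08lX\n", (unsigned long)m->private);
            return 0;
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            /* PDE(inode)->data carries the per-entry cookie, as in debug.c above */
            return single_open(file, example_proc_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = example_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init example_proc_init(void)
    {
            if (!proc_create_data("example_flag", S_IRUGO, NULL,
                                  &example_proc_fops, (void *)0x1UL))
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_proc_exit(void)
    {
            remove_proc_entry("example_flag", NULL);
    }

    module_init(example_proc_init);
    module_exit(example_proc_exit);
    MODULE_LICENSE("GPL");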
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index efb959d6c8a..3a2cfefc71a 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -33,6 +33,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
ACPI_MODULE_NAME("dock");
@@ -231,18 +233,16 @@ static int is_ata(acpi_handle handle)
static int is_battery(acpi_handle handle)
{
struct acpi_device_info *info;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
int ret = 1;
- if (!ACPI_SUCCESS(acpi_get_object_info(handle, &buffer)))
+ if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
return 0;
- info = buffer.pointer;
if (!(info->valid & ACPI_VALID_HID))
ret = 0;
else
- ret = !strcmp("PNP0C0A", info->hardware_id.value);
+ ret = !strcmp("PNP0C0A", info->hardware_id.string);
- kfree(buffer.pointer);
+ kfree(info);
return ret;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 391f331674c..f70796081c4 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -42,12 +42,12 @@
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <linux/dmi.h>
#define ACPI_EC_CLASS "embedded_controller"
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
#define ACPI_EC_FILE_INFO "info"
-#undef PREFIX
#define PREFIX "ACPI: EC: "
/* EC status register */
@@ -68,15 +68,13 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
+#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
per one transaction */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
- EC_FLAGS_GPE_MODE, /* Expect GPE to be sent
- * for status change */
- EC_FLAGS_NO_GPE, /* Don't use GPE mode */
EC_FLAGS_GPE_STORM, /* GPE storm detected */
EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and
* OpReg are installed */
@@ -170,7 +168,7 @@ static void start_transaction(struct acpi_ec *ec)
acpi_ec_write_cmd(ec, ec->curr->command);
}
-static void gpe_transaction(struct acpi_ec *ec, u8 status)
+static void advance_transaction(struct acpi_ec *ec, u8 status)
{
unsigned long flags;
spin_lock_irqsave(&ec->curr_lock, flags);
@@ -201,29 +199,6 @@ unlock:
spin_unlock_irqrestore(&ec->curr_lock, flags);
}
-static int acpi_ec_wait(struct acpi_ec *ec)
-{
- if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
- msecs_to_jiffies(ACPI_EC_DELAY)))
- return 0;
- /* try restart command if we get any false interrupts */
- if (ec->curr->irq_count &&
- (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
- pr_debug(PREFIX "controller reset, restart transaction\n");
- start_transaction(ec);
- if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
- msecs_to_jiffies(ACPI_EC_DELAY)))
- return 0;
- }
- /* missing GPEs, switch back to poll mode */
- if (printk_ratelimit())
- pr_info(PREFIX "missing confirmations, "
- "switch off interrupt mode.\n");
- set_bit(EC_FLAGS_NO_GPE, &ec->flags);
- clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- return 1;
-}
-
static void acpi_ec_gpe_query(void *ec_cxt);
static int ec_check_sci(struct acpi_ec *ec, u8 state)
@@ -236,43 +211,51 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
return 0;
}
-static void ec_delay(void)
-{
- /* EC in MSI notebooks don't tolerate delays other than 550 usec */
- if (EC_FLAGS_MSI)
- udelay(ACPI_EC_DELAY);
- else
- /* Use shortest sleep available */
- msleep(1);
-}
-
static int ec_poll(struct acpi_ec *ec)
{
- unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
- udelay(ACPI_EC_CDELAY);
- while (time_before(jiffies, delay)) {
- gpe_transaction(ec, acpi_ec_read_status(ec));
- ec_delay();
- if (ec_transaction_done(ec))
- return 0;
+ unsigned long flags;
+ int repeat = 2; /* number of command restarts */
+ while (repeat--) {
+ unsigned long delay = jiffies +
+ msecs_to_jiffies(ACPI_EC_DELAY);
+ do {
+ /* don't sleep with disabled interrupts */
+ if (EC_FLAGS_MSI || irqs_disabled()) {
+ udelay(ACPI_EC_MSI_UDELAY);
+ if (ec_transaction_done(ec))
+ return 0;
+ } else {
+ if (wait_event_timeout(ec->wait,
+ ec_transaction_done(ec),
+ msecs_to_jiffies(1)))
+ return 0;
+ }
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ } while (time_before(jiffies, delay));
+ if (!ec->curr->irq_count ||
+ (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
+ break;
+ /* try restart command if we get any false interrupts */
+ pr_debug(PREFIX "controller reset, restart transaction\n");
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ start_transaction(ec);
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
}
return -ETIME;
}
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
- struct transaction *t,
- int force_poll)
+ struct transaction *t)
{
unsigned long tmp;
int ret = 0;
pr_debug(PREFIX "transaction start\n");
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
acpi_disable_gpe(NULL, ec->gpe);
}
if (EC_FLAGS_MSI)
- udelay(ACPI_EC_DELAY);
+ udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
spin_lock_irqsave(&ec->curr_lock, tmp);
/* following two actions should be kept atomic */
@@ -281,11 +264,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
spin_unlock_irqrestore(&ec->curr_lock, tmp);
- /* if we selected poll mode or failed in GPE-mode do a poll loop */
- if (force_poll ||
- !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
- acpi_ec_wait(ec))
- ret = ec_poll(ec);
+ ret = ec_poll(ec);
pr_debug(PREFIX "transaction end\n");
spin_lock_irqsave(&ec->curr_lock, tmp);
ec->curr = NULL;
@@ -295,8 +274,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec_check_sci(ec, acpi_ec_read_status(ec));
/* it is safe to enable GPE outside of transaction */
acpi_enable_gpe(NULL, ec->gpe);
- } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
- t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
pr_info(PREFIX "GPE storm detected, "
"transactions will use polling mode\n");
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
@@ -314,16 +292,14 @@ static int ec_wait_ibf0(struct acpi_ec *ec)
{
unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
/* interrupt wait manually if GPE mode is not active */
- unsigned long timeout = test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ?
- msecs_to_jiffies(ACPI_EC_DELAY) : msecs_to_jiffies(1);
while (time_before(jiffies, delay))
- if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), timeout))
+ if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+ msecs_to_jiffies(1)))
return 0;
return -ETIME;
}
-static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
- int force_poll)
+static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
int status;
u32 glk;
@@ -345,7 +321,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
status = -ETIME;
goto end;
}
- status = acpi_ec_transaction_unlocked(ec, t, force_poll);
+ status = acpi_ec_transaction_unlocked(ec, t);
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
@@ -354,10 +330,6 @@ unlock:
return status;
}
-/*
- * Note: samsung nv5000 doesn't work with ec burst mode.
- * http://bugzilla.kernel.org/show_bug.cgi?id=4980
- */
static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
u8 d;
@@ -365,7 +337,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
.wdata = NULL, .rdata = &d,
.wlen = 0, .rlen = 1};
- return acpi_ec_transaction(ec, &t, 0);
+ return acpi_ec_transaction(ec, &t);
}
static int acpi_ec_burst_disable(struct acpi_ec *ec)
@@ -375,7 +347,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
.wlen = 0, .rlen = 0};
return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
- acpi_ec_transaction(ec, &t, 0) : 0;
+ acpi_ec_transaction(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
@@ -386,7 +358,7 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
.wdata = &address, .rdata = &d,
.wlen = 1, .rlen = 1};
- result = acpi_ec_transaction(ec, &t, 0);
+ result = acpi_ec_transaction(ec, &t);
*data = d;
return result;
}
@@ -398,7 +370,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
.wdata = wdata, .rdata = NULL,
.wlen = 2, .rlen = 0};
- return acpi_ec_transaction(ec, &t, 0);
+ return acpi_ec_transaction(ec, &t);
}
/*
@@ -466,7 +438,7 @@ int ec_transaction(u8 command,
if (!first_ec)
return -ENODEV;
- return acpi_ec_transaction(first_ec, &t, force_poll);
+ return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);
@@ -487,7 +459,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
* bit to be cleared (and thus clearing the interrupt source).
*/
- result = acpi_ec_transaction(ec, &t, 0);
+ result = acpi_ec_transaction(ec, &t);
if (result)
return result;
@@ -570,28 +542,10 @@ static u32 acpi_ec_gpe_handler(void *data)
pr_debug(PREFIX "~~~> interrupt\n");
status = acpi_ec_read_status(ec);
- if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) {
- gpe_transaction(ec, status);
- if (ec_transaction_done(ec) &&
- (status & ACPI_EC_FLAG_IBF) == 0)
- wake_up(&ec->wait);
- }
-
+ advance_transaction(ec, status);
+ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
+ wake_up(&ec->wait);
ec_check_sci(ec, status);
- if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
- !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) {
- /* this is non-query, must be confirmation */
- if (!test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- if (printk_ratelimit())
- pr_info(PREFIX "non-query interrupt received,"
- " switching to interrupt mode\n");
- } else {
- /* hush, STORM switches the mode every transaction */
- pr_debug(PREFIX "non-query interrupt received,"
- " switching to interrupt mode\n");
- }
- set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- }
return ACPI_INTERRUPT_HANDLED;
}
@@ -617,7 +571,8 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (bits != 8 && acpi_strict)
return AE_BAD_PARAMETER;
- acpi_ec_burst_enable(ec);
+ if (EC_FLAGS_MSI)
+ acpi_ec_burst_enable(ec);
if (function == ACPI_READ) {
result = acpi_ec_read(ec, address, &temp);
@@ -638,7 +593,8 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
- acpi_ec_burst_disable(ec);
+ if (EC_FLAGS_MSI)
+ acpi_ec_burst_disable(ec);
switch (result) {
case -EINVAL:
@@ -788,6 +744,42 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
return AE_CTRL_TERMINATE;
}
+static int ec_install_handlers(struct acpi_ec *ec)
+{
+ acpi_status status;
+ if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
+ return 0;
+ status = acpi_install_gpe_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_enable_gpe(NULL, ec->gpe);
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+ NULL, ec);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ /*
+ * Maybe OS fails in evaluating the _REG object.
+ * The AE_NOT_FOUND error will be ignored and OS
+ * continue to initialize EC.
+ */
+ printk(KERN_ERR "Fail in evaluating the _REG object"
+ " of EC device. Broken bios is suspected.\n");
+ } else {
+ acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler);
+ return -ENODEV;
+ }
+ }
+
+ set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+ return 0;
+}
+
static void ec_remove_handlers(struct acpi_ec *ec)
{
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
@@ -802,9 +794,8 @@ static void ec_remove_handlers(struct acpi_ec *ec)
static int acpi_ec_add(struct acpi_device *device)
{
struct acpi_ec *ec = NULL;
+ int ret;
- if (!device)
- return -EINVAL;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
@@ -837,9 +828,12 @@ static int acpi_ec_add(struct acpi_device *device)
acpi_ec_add_fs(device);
pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
- pr_info(PREFIX "driver started in %s mode\n",
- (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))?"interrupt":"poll");
- return 0;
+
+ ret = ec_install_handlers(ec);
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ return ret;
}
static int acpi_ec_remove(struct acpi_device *device, int type)
@@ -851,6 +845,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
return -EINVAL;
ec = acpi_driver_data(device);
+ ec_remove_handlers(ec);
mutex_lock(&ec->lock);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
list_del(&handler->node);
@@ -888,75 +883,6 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
return AE_OK;
}
-static int ec_install_handlers(struct acpi_ec *ec)
-{
- acpi_status status;
- if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
- return 0;
- status = acpi_install_gpe_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- if (ACPI_FAILURE(status))
- return -ENODEV;
- acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
- acpi_enable_gpe(NULL, ec->gpe);
- status = acpi_install_address_space_handler(ec->handle,
- ACPI_ADR_SPACE_EC,
- &acpi_ec_space_handler,
- NULL, ec);
- if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- /*
- * Maybe OS fails in evaluating the _REG object.
- * The AE_NOT_FOUND error will be ignored and OS
- * continue to initialize EC.
- */
- printk(KERN_ERR "Fail in evaluating the _REG object"
- " of EC device. Broken bios is suspected.\n");
- } else {
- acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler);
- return -ENODEV;
- }
- }
-
- set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
- return 0;
-}
-
-static int acpi_ec_start(struct acpi_device *device)
-{
- struct acpi_ec *ec;
- int ret = 0;
-
- if (!device)
- return -EINVAL;
-
- ec = acpi_driver_data(device);
-
- if (!ec)
- return -EINVAL;
-
- ret = ec_install_handlers(ec);
-
- /* EC is fully operational, allow queries */
- clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- return ret;
-}
-
-static int acpi_ec_stop(struct acpi_device *device, int type)
-{
- struct acpi_ec *ec;
- if (!device)
- return -EINVAL;
- ec = acpi_driver_data(device);
- if (!ec)
- return -EINVAL;
- ec_remove_handlers(ec);
-
- return 0;
-}
-
int __init acpi_boot_ec_enable(void)
{
if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
@@ -1054,8 +980,6 @@ static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
{
struct acpi_ec *ec = acpi_driver_data(device);
/* Stop using GPE */
- set_bit(EC_FLAGS_NO_GPE, &ec->flags);
- clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
acpi_disable_gpe(NULL, ec->gpe);
return 0;
}
@@ -1064,8 +988,6 @@ static int acpi_ec_resume(struct acpi_device *device)
{
struct acpi_ec *ec = acpi_driver_data(device);
/* Enable use of GPE back */
- clear_bit(EC_FLAGS_NO_GPE, &ec->flags);
- set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
acpi_enable_gpe(NULL, ec->gpe);
return 0;
}
@@ -1077,8 +999,6 @@ static struct acpi_driver acpi_ec_driver = {
.ops = {
.add = acpi_ec_add,
.remove = acpi_ec_remove,
- .start = acpi_ec_start,
- .stop = acpi_ec_stop,
.suspend = acpi_ec_suspend,
.resume = acpi_ec_resume,
},
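
ec_poll() above is now structured as: up to two attempts, each bounded by a deadline; within an attempt, either busy-wait (MSI or interrupts disabled) or sleep up to 1 ms per iteration, then advance the transaction state machine; if the deadline expires, restart the command once. A standalone sketch of that control flow with stand-in primitives (all the helpers below are hypothetical):

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for the kernel primitives used by ec_poll() above. */
    static int attempts_left = 5;
    static bool transaction_done(void)    { return --attempts_left <= 0; }
    static bool wait_up_to_1ms(void)      { return transaction_done(); }
    static void advance_state(void)       { /* would read EC status and act on it */ }
    static void restart_transaction(void) { printf("restart transaction\n"); }

    static int ec_poll_sketch(void)
    {
            int repeat = 2;                 /* number of command restarts */

            while (repeat--) {
                    int budget = 500;       /* stands in for the jiffies deadline */

                    do {
                            if (wait_up_to_1ms())
                                    return 0;       /* completed within this slice */
                            advance_state();
                    } while (--budget > 0);

                    /* deadline hit: retry once in case of a false interrupt */
                    restart_transaction();
            }
            return -1;                      /* -ETIME in the real driver */
    }

    int main(void)
    {
            printf("ec_poll_sketch() = %d\n", ec_poll_sketch());
            return 0;
    }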
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index aeb7e5fb4a0..c511071bfd7 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -14,6 +14,8 @@
#include <net/netlink.h>
#include <net/genetlink.h>
+#include "internal.h"
+
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("event");
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 53698ea0837..f419849a0d3 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -34,6 +34,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_FAN_CLASS "fan"
#define ACPI_FAN_FILE_STATE "state"
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index a8a5c29958c..c6645f26224 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -12,6 +12,8 @@
#include <linux/rwsem.h>
#include <linux/acpi.h>
+#include "internal.h"
+
#define ACPI_GLUE_DEBUG 0
#if ACPI_GLUE_DEBUG
#define DBG(x...) printk(PREFIX x)
@@ -93,15 +95,13 @@ do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
struct acpi_device_info *info;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_find_child *find = context;
- status = acpi_get_object_info(handle, &buffer);
+ status = acpi_get_object_info(handle, &info);
if (ACPI_SUCCESS(status)) {
- info = buffer.pointer;
if (info->address == find->address)
find->handle = handle;
- kfree(buffer.pointer);
+ kfree(info);
}
return AE_OK;
}
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(acpi_get_child);
/* Link ACPI devices with physical devices */
static void acpi_glue_data_handler(acpi_handle handle,
- u32 function, void *context)
+ void *context)
{
/* we provide an empty handler */
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 11a69b53004..074cf8682d5 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -1,4 +1,24 @@
-/* For use by Linux/ACPI infrastructure, not drivers */
+/*
+ * acpi/internal.h
+ * For use by Linux/ACPI infrastructure, not drivers
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define PREFIX "ACPI: "
int init_acpi_device_notify(void);
int acpi_scan_init(void);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d440ccd27d9..202dd0c976a 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -30,6 +30,8 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
+#define PREFIX "ACPI: "
+
#define ACPI_NUMA 0x80000000
#define _COMPONENT ACPI_NUMA
ACPI_MODULE_NAME("numa");
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5691f165a95..5633b86e3ed 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -58,6 +58,7 @@ struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
struct work_struct work;
+ int wait;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -88,6 +89,7 @@ struct acpi_res_list {
char name[5]; /* only can have a length of 4 chars, make use of this
one instead of res->name, no need to kalloc then */
struct list_head resource_list;
+ int count;
};
static LIST_HEAD(resource_list_head);
@@ -191,7 +193,7 @@ acpi_status __init acpi_os_initialize(void)
static void bind_to_cpu0(struct work_struct *work)
{
- set_cpus_allowed(current, cpumask_of_cpu(0));
+ set_cpus_allowed_ptr(current, cpumask_of(0));
kfree(work);
}
@@ -697,31 +699,12 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
- if (!dpc) {
- printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
- return;
- }
-
- dpc->function(dpc->context);
- kfree(dpc);
-
- return;
-}
-
-static void acpi_os_execute_hp_deferred(struct work_struct *work)
-{
- struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
- if (!dpc) {
- printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
- return;
- }
- acpi_os_wait_events_complete(NULL);
+ if (dpc->wait)
+ acpi_os_wait_events_complete(NULL);
dpc->function(dpc->context);
kfree(dpc);
-
- return;
}
/*******************************************************************************
@@ -745,15 +728,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct workqueue_struct *queue;
- work_func_t func;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
- if (!function)
- return AE_BAD_PARAMETER;
-
/*
* Allocate/initialize DPC structure. Note that this memory will be
* freed by the callee. The kernel handles the work_struct list in a
@@ -778,8 +757,8 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
*/
queue = hp ? kacpi_hotplug_wq :
(type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
- func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
- INIT_WORK(&dpc->work, func);
+ dpc->wait = hp ? 1 : 0;
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
ret = queue_work(queue, &dpc->work);
if (!ret) {
@@ -1358,6 +1337,89 @@ acpi_os_validate_interface (char *interface)
return AE_SUPPORT;
}
+static inline int acpi_res_list_add(struct acpi_res_list *res)
+{
+ struct acpi_res_list *res_list_elem;
+
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+
+ if (res->resource_type == res_list_elem->resource_type &&
+ res->start == res_list_elem->start &&
+ res->end == res_list_elem->end) {
+
+ /*
+ * The Region(addr,len) already exists in the list,
+ * just increase the count
+ */
+
+ res_list_elem->count++;
+ return 0;
+ }
+ }
+
+ res->count = 1;
+ list_add(&res->resource_list, &resource_list_head);
+ return 1;
+}
+
+static inline void acpi_res_list_del(struct acpi_res_list *res)
+{
+ struct acpi_res_list *res_list_elem;
+
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+
+ if (res->resource_type == res_list_elem->resource_type &&
+ res->start == res_list_elem->start &&
+ res->end == res_list_elem->end) {
+
+ /*
+ * If the res count is decreased to 0,
+ * remove and free it
+ */
+
+ if (--res_list_elem->count == 0) {
+ list_del(&res_list_elem->resource_list);
+ kfree(res_list_elem);
+ }
+ return;
+ }
+ }
+}
+
+acpi_status
+acpi_os_invalidate_address(
+ u8 space_id,
+ acpi_physical_address address,
+ acpi_size length)
+{
+ struct acpi_res_list res;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ /* Only interference checks against SystemIO and SystemMemory
+ are needed */
+ res.start = address;
+ res.end = address + length - 1;
+ res.resource_type = space_id;
+ spin_lock(&acpi_res_lock);
+ acpi_res_list_del(&res);
+ spin_unlock(&acpi_res_lock);
+ break;
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_EC:
+ case ACPI_ADR_SPACE_SMBUS:
+ case ACPI_ADR_SPACE_CMOS:
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ break;
+ }
+ return AE_OK;
+}
+
/******************************************************************************
*
* FUNCTION: acpi_os_validate_address
@@ -1382,6 +1444,7 @@ acpi_os_validate_address (
char *name)
{
struct acpi_res_list *res;
+ int added;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return AE_OK;
@@ -1399,14 +1462,17 @@ acpi_os_validate_address (
res->end = address + length - 1;
res->resource_type = space_id;
spin_lock(&acpi_res_lock);
- list_add(&res->resource_list, &resource_list_head);
+ added = acpi_res_list_add(res);
spin_unlock(&acpi_res_lock);
- pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
- "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
+ "name: %s\n", added ? "Added" : "Already exist",
+ (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
? "SystemIO" : "System Memory",
(unsigned long long)res->start,
(unsigned long long)res->end,
res->name);
+ if (!added)
+ kfree(res);
break;
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_EC:
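
acpi_res_list_add()/acpi_res_list_del() turn the flat resource list into a refcounted one: declaring the same region twice bumps a count, and the entry is only freed when the count drops back to zero. A standalone, single-entry sketch of that counting scheme (the real code keeps a linked list under acpi_res_lock):

    #include <stdio.h>
    #include <stdlib.h>

    struct region {
            unsigned long start, end;
            int count;
    };

    static struct region *tracked;

    static void validate(unsigned long start, unsigned long len)
    {
            if (tracked && tracked->start == start &&
                tracked->end == start + len - 1) {
                    tracked->count++;       /* already present: just count it */
                    return;
            }
            tracked = malloc(sizeof(*tracked));
            if (!tracked)
                    return;
            tracked->start = start;
            tracked->end = start + len - 1;
            tracked->count = 1;
    }

    static void invalidate(unsigned long start, unsigned long len)
    {
            if (!tracked || tracked->start != start ||
                tracked->end != start + len - 1)
                    return;
            if (--tracked->count == 0) {    /* last user: remove and free */
                    free(tracked);
                    tracked = NULL;
            }
    }

    int main(void)
    {
            validate(0x400, 8);
            validate(0x400, 8);             /* same region declared twice */
            invalidate(0x400, 8);
            printf("after one invalidate: count = %d\n", tracked ? tracked->count : 0);
            invalidate(0x400, 8);
            printf("after two invalidates: %s\n", tracked ? "still tracked" : "freed");
            return 0;
    }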
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b794eb88ab9..843699ed93f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -40,6 +40,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_irq");
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 16e0f9d3d17..394ae89409c 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -43,6 +43,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 31b961c2f22..31122214e0e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -36,6 +36,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 12158e0d009..45da2bae36c 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -31,6 +31,7 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <linux/dmi.h>
static int debug;
static int check_sta_before_sun;
@@ -57,7 +58,7 @@ ACPI_MODULE_NAME("pci_slot");
MY_NAME , ## arg); \
} while (0)
-#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */
+#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
acpi_handle root_handle; /* handle of the root bridge */
@@ -149,7 +150,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
- snprintf(name, sizeof(name), "%u", (u32)sun);
+ snprintf(name, sizeof(name), "%llu", sun);
pci_slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(pci_slot)) {
err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 5a09bf392ec..22b29791651 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -43,9 +43,10 @@
#include <linux/seq_file.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-
#include "sleep.h"
+#define PREFIX "ACPI: "
+
#define _COMPONENT ACPI_POWER_COMPONENT
ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
new file mode 100644
index 00000000000..e6bfd77986b
--- /dev/null
+++ b/drivers/acpi/power_meter.c
@@ -0,0 +1,1018 @@
+/*
+ * A hwmon driver for ACPI 4.0 power meters
+ * Copyright (C) 2009 IBM
+ *
+ * Author: Darrick J. Wong <djwong@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/dmi.h>
+#include <linux/kdev_t.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+
+#define ACPI_POWER_METER_NAME "power_meter"
+ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
+#define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
+#define ACPI_POWER_METER_CLASS "power_meter_resource"
+
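+/* Upper bound on sysfs attributes registered from the sensor templates below */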
+#define NUM_SENSORS 17
+
+#define POWER_METER_CAN_MEASURE (1 << 0)
+#define POWER_METER_CAN_TRIP (1 << 1)
+#define POWER_METER_CAN_CAP (1 << 2)
+#define POWER_METER_CAN_NOTIFY (1 << 3)
+#define POWER_METER_IS_BATTERY (1 << 8)
+#define UNKNOWN_HYSTERESIS 0xFFFFFFFF
+
+#define METER_NOTIFY_CONFIG 0x80
+#define METER_NOTIFY_TRIP 0x81
+#define METER_NOTIFY_CAP 0x82
+#define METER_NOTIFY_CAPPING 0x83
+#define METER_NOTIFY_INTERVAL 0x84
+
+#define POWER_AVERAGE_NAME "power1_average"
+#define POWER_CAP_NAME "power1_cap"
+#define POWER_AVG_INTERVAL_NAME "power1_average_interval"
+#define POWER_ALARM_NAME "power1_alarm"
+
+static int cap_in_hardware;
+static int force_cap_on;
+
+static int can_cap_in_hardware(void)
+{
+ return force_cap_on || cap_in_hardware;
+}
+
+static struct acpi_device_id power_meter_ids[] = {
+ {"ACPI000D", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, power_meter_ids);
+
+struct acpi_power_meter_capabilities {
+ acpi_integer flags;
+ acpi_integer units;
+ acpi_integer type;
+ acpi_integer accuracy;
+ acpi_integer sampling_time;
+ acpi_integer min_avg_interval;
+ acpi_integer max_avg_interval;
+ acpi_integer hysteresis;
+ acpi_integer configurable_cap;
+ acpi_integer min_cap;
+ acpi_integer max_cap;
+};
+
+struct acpi_power_meter_resource {
+ struct acpi_device *acpi_dev;
+ acpi_bus_id name;
+ struct mutex lock;
+ struct device *hwmon_dev;
+ struct acpi_power_meter_capabilities caps;
+ acpi_string model_number;
+ acpi_string serial_number;
+ acpi_string oem_info;
+ acpi_integer power;
+ acpi_integer cap;
+ acpi_integer avg_interval;
+ int sensors_valid;
+ unsigned long sensors_last_updated;
+ struct sensor_device_attribute sensors[NUM_SENSORS];
+ int num_sensors;
+ int trip[2];
+ int num_domain_devices;
+ struct acpi_device **domain_devices;
+ struct kobject *holders_dir;
+};
+
+struct ro_sensor_template {
+ char *label;
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf);
+ int index;
+};
+
+struct rw_sensor_template {
+ char *label;
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf);
+ ssize_t (*set)(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count);
+ int index;
+};
+
+/* Averaging interval */
+static int update_avg_interval(struct acpi_power_meter_resource *resource)
+{
+ unsigned long long data;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GAI",
+ NULL, &data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GAI"));
+ return -ENODEV;
+ }
+
+ resource->avg_interval = data;
+ return 0;
+}
+
+static ssize_t show_avg_interval(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+
+ mutex_lock(&resource->lock);
+ update_avg_interval(resource);
+ mutex_unlock(&resource->lock);
+
+ return sprintf(buf, "%llu\n", resource->avg_interval);
+}
+
+static ssize_t set_avg_interval(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ int res;
+ unsigned long temp;
+ unsigned long long data;
+ acpi_status status;
+
+ res = strict_strtoul(buf, 10, &temp);
+ if (res)
+ return res;
+
+ if (temp > resource->caps.max_avg_interval ||
+ temp < resource->caps.min_avg_interval)
+ return -EINVAL;
+ arg0.integer.value = temp;
+
+ mutex_lock(&resource->lock);
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
+ &args, &data);
+ if (!ACPI_FAILURE(status))
+ resource->avg_interval = temp;
+ mutex_unlock(&resource->lock);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PAI"));
+ return -EINVAL;
+ }
+
+ /* _PAI returns 0 on success, nonzero otherwise */
+ if (data)
+ return -EINVAL;
+
+ return count;
+}
+
+/* Cap functions */
+static int update_cap(struct acpi_power_meter_resource *resource)
+{
+ unsigned long long data;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GHL",
+ NULL, &data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GHL"));
+ return -ENODEV;
+ }
+
+ resource->cap = data;
+ return 0;
+}
+
+static ssize_t show_cap(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+
+ mutex_lock(&resource->lock);
+ update_cap(resource);
+ mutex_unlock(&resource->lock);
+
+ return sprintf(buf, "%llu\n", resource->cap * 1000);
+}
+
+static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ int res;
+ unsigned long temp;
+ unsigned long long data;
+ acpi_status status;
+
+ res = strict_strtoul(buf, 10, &temp);
+ if (res)
+ return res;
+
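+	/* hwmon sysfs power values are in microwatts; _GHL/_SHL work in the meter's native milliwatt units */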
+ temp /= 1000;
+ if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
+ return -EINVAL;
+ arg0.integer.value = temp;
+
+ mutex_lock(&resource->lock);
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
+ &args, &data);
+ if (!ACPI_FAILURE(status))
+ resource->cap = temp;
+ mutex_unlock(&resource->lock);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SHL"));
+ return -EINVAL;
+ }
+
+ /* _SHL returns 0 on success, nonzero otherwise */
+ if (data)
+ return -EINVAL;
+
+ return count;
+}
+
+/* Power meter trip points */
+static int set_acpi_trip(struct acpi_power_meter_resource *resource)
+{
+ union acpi_object arg_objs[] = {
+ {ACPI_TYPE_INTEGER},
+ {ACPI_TYPE_INTEGER}
+ };
+ struct acpi_object_list args = { 2, arg_objs };
+ unsigned long long data;
+ acpi_status status;
+
+ /* Both trip levels must be set */
+ if (resource->trip[0] < 0 || resource->trip[1] < 0)
+ return 0;
+
+ /* This driver stores min, max; ACPI wants max, min. */
+ arg_objs[0].integer.value = resource->trip[1];
+ arg_objs[1].integer.value = resource->trip[0];
+
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PTP",
+ &args, &data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTP"));
+ return -EINVAL;
+ }
+
+ return data;
+}
+
+static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ int res;
+ unsigned long temp;
+
+ res = strict_strtoul(buf, 10, &temp);
+ if (res)
+ return res;
+
+ temp /= 1000;
+ if (temp < 0)
+ return -EINVAL;
+
+ mutex_lock(&resource->lock);
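+	/* attr->index is 7 for power1_average_min and 8 for power1_average_max (see trip_attrs) */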
+ resource->trip[attr->index - 7] = temp;
+ res = set_acpi_trip(resource);
+ mutex_unlock(&resource->lock);
+
+ if (res)
+ return res;
+
+ return count;
+}
+
+/* Power meter */
+static int update_meter(struct acpi_power_meter_resource *resource)
+{
+ unsigned long long data;
+ acpi_status status;
+ unsigned long local_jiffies = jiffies;
+
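+	/* Don't re-evaluate _PMM more often than the meter's reported sampling time */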
+ if (time_before(local_jiffies, resource->sensors_last_updated +
+ msecs_to_jiffies(resource->caps.sampling_time)) &&
+ resource->sensors_valid)
+ return 0;
+
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PMM",
+ NULL, &data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMM"));
+ return -ENODEV;
+ }
+
+ resource->power = data;
+ resource->sensors_valid = 1;
+ resource->sensors_last_updated = jiffies;
+ return 0;
+}
+
+static ssize_t show_power(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+
+ mutex_lock(&resource->lock);
+ update_meter(resource);
+ mutex_unlock(&resource->lock);
+
+ return sprintf(buf, "%llu\n", resource->power * 1000);
+}
+
+/* Miscellaneous */
+static ssize_t show_str(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ acpi_string val;
+
+ switch (attr->index) {
+ case 0:
+ val = resource->model_number;
+ break;
+ case 1:
+ val = resource->serial_number;
+ break;
+ case 2:
+ val = resource->oem_info;
+ break;
+ default:
+ BUG();
+ }
+
+ return sprintf(buf, "%s\n", val);
+}
+
+static ssize_t show_val(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ acpi_integer val = 0;
+
+ switch (attr->index) {
+ case 0:
+ val = resource->caps.min_avg_interval;
+ break;
+ case 1:
+ val = resource->caps.max_avg_interval;
+ break;
+ case 2:
+ val = resource->caps.min_cap * 1000;
+ break;
+ case 3:
+ val = resource->caps.max_cap * 1000;
+ break;
+ case 4:
+ if (resource->caps.hysteresis == UNKNOWN_HYSTERESIS)
+ return sprintf(buf, "unknown\n");
+
+ val = resource->caps.hysteresis * 1000;
+ break;
+ case 5:
+ if (resource->caps.flags & POWER_METER_IS_BATTERY)
+ val = 1;
+ else
+ val = 0;
+ break;
+ case 6:
+ if (resource->power > resource->cap)
+ val = 1;
+ else
+ val = 0;
+ break;
+ case 7:
+ case 8:
+ if (resource->trip[attr->index - 7] < 0)
+ return sprintf(buf, "unknown\n");
+
+ val = resource->trip[attr->index - 7] * 1000;
+ break;
+ default:
+ BUG();
+ }
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static ssize_t show_accuracy(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ unsigned int acc = resource->caps.accuracy;
+
+ return sprintf(buf, "%u.%u%%\n", acc / 1000, acc % 1000);
+}
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME);
+}
+
+/* Sensor descriptions. If you add a sensor, update NUM_SENSORS above! */
+static struct ro_sensor_template meter_ro_attrs[] = {
+{POWER_AVERAGE_NAME, show_power, 0},
+{"power1_accuracy", show_accuracy, 0},
+{"power1_average_interval_min", show_val, 0},
+{"power1_average_interval_max", show_val, 1},
+{"power1_is_battery", show_val, 5},
+{NULL, NULL, 0},
+};
+
+static struct rw_sensor_template meter_rw_attrs[] = {
+{POWER_AVG_INTERVAL_NAME, show_avg_interval, set_avg_interval, 0},
+{NULL, NULL, NULL, 0},
+};
+
+static struct ro_sensor_template misc_cap_attrs[] = {
+{"power1_cap_min", show_val, 2},
+{"power1_cap_max", show_val, 3},
+{"power1_cap_hyst", show_val, 4},
+{POWER_ALARM_NAME, show_val, 6},
+{NULL, NULL, 0},
+};
+
+static struct ro_sensor_template ro_cap_attrs[] = {
+{POWER_CAP_NAME, show_cap, 0},
+{NULL, NULL, 0},
+};
+
+static struct rw_sensor_template rw_cap_attrs[] = {
+{POWER_CAP_NAME, show_cap, set_cap, 0},
+{NULL, NULL, NULL, 0},
+};
+
+static struct rw_sensor_template trip_attrs[] = {
+{"power1_average_min", show_val, set_trip, 7},
+{"power1_average_max", show_val, set_trip, 8},
+{NULL, NULL, NULL, 0},
+};
+
+static struct ro_sensor_template misc_attrs[] = {
+{"name", show_name, 0},
+{"power1_model_number", show_str, 0},
+{"power1_oem_info", show_str, 2},
+{"power1_serial_number", show_str, 1},
+{NULL, NULL, 0},
+};
+
+/* Read power domain data */
+static void remove_domain_devices(struct acpi_power_meter_resource *resource)
+{
+ int i;
+
+ if (!resource->num_domain_devices)
+ return;
+
+ for (i = 0; i < resource->num_domain_devices; i++) {
+ struct acpi_device *obj = resource->domain_devices[i];
+ if (!obj)
+ continue;
+
+ sysfs_remove_link(resource->holders_dir,
+ kobject_name(&obj->dev.kobj));
+ put_device(&obj->dev);
+ }
+
+ kfree(resource->domain_devices);
+ kobject_put(resource->holders_dir);
+}
+
+static int read_domain_devices(struct acpi_power_meter_resource *resource)
+{
+ int res = 0;
+ int i;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *pss;
+ acpi_status status;
+
+ status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMD", NULL,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMD"));
+ return -ENODEV;
+ }
+
+ pss = buffer.pointer;
+ if (!pss ||
+ pss->type != ACPI_TYPE_PACKAGE) {
+ dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
+ "Invalid _PMD data\n");
+ res = -EFAULT;
+ goto end;
+ }
+
+ if (!pss->package.count)
+ goto end;
+
+ resource->domain_devices = kzalloc(sizeof(struct acpi_device *) *
+ pss->package.count, GFP_KERNEL);
+ if (!resource->domain_devices) {
+ res = -ENOMEM;
+ goto end;
+ }
+
+ resource->holders_dir = kobject_create_and_add("measures",
+ &resource->acpi_dev->dev.kobj);
+ if (!resource->holders_dir) {
+ res = -ENOMEM;
+ goto exit_free;
+ }
+
+ resource->num_domain_devices = pss->package.count;
+
+ for (i = 0; i < pss->package.count; i++) {
+ struct acpi_device *obj;
+ union acpi_object *element = &(pss->package.elements[i]);
+
+ /* Refuse non-references */
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
+ continue;
+
+ /* Create a symlink to domain objects */
+ resource->domain_devices[i] = NULL;
+ status = acpi_bus_get_device(element->reference.handle,
+ &resource->domain_devices[i]);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ obj = resource->domain_devices[i];
+ get_device(&obj->dev);
+
+ res = sysfs_create_link(resource->holders_dir, &obj->dev.kobj,
+ kobject_name(&obj->dev.kobj));
+ if (res) {
+ put_device(&obj->dev);
+ resource->domain_devices[i] = NULL;
+ }
+ }
+
+ res = 0;
+ goto end;
+
+exit_free:
+ kfree(resource->domain_devices);
+end:
+ kfree(buffer.pointer);
+ return res;
+}
+
+/* Registration and deregistration */
+static int register_ro_attrs(struct acpi_power_meter_resource *resource,
+ struct ro_sensor_template *ro)
+{
+ struct device *dev = &resource->acpi_dev->dev;
+ struct sensor_device_attribute *sensors =
+ &resource->sensors[resource->num_sensors];
+ int res = 0;
+
+ while (ro->label) {
+ sensors->dev_attr.attr.name = ro->label;
+ sensors->dev_attr.attr.mode = S_IRUGO;
+ sensors->dev_attr.show = ro->show;
+ sensors->index = ro->index;
+
+ res = device_create_file(dev, &sensors->dev_attr);
+ if (res) {
+ sensors->dev_attr.attr.name = NULL;
+ goto error;
+ }
+ sensors++;
+ resource->num_sensors++;
+ ro++;
+ }
+
+error:
+ return res;
+}
+
+static int register_rw_attrs(struct acpi_power_meter_resource *resource,
+ struct rw_sensor_template *rw)
+{
+ struct device *dev = &resource->acpi_dev->dev;
+ struct sensor_device_attribute *sensors =
+ &resource->sensors[resource->num_sensors];
+ int res = 0;
+
+ while (rw->label) {
+ sensors->dev_attr.attr.name = rw->label;
+ sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
+ sensors->dev_attr.show = rw->show;
+ sensors->dev_attr.store = rw->set;
+ sensors->index = rw->index;
+
+ res = device_create_file(dev, &sensors->dev_attr);
+ if (res) {
+ sensors->dev_attr.attr.name = NULL;
+ goto error;
+ }
+ sensors++;
+ resource->num_sensors++;
+ rw++;
+ }
+
+error:
+ return res;
+}
+
+static void remove_attrs(struct acpi_power_meter_resource *resource)
+{
+ int i;
+
+ for (i = 0; i < resource->num_sensors; i++) {
+ if (!resource->sensors[i].dev_attr.attr.name)
+ continue;
+ device_remove_file(&resource->acpi_dev->dev,
+ &resource->sensors[i].dev_attr);
+ }
+
+ remove_domain_devices(resource);
+
+ resource->num_sensors = 0;
+}
+
+static int setup_attrs(struct acpi_power_meter_resource *resource)
+{
+ int res = 0;
+
+ res = read_domain_devices(resource);
+ if (res)
+ return res;
+
+ if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
+ res = register_ro_attrs(resource, meter_ro_attrs);
+ if (res)
+ goto error;
+ res = register_rw_attrs(resource, meter_rw_attrs);
+ if (res)
+ goto error;
+ }
+
+ if (resource->caps.flags & POWER_METER_CAN_CAP) {
+ if (!can_cap_in_hardware()) {
+ dev_err(&resource->acpi_dev->dev,
+ "Ignoring unsafe software power cap!\n");
+ goto skip_unsafe_cap;
+ }
+
+ if (resource->caps.configurable_cap) {
+ res = register_rw_attrs(resource, rw_cap_attrs);
+ if (res)
+ goto error;
+ } else {
+ res = register_ro_attrs(resource, ro_cap_attrs);
+ if (res)
+ goto error;
+ }
+ res = register_ro_attrs(resource, misc_cap_attrs);
+ if (res)
+ goto error;
+ }
+skip_unsafe_cap:
+
+ if (resource->caps.flags & POWER_METER_CAN_TRIP) {
+ res = register_rw_attrs(resource, trip_attrs);
+ if (res)
+ goto error;
+ }
+
+ res = register_ro_attrs(resource, misc_attrs);
+ if (res)
+ goto error;
+
+ return res;
+error:
+ remove_domain_devices(resource);
+ remove_attrs(resource);
+ return res;
+}
+
+static void free_capabilities(struct acpi_power_meter_resource *resource)
+{
+ acpi_string *str;
+ int i;
+
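+	/* model_number, serial_number and oem_info are adjacent members, so walk them as an array */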
+ str = &resource->model_number;
+ for (i = 0; i < 3; i++, str++)
+ kfree(*str);
+}
+
+static int read_capabilities(struct acpi_power_meter_resource *resource)
+{
+ int res = 0;
+ int i;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer state = { 0, NULL };
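+	/* _PMC returns a package of 11 integers followed by 3 strings */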
+ struct acpi_buffer format = { sizeof("NNNNNNNNNNN"), "NNNNNNNNNNN" };
+ union acpi_object *pss;
+ acpi_string *str;
+ acpi_status status;
+
+ status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMC", NULL,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMC"));
+ return -ENODEV;
+ }
+
+ pss = buffer.pointer;
+ if (!pss ||
+ pss->type != ACPI_TYPE_PACKAGE ||
+ pss->package.count != 14) {
+ dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
+ "Invalid _PMC data\n");
+ res = -EFAULT;
+ goto end;
+ }
+
+ /* Grab all the integer data at once */
+ state.length = sizeof(struct acpi_power_meter_capabilities);
+ state.pointer = &resource->caps;
+
+ status = acpi_extract_package(pss, &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Invalid data"));
+ res = -EFAULT;
+ goto end;
+ }
+
+ if (resource->caps.units) {
+ dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
+ "Unknown units %llu.\n",
+ resource->caps.units);
+ res = -EINVAL;
+ goto end;
+ }
+
+ /* Grab the string data */
+ str = &resource->model_number;
+
+ for (i = 11; i < 14; i++) {
+ union acpi_object *element = &(pss->package.elements[i]);
+
+ if (element->type != ACPI_TYPE_STRING) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ *str = kzalloc(sizeof(u8) * (element->string.length + 1),
+ GFP_KERNEL);
+ if (!*str) {
+ res = -ENOMEM;
+ goto error;
+ }
+
+ strncpy(*str, element->string.pointer, element->string.length);
+ str++;
+ }
+
+ dev_info(&resource->acpi_dev->dev, "Found ACPI power meter.\n");
+ goto end;
+error:
+ str = &resource->model_number;
+ for (i = 0; i < 3; i++, str++)
+ kfree(*str);
+end:
+ kfree(buffer.pointer);
+ return res;
+}
+
+/* Handle ACPI event notifications */
+static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
+{
+ struct acpi_power_meter_resource *resource;
+ int res;
+
+ if (!device || !acpi_driver_data(device))
+ return;
+
+ resource = acpi_driver_data(device);
+
+ mutex_lock(&resource->lock);
+ switch (event) {
+ case METER_NOTIFY_CONFIG:
+ free_capabilities(resource);
+ res = read_capabilities(resource);
+ if (res)
+ break;
+
+ remove_attrs(resource);
+ setup_attrs(resource);
+ break;
+ case METER_NOTIFY_TRIP:
+ sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
+ update_meter(resource);
+ break;
+ case METER_NOTIFY_CAP:
+ sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
+ update_cap(resource);
+ break;
+ case METER_NOTIFY_INTERVAL:
+ sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
+ update_avg_interval(resource);
+ break;
+ case METER_NOTIFY_CAPPING:
+ sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
+ dev_info(&device->dev, "Capping in progress.\n");
+ break;
+ default:
+ BUG();
+ }
+ mutex_unlock(&resource->lock);
+
+ acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS,
+ dev_name(&device->dev), event, 0);
+}
+
+static int acpi_power_meter_add(struct acpi_device *device)
+{
+ int res;
+ struct acpi_power_meter_resource *resource;
+
+ if (!device)
+ return -EINVAL;
+
+ resource = kzalloc(sizeof(struct acpi_power_meter_resource),
+ GFP_KERNEL);
+ if (!resource)
+ return -ENOMEM;
+
+ resource->sensors_valid = 0;
+ resource->acpi_dev = device;
+ mutex_init(&resource->lock);
+ strcpy(acpi_device_name(device), ACPI_POWER_METER_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_POWER_METER_CLASS);
+ device->driver_data = resource;
+
+ free_capabilities(resource);
+ res = read_capabilities(resource);
+ if (res)
+ goto exit_free;
+
+ resource->trip[0] = resource->trip[1] = -1;
+
+ res = setup_attrs(resource);
+ if (res)
+ goto exit_free;
+
+ resource->hwmon_dev = hwmon_device_register(&device->dev);
+ if (IS_ERR(resource->hwmon_dev)) {
+ res = PTR_ERR(resource->hwmon_dev);
+ goto exit_remove;
+ }
+
+ res = 0;
+ goto exit;
+
+exit_remove:
+ remove_attrs(resource);
+exit_free:
+ kfree(resource);
+exit:
+ return res;
+}
+
+static int acpi_power_meter_remove(struct acpi_device *device, int type)
+{
+ struct acpi_power_meter_resource *resource;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ resource = acpi_driver_data(device);
+ hwmon_device_unregister(resource->hwmon_dev);
+
+ free_capabilities(resource);
+ remove_attrs(resource);
+
+ kfree(resource);
+ return 0;
+}
+
+static int acpi_power_meter_resume(struct acpi_device *device)
+{
+ struct acpi_power_meter_resource *resource;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ resource = acpi_driver_data(device);
+ free_capabilities(resource);
+ read_capabilities(resource);
+
+ return 0;
+}
+
+static struct acpi_driver acpi_power_meter_driver = {
+ .name = "power_meter",
+ .class = ACPI_POWER_METER_CLASS,
+ .ids = power_meter_ids,
+ .ops = {
+ .add = acpi_power_meter_add,
+ .remove = acpi_power_meter_remove,
+ .resume = acpi_power_meter_resume,
+ .notify = acpi_power_meter_notify,
+ },
+};
+
+/* Module init/exit routines */
+static int __init enable_cap_knobs(const struct dmi_system_id *d)
+{
+ cap_in_hardware = 1;
+ return 0;
+}
+
+static struct dmi_system_id __initdata pm_dmi_table[] = {
+ {
+ enable_cap_knobs, "IBM Active Energy Manager",
+ {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM")
+ },
+ },
+ {}
+};
+
+static int __init acpi_power_meter_init(void)
+{
+ int result;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ dmi_check_system(pm_dmi_table);
+
+ result = acpi_bus_register_driver(&acpi_power_meter_driver);
+ if (result < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_power_meter_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_power_meter_driver);
+}
+
+MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_DESCRIPTION("ACPI 4.0 power meter driver");
+MODULE_LICENSE("GPL");
+
+module_param(force_cap_on, bool, 0644);
+MODULE_PARM_DESC(force_cap_on, "Enable power cap even if it is unsafe to do so.");
+
+module_init(acpi_power_meter_init);
+module_exit(acpi_power_meter_exit);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 2cc4b303387..c2d4d6e0936 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -59,6 +59,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#define PREFIX "ACPI: "
+
#define ACPI_PROCESSOR_CLASS "processor"
#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
#define ACPI_PROCESSOR_FILE_INFO "info"
@@ -79,9 +81,10 @@ MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
-static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
+#endif
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -101,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = {
.ops = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
- .start = acpi_processor_start,
.suspend = acpi_processor_suspend,
.resume = acpi_processor_resume,
.notify = acpi_processor_notify,
@@ -110,7 +112,7 @@ static struct acpi_driver acpi_processor_driver = {
#define INSTALL_NOTIFY_HANDLER 1
#define UNINSTALL_NOTIFY_HANDLER 2
-
+#ifdef CONFIG_ACPI_PROCFS
static const struct file_operations acpi_processor_info_fops = {
.owner = THIS_MODULE,
.open = acpi_processor_info_open_fs,
@@ -118,6 +120,7 @@ static const struct file_operations acpi_processor_info_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;
@@ -316,6 +319,7 @@ static int acpi_processor_set_pdc(struct acpi_processor *pr)
FS Interface (/proc)
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_processor_dir = NULL;
static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
@@ -388,7 +392,6 @@ static int acpi_processor_add_fs(struct acpi_device *device)
return -EIO;
return 0;
}
-
static int acpi_processor_remove_fs(struct acpi_device *device)
{
@@ -405,6 +408,16 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
return 0;
}
+#else
+static inline int acpi_processor_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static inline int acpi_processor_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
/* Use the acpiid in MADT to map cpus in case of SMP */
@@ -698,92 +711,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
static DEFINE_PER_CPU(void *, processor_device_array);
-static int __cpuinit acpi_processor_start(struct acpi_device *device)
-{
- int result = 0;
- struct acpi_processor *pr;
- struct sys_device *sysdev;
-
- pr = acpi_driver_data(device);
-
- result = acpi_processor_get_info(device);
- if (result) {
- /* Processor is physically not present */
- return 0;
- }
-
- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
-
- /*
- * Buggy BIOS check
- * ACPI id of processors can be reported wrongly by the BIOS.
- * Don't trust it blindly
- */
- if (per_cpu(processor_device_array, pr->id) != NULL &&
- per_cpu(processor_device_array, pr->id) != device) {
- printk(KERN_WARNING "BIOS reported wrong ACPI id "
- "for the processor\n");
- return -ENODEV;
- }
- per_cpu(processor_device_array, pr->id) = device;
-
- per_cpu(processors, pr->id) = pr;
-
- result = acpi_processor_add_fs(device);
- if (result)
- goto end;
-
- sysdev = get_cpu_sysdev(pr->id);
- if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
- return -EFAULT;
-
- /* _PDC call should be done before doing anything else (if reqd.). */
- arch_acpi_processor_init_pdc(pr);
- acpi_processor_set_pdc(pr);
- arch_acpi_processor_cleanup_pdc(pr);
-
-#ifdef CONFIG_CPU_FREQ
- acpi_processor_ppc_has_changed(pr);
-#endif
- acpi_processor_get_throttling_info(pr);
- acpi_processor_get_limit_info(pr);
-
-
- acpi_processor_power_init(pr, device);
-
- pr->cdev = thermal_cooling_device_register("Processor", device,
- &processor_cooling_ops);
- if (IS_ERR(pr->cdev)) {
- result = PTR_ERR(pr->cdev);
- goto end;
- }
-
- dev_info(&device->dev, "registered as cooling_device%d\n",
- pr->cdev->id);
-
- result = sysfs_create_link(&device->dev.kobj,
- &pr->cdev->device.kobj,
- "thermal_cooling");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
- result = sysfs_create_link(&pr->cdev->device.kobj,
- &device->dev.kobj,
- "device");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
-
- if (pr->flags.throttling) {
- printk(KERN_INFO PREFIX "%s [%s] (supports",
- acpi_device_name(device), acpi_device_bid(device));
- printk(" %d throttling states", pr->throttling.state_count);
- printk(")\n");
- }
-
- end:
-
- return result;
-}
-
static void acpi_processor_notify(struct acpi_device *device, u32 event)
{
struct acpi_processor *pr = acpi_driver_data(device);
@@ -846,10 +773,8 @@ static struct notifier_block acpi_cpu_notifier =
static int acpi_processor_add(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
-
-
- if (!device)
- return -EINVAL;
+ int result = 0;
+ struct sys_device *sysdev;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
@@ -865,7 +790,100 @@ static int acpi_processor_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
+ result = acpi_processor_get_info(device);
+ if (result) {
+ /* Processor is physically not present */
+ return 0;
+ }
+
+ BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+
+ /*
+ * Buggy BIOS check
+ * ACPI id of processors can be reported wrongly by the BIOS.
+ * Don't trust it blindly
+ */
+ if (per_cpu(processor_device_array, pr->id) != NULL &&
+ per_cpu(processor_device_array, pr->id) != device) {
+ printk(KERN_WARNING "BIOS reported wrong ACPI id "
+ "for the processor\n");
+ result = -ENODEV;
+ goto err_free_cpumask;
+ }
+ per_cpu(processor_device_array, pr->id) = device;
+
+ per_cpu(processors, pr->id) = pr;
+
+ result = acpi_processor_add_fs(device);
+ if (result)
+ goto err_free_cpumask;
+
+ sysdev = get_cpu_sysdev(pr->id);
+ if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
+ result = -EFAULT;
+ goto err_remove_fs;
+ }
+
+ /* _PDC call should be done before doing anything else (if reqd.). */
+ arch_acpi_processor_init_pdc(pr);
+ acpi_processor_set_pdc(pr);
+ arch_acpi_processor_cleanup_pdc(pr);
+
+#ifdef CONFIG_CPU_FREQ
+ acpi_processor_ppc_has_changed(pr);
+#endif
+ acpi_processor_get_throttling_info(pr);
+ acpi_processor_get_limit_info(pr);
+
+
+ acpi_processor_power_init(pr, device);
+
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (IS_ERR(pr->cdev)) {
+ result = PTR_ERR(pr->cdev);
+ goto err_power_exit;
+ }
+
+ dev_info(&device->dev, "registered as cooling_device%d\n",
+ pr->cdev->id);
+
+ result = sysfs_create_link(&device->dev.kobj,
+ &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result) {
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ goto err_thermal_unregister;
+ }
+ result = sysfs_create_link(&pr->cdev->device.kobj,
+ &device->dev.kobj,
+ "device");
+ if (result) {
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ goto err_remove_sysfs;
+ }
+
+ if (pr->flags.throttling) {
+ printk(KERN_INFO PREFIX "%s [%s] (supports",
+ acpi_device_name(device), acpi_device_bid(device));
+ printk(" %d throttling states", pr->throttling.state_count);
+ printk(")\n");
+ }
+
return 0;
+
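+	/* Error paths: undo setup in reverse order */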
+err_remove_sysfs:
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+err_thermal_unregister:
+ thermal_cooling_device_unregister(pr->cdev);
+err_power_exit:
+ acpi_processor_power_exit(pr, device);
+err_remove_fs:
+ acpi_processor_remove_fs(device);
+err_free_cpumask:
+ free_cpumask_var(pr->throttling.shared_cpu_map);
+
+ return result;
}
static int acpi_processor_remove(struct acpi_device *device, int type)
@@ -942,7 +960,6 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
acpi_handle phandle;
struct acpi_device *pdev;
- struct acpi_processor *pr;
if (acpi_get_parent(handle, &phandle)) {
@@ -957,15 +974,6 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
return -ENODEV;
}
- acpi_bus_start(*device);
-
- pr = acpi_driver_data(*device);
- if (!pr)
- return -ENODEV;
-
- if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
- kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
- }
return 0;
}
@@ -995,25 +1003,6 @@ static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
"Unable to add the device\n");
break;
}
-
- pr = acpi_driver_data(device);
- if (!pr) {
- printk(KERN_ERR PREFIX "Driver data is NULL\n");
- break;
- }
-
- if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- break;
- }
-
- result = acpi_processor_start(device);
- if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
- kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
- } else {
- printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
- acpi_device_bid(device));
- }
break;
case ACPI_NOTIFY_EJECT_REQUEST:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -1030,9 +1019,6 @@ static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
"Driver data is NULL, dropping EJECT\n");
return;
}
-
- if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
- kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -1161,11 +1147,11 @@ static int __init acpi_processor_init(void)
(struct acpi_table_header **)&madt)))
madt = NULL;
#endif
-
+#ifdef CONFIG_ACPI_PROCFS
acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
if (!acpi_processor_dir)
return -ENOMEM;
-
+#endif
/*
* Check whether the system is DMI table. If yes, OSPM
* should not use mwait for CPU-states.
@@ -1193,7 +1179,9 @@ out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
out_proc:
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return result;
}
@@ -1213,7 +1201,9 @@ static void __exit acpi_processor_exit(void)
cpuidle_unregister_driver(&acpi_idle_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
+#endif
return;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 66393d5c4c7..cc61a622010 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -60,6 +60,8 @@
#include <acpi/processor.h>
#include <asm/processor.h>
+#define PREFIX "ACPI: "
+
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
@@ -680,6 +682,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
return 0;
}
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_processor *pr = seq->private;
@@ -759,7 +762,7 @@ static const struct file_operations acpi_processor_power_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-
+#endif
/**
* acpi_idle_bm_check - checks if bus master activity was detected
@@ -1160,7 +1163,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
{
acpi_status status = 0;
static int first_run;
+#ifdef CONFIG_ACPI_PROCFS
struct proc_dir_entry *entry = NULL;
+#endif
unsigned int i;
if (boot_option_idle_override)
@@ -1217,7 +1222,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
pr->power.states[i].type);
printk(")\n");
}
-
+#ifdef CONFIG_ACPI_PROCFS
/* 'power' [R] */
entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
S_IRUGO, acpi_device_dir(device),
@@ -1225,6 +1230,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
acpi_driver_data(device));
if (!entry)
return -EIO;
+#endif
return 0;
}
@@ -1237,9 +1243,11 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
cpuidle_unregister_device(&pr->power.dev);
pr->flags.power_setup_done = 0;
+#ifdef CONFIG_ACPI_PROCFS
if (acpi_device_dir(device))
remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
acpi_device_dir(device));
+#endif
return 0;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 60e543d3234..8ba0ed0b9dd 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -39,6 +39,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#define PREFIX "ACPI: "
+
#define ACPI_PROCESSOR_CLASS "processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
@@ -509,7 +511,7 @@ int acpi_processor_preregister_performance(
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
- if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&performance_mutex);
@@ -556,7 +558,6 @@ int acpi_processor_preregister_performance(
* Now that we have _PSD data from all CPUs, lets setup P-state
* domain info.
*/
- cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 31adda1099e..140c5c5b423 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -40,6 +40,8 @@
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");
@@ -438,7 +440,7 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
};
/* /proc interface */
-
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_processor *pr = (struct acpi_processor *)seq->private;
@@ -517,3 +519,4 @@ const struct file_operations acpi_processor_limit_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ae39797aab5..4c6c14c1e30 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -41,6 +41,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#define PREFIX "ACPI: "
+
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
@@ -75,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling;
- if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
/*
@@ -103,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
if (retval)
goto err_ret;
- cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
@@ -1216,7 +1217,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
}
/* proc interface */
-
+#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
void *offset)
{
@@ -1324,3 +1325,4 @@ const struct file_operations acpi_processor_throttling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 4b214b74eba..52b9db8afc2 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -46,6 +46,8 @@
#include "sbshc.h"
+#define PREFIX "ACPI: "
+
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_BATTERY_CLASS "battery"
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 0619734895b..d9339806df4 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -15,6 +15,8 @@
#include <linux/interrupt.h>
#include "sbshc.h"
+#define PREFIX "ACPI: "
+
#define ACPI_SMB_HC_CLASS "smbus_host_controller"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 318b1ea7a5b..468921bed22 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -22,6 +22,8 @@ extern struct acpi_device *acpi_root;
#define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus"
+#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
+
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
DEFINE_MUTEX(acpi_device_lock);
@@ -43,40 +45,19 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
{
int len;
int count;
-
- if (!acpi_dev->flags.hardware_id && !acpi_dev->flags.compatible_ids)
- return -ENODEV;
+ struct acpi_hardware_id *id;
len = snprintf(modalias, size, "acpi:");
size -= len;
- if (acpi_dev->flags.hardware_id) {
- count = snprintf(&modalias[len], size, "%s:",
- acpi_dev->pnp.hardware_id);
+ list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
+ count = snprintf(&modalias[len], size, "%s:", id->id);
if (count < 0 || count >= size)
return -EINVAL;
len += count;
size -= count;
}
- if (acpi_dev->flags.compatible_ids) {
- struct acpi_compatible_id_list *cid_list;
- int i;
-
- cid_list = acpi_dev->pnp.cid_list;
- for (i = 0; i < cid_list->count; i++) {
- count = snprintf(&modalias[len], size, "%s:",
- cid_list->id[i].value);
- if (count < 0 || count >= size) {
- printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
- acpi_dev->pnp.device_name, i);
- break;
- }
- len += count;
- size -= count;
- }
- }
-
modalias[len] = '\0';
return len;
}
@@ -183,7 +164,7 @@ static ssize_t
acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
- return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id);
+ return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
@@ -219,17 +200,13 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
- if (dev->flags.hardware_id) {
- result = device_create_file(&dev->dev, &dev_attr_hid);
- if (result)
- goto end;
- }
+ result = device_create_file(&dev->dev, &dev_attr_hid);
+ if (result)
+ goto end;
- if (dev->flags.hardware_id || dev->flags.compatible_ids) {
- result = device_create_file(&dev->dev, &dev_attr_modalias);
- if (result)
- goto end;
- }
+ result = device_create_file(&dev->dev, &dev_attr_modalias);
+ if (result)
+ goto end;
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
@@ -255,11 +232,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (ACPI_SUCCESS(status))
device_remove_file(&dev->dev, &dev_attr_eject);
- if (dev->flags.hardware_id || dev->flags.compatible_ids)
- device_remove_file(&dev->dev, &dev_attr_modalias);
-
- if (dev->flags.hardware_id)
- device_remove_file(&dev->dev, &dev_attr_hid);
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+ device_remove_file(&dev->dev, &dev_attr_hid);
if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
@@ -271,6 +245,7 @@ int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
const struct acpi_device_id *id;
+ struct acpi_hardware_id *hwid;
/*
* If the device is not present, it is unnecessary to load device
@@ -279,36 +254,30 @@ int acpi_match_device_ids(struct acpi_device *device,
if (!device->status.present)
return -ENODEV;
- if (device->flags.hardware_id) {
- for (id = ids; id->id[0]; id++) {
- if (!strcmp((char*)id->id, device->pnp.hardware_id))
+ for (id = ids; id->id[0]; id++)
+ list_for_each_entry(hwid, &device->pnp.ids, list)
+ if (!strcmp((char *) id->id, hwid->id))
return 0;
- }
- }
-
- if (device->flags.compatible_ids) {
- struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
- int i;
-
- for (id = ids; id->id[0]; id++) {
- /* compare multiple _CID entries against driver ids */
- for (i = 0; i < cid_list->count; i++) {
- if (!strcmp((char*)id->id,
- cid_list->id[i].value))
- return 0;
- }
- }
- }
return -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
+static void acpi_free_ids(struct acpi_device *device)
+{
+ struct acpi_hardware_id *id, *tmp;
+
+ list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
+ kfree(id->id);
+ kfree(id);
+ }
+}
+
static void acpi_device_release(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- kfree(acpi_dev->pnp.cid_list);
+ acpi_free_ids(acpi_dev);
kfree(acpi_dev);
}
@@ -366,22 +335,21 @@ static acpi_status acpi_device_notify_fixed(void *data)
{
struct acpi_device *device = data;
- acpi_device_notify(device->handle, ACPI_FIXED_HARDWARE_EVENT, device);
+ /* Fixed hardware devices have no handles */
+ acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
return AE_OK;
}
static int acpi_device_install_notify_handler(struct acpi_device *device)
{
acpi_status status;
- char *hid;
- hid = acpi_device_hid(device);
- if (!strcmp(hid, ACPI_BUTTON_HID_POWERF))
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_notify_fixed,
device);
- else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEPF))
+ else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_notify_fixed,
@@ -399,10 +367,10 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
static void acpi_device_remove_notify_handler(struct acpi_device *device)
{
- if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF))
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_notify_fixed);
- else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF))
+ else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_notify_fixed);
else
@@ -426,9 +394,6 @@ static int acpi_device_probe(struct device * dev)
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
if (ret) {
- if (acpi_drv->ops.stop)
- acpi_drv->ops.stop(acpi_dev,
- acpi_dev->removal_type);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev,
acpi_dev->removal_type);
@@ -452,8 +417,6 @@ static int acpi_device_remove(struct device * dev)
if (acpi_drv) {
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
- if (acpi_drv->ops.stop)
- acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
}
@@ -474,12 +437,12 @@ struct bus_type acpi_bus_type = {
.uevent = acpi_device_uevent,
};
-static int acpi_device_register(struct acpi_device *device,
- struct acpi_device *parent)
+static int acpi_device_register(struct acpi_device *device)
{
int result;
struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
int found = 0;
+
/*
* Linkage
* -------
@@ -501,8 +464,9 @@ static int acpi_device_register(struct acpi_device *device,
* If failed, create one and link it into acpi_bus_id_list
*/
list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
- if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) {
- acpi_device_bus_id->instance_no ++;
+ if (!strcmp(acpi_device_bus_id->bus_id,
+ acpi_device_hid(device))) {
+ acpi_device_bus_id->instance_no++;
found = 1;
kfree(new_bus_id);
break;
@@ -510,7 +474,7 @@ static int acpi_device_register(struct acpi_device *device,
}
if (!found) {
acpi_device_bus_id = new_bus_id;
- strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
+ strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
@@ -524,7 +488,7 @@ static int acpi_device_register(struct acpi_device *device,
mutex_unlock(&acpi_device_lock);
if (device->parent)
- device->dev.parent = &parent->dev;
+ device->dev.parent = &device->parent->dev;
device->dev.bus = &acpi_bus_type;
device->dev.release = &acpi_device_release;
result = device_register(&device->dev);
@@ -664,6 +628,33 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
+static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
+{
+ acpi_status status;
+ int ret;
+ struct acpi_device *device;
+
+ /*
+ * Fixed hardware devices do not appear in the namespace and do not
+ * have handles, but we fabricate acpi_devices for them, so we have
+ * to deal with them specially.
+ */
+ if (handle == NULL)
+ return acpi_root;
+
+ do {
+ status = acpi_get_parent(handle, &handle);
+ if (status == AE_NULL_ENTRY)
+ return NULL;
+ if (ACPI_FAILURE(status))
+ return acpi_root;
+
+ ret = acpi_bus_get_device(handle, &device);
+ if (ret == 0)
+ return device;
+ } while (1);
+}
+
acpi_status
acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
{
@@ -687,7 +678,7 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
-void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
+void acpi_bus_data_handler(acpi_handle handle, void *context)
{
/* TBD */
@@ -876,11 +867,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
if (ACPI_SUCCESS(status))
device->flags.dynamic_status = 1;
- /* Presence of _CID indicates 'compatible_ids' */
- status = acpi_get_handle(device->handle, "_CID", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.compatible_ids = 1;
-
/* Presence of _RMV indicates 'removable' */
status = acpi_get_handle(device->handle, "_RMV", &temp);
if (ACPI_SUCCESS(status))
@@ -918,8 +904,7 @@ static int acpi_bus_get_flags(struct acpi_device *device)
return 0;
}
-static void acpi_device_get_busid(struct acpi_device *device,
- acpi_handle handle, int type)
+static void acpi_device_get_busid(struct acpi_device *device)
{
char bus_id[5] = { '?', 0 };
struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
@@ -931,10 +916,12 @@ static void acpi_device_get_busid(struct acpi_device *device,
* The device's Bus ID is simply the object name.
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
- switch (type) {
- case ACPI_BUS_TYPE_SYSTEM:
+ if (ACPI_IS_ROOT_DEVICE(device)) {
strcpy(device->pnp.bus_id, "ACPI");
- break;
+ return;
+ }
+
+ switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
strcpy(device->pnp.bus_id, "PWRF");
break;
@@ -942,7 +929,7 @@ static void acpi_device_get_busid(struct acpi_device *device,
strcpy(device->pnp.bus_id, "SLPF");
break;
default:
- acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
+ acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
/* Clean up trailing underscores (if any) */
for (i = 3; i > 1; i--) {
if (bus_id[i] == '_')
@@ -1000,157 +987,132 @@ static int acpi_dock_match(struct acpi_device *device)
return acpi_get_handle(device->handle, "_DCK", &tmp);
}
-static void acpi_device_set_id(struct acpi_device *device,
- struct acpi_device *parent, acpi_handle handle,
- int type)
+char *acpi_device_hid(struct acpi_device *device)
+{
+ struct acpi_hardware_id *hid;
+
+ hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
+ return hid->id;
+}
+EXPORT_SYMBOL(acpi_device_hid);
+
+static void acpi_add_id(struct acpi_device *device, const char *dev_id)
+{
+ struct acpi_hardware_id *id;
+
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return;
+
+ id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
+ if (!id->id) {
+ kfree(id);
+ return;
+ }
+
+ strcpy(id->id, dev_id);
+ list_add_tail(&id->list, &device->pnp.ids);
+}
+
+static void acpi_device_set_id(struct acpi_device *device)
{
- struct acpi_device_info *info;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- char *hid = NULL;
- char *uid = NULL;
- struct acpi_compatible_id_list *cid_list = NULL;
- const char *cid_add = NULL;
acpi_status status;
+ struct acpi_device_info *info;
+ struct acpica_device_id_list *cid_list;
+ int i;
- switch (type) {
+ switch (device->device_type) {
case ACPI_BUS_TYPE_DEVICE:
- status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_IS_ROOT_DEVICE(device)) {
+ acpi_add_id(device, ACPI_SYSTEM_HID);
+ break;
+ } else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
+ /* \_SB_, the only root-level namespace device */
+ acpi_add_id(device, ACPI_BUS_HID);
+ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+ strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+ break;
+ }
+
+ status = acpi_get_object_info(device->handle, &info);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
return;
}
- info = buffer.pointer;
if (info->valid & ACPI_VALID_HID)
- hid = info->hardware_id.value;
- if (info->valid & ACPI_VALID_UID)
- uid = info->unique_id.value;
- if (info->valid & ACPI_VALID_CID)
- cid_list = &info->compatibility_id;
+ acpi_add_id(device, info->hardware_id.string);
+ if (info->valid & ACPI_VALID_CID) {
+ cid_list = &info->compatible_id_list;
+ for (i = 0; i < cid_list->count; i++)
+ acpi_add_id(device, cid_list->ids[i].string);
+ }
if (info->valid & ACPI_VALID_ADR) {
device->pnp.bus_address = info->address;
device->flags.bus_address = 1;
}
- /* If we have a video/bay/dock device, add our selfdefined
- HID to the CID list. Like that the video/bay/dock drivers
- will get autoloaded and the device might still match
- against another driver.
- */
+ /*
+ * Some devices don't reliably have _HIDs & _CIDs, so add
+ * synthetic HIDs to make sure drivers can find them.
+ */
if (acpi_is_video_device(device))
- cid_add = ACPI_VIDEO_HID;
+ acpi_add_id(device, ACPI_VIDEO_HID);
else if (ACPI_SUCCESS(acpi_bay_match(device)))
- cid_add = ACPI_BAY_HID;
+ acpi_add_id(device, ACPI_BAY_HID);
else if (ACPI_SUCCESS(acpi_dock_match(device)))
- cid_add = ACPI_DOCK_HID;
+ acpi_add_id(device, ACPI_DOCK_HID);
break;
case ACPI_BUS_TYPE_POWER:
- hid = ACPI_POWER_HID;
+ acpi_add_id(device, ACPI_POWER_HID);
break;
case ACPI_BUS_TYPE_PROCESSOR:
- hid = ACPI_PROCESSOR_OBJECT_HID;
- break;
- case ACPI_BUS_TYPE_SYSTEM:
- hid = ACPI_SYSTEM_HID;
+ acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
break;
case ACPI_BUS_TYPE_THERMAL:
- hid = ACPI_THERMAL_HID;
+ acpi_add_id(device, ACPI_THERMAL_HID);
break;
case ACPI_BUS_TYPE_POWER_BUTTON:
- hid = ACPI_BUTTON_HID_POWERF;
+ acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
- hid = ACPI_BUTTON_HID_SLEEPF;
+ acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
break;
}
/*
- * \_SB
- * ----
- * Fix for the system root bus device -- the only root-level device.
+ * We build acpi_devices for some objects that don't have _HID or _CID,
+ * e.g., PCI bridges and slots. Drivers can't bind to these objects,
+ * but we do use them indirectly by traversing the acpi_device tree.
+ * This generic ID isn't useful for driver binding, but it provides
+ * the useful property that "every acpi_device has an ID."
*/
- if (((acpi_handle)parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) {
- hid = ACPI_BUS_HID;
- strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
- strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
- }
-
- if (hid) {
- strcpy(device->pnp.hardware_id, hid);
- device->flags.hardware_id = 1;
- }
- if (uid) {
- strcpy(device->pnp.unique_id, uid);
- device->flags.unique_id = 1;
- }
- if (cid_list || cid_add) {
- struct acpi_compatible_id_list *list;
- int size = 0;
- int count = 0;
-
- if (cid_list) {
- size = cid_list->size;
- } else if (cid_add) {
- size = sizeof(struct acpi_compatible_id_list);
- cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
- if (!cid_list) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(buffer.pointer);
- return;
- } else {
- cid_list->count = 0;
- cid_list->size = size;
- }
- }
- if (cid_add)
- size += sizeof(struct acpi_compatible_id);
- list = kmalloc(size, GFP_KERNEL);
-
- if (list) {
- if (cid_list) {
- memcpy(list, cid_list, cid_list->size);
- count = cid_list->count;
- }
- if (cid_add) {
- strncpy(list->id[count].value, cid_add,
- ACPI_MAX_CID_LENGTH);
- count++;
- device->flags.compatible_ids = 1;
- }
- list->size = size;
- list->count = count;
- device->pnp.cid_list = list;
- } else
- printk(KERN_ERR PREFIX "Memory allocation error\n");
- }
-
- kfree(buffer.pointer);
+ if (list_empty(&device->pnp.ids))
+ acpi_add_id(device, "device");
}
-static int acpi_device_set_context(struct acpi_device *device, int type)
+static int acpi_device_set_context(struct acpi_device *device)
{
- acpi_status status = AE_OK;
- int result = 0;
+ acpi_status status;
+
/*
* Context
* -------
* Attach this 'struct acpi_device' to the ACPI object. This makes
- * resolutions from handle->device very efficient. Note that we need
- * to be careful with fixed-feature devices as they all attach to the
- * root object.
+ * resolutions from handle->device very efficient. Fixed hardware
+ * devices have no handles, so we skip them.
*/
- if (type != ACPI_BUS_TYPE_POWER_BUTTON &&
- type != ACPI_BUS_TYPE_SLEEP_BUTTON) {
- status = acpi_attach_data(device->handle,
- acpi_bus_data_handler, device);
+ if (!device->handle)
+ return 0;
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Error attaching device data\n");
- result = -ENODEV;
- }
- }
- return result;
+ status = acpi_attach_data(device->handle,
+ acpi_bus_data_handler, device);
+ if (ACPI_SUCCESS(status))
+ return 0;
+
+ printk(KERN_ERR PREFIX "Error attaching device data\n");
+ return -ENODEV;
}
static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
@@ -1176,17 +1138,14 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
return 0;
}
-static int
-acpi_add_single_object(struct acpi_device **child,
- struct acpi_device *parent, acpi_handle handle, int type,
- struct acpi_bus_ops *ops)
+static int acpi_add_single_object(struct acpi_device **child,
+ acpi_handle handle, int type,
+ unsigned long long sta,
+ struct acpi_bus_ops *ops)
{
- int result = 0;
- struct acpi_device *device = NULL;
-
-
- if (!child)
- return -EINVAL;
+ int result;
+ struct acpi_device *device;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
if (!device) {
@@ -1194,85 +1153,31 @@ acpi_add_single_object(struct acpi_device **child,
return -ENOMEM;
}
+ INIT_LIST_HEAD(&device->pnp.ids);
+ device->device_type = type;
device->handle = handle;
- device->parent = parent;
+ device->parent = acpi_bus_get_parent(handle);
device->bus_ops = *ops; /* workaround for .start not being called */
+ STRUCT_TO_INT(device->status) = sta;
-
- acpi_device_get_busid(device, handle, type);
+ acpi_device_get_busid(device);
/*
* Flags
* -----
- * Get prior to calling acpi_bus_get_status() so we know whether
- * or not _STA is present. Note that we only look for object
- * handles -- cannot evaluate objects until we know the device is
- * present and properly initialized.
+ * Note that we only look for object handles -- cannot evaluate objects
+ * until we know the device is present and properly initialized.
*/
result = acpi_bus_get_flags(device);
if (result)
goto end;
/*
- * Status
- * ------
- * See if the device is present. We always assume that non-Device
- * and non-Processor objects (e.g. thermal zones, power resources,
- * etc.) are present, functioning, etc. (at least when parent object
- * is present). Note that _STA has a different meaning for some
- * objects (e.g. power resources) so we need to be careful how we use
- * it.
- */
- switch (type) {
- case ACPI_BUS_TYPE_PROCESSOR:
- case ACPI_BUS_TYPE_DEVICE:
- result = acpi_bus_get_status(device);
- if (ACPI_FAILURE(result)) {
- result = -ENODEV;
- goto end;
- }
- /*
- * When the device is neither present nor functional, the
- * device should not be added to Linux ACPI device tree.
- * When the status of the device is not present but functinal,
- * it should be added to Linux ACPI tree. For example : bay
- * device , dock device.
- * In such conditions it is unncessary to check whether it is
- * bay device or dock device.
- */
- if (!device->status.present && !device->status.functional) {
- result = -ENODEV;
- goto end;
- }
- break;
- default:
- STRUCT_TO_INT(device->status) =
- ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
- ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
- break;
- }
-
- /*
* Initialize Device
* -----------------
* TBD: Synch with Core's enumeration/initialization process.
*/
-
- /*
- * Hardware ID, Unique ID, & Bus Address
- * -------------------------------------
- */
- acpi_device_set_id(device, parent, handle, type);
-
- /*
- * The ACPI device is attached to acpi handle before getting
- * the power/wakeup/peformance flags. Otherwise OS can't get
- * the corresponding ACPI device by the acpi handle in the course
- * of getting the power/wakeup/performance flags.
- */
- result = acpi_device_set_context(device, type);
- if (result)
- goto end;
+ acpi_device_set_id(device);
/*
* Power Management
@@ -1304,8 +1209,10 @@ acpi_add_single_object(struct acpi_device **child,
goto end;
}
+ if ((result = acpi_device_set_context(device)))
+ goto end;
- result = acpi_device_register(device, parent);
+ result = acpi_device_register(device);
/*
* Bind _ADR-Based Devices when hot add
@@ -1316,130 +1223,122 @@ acpi_add_single_object(struct acpi_device **child,
}
end:
- if (!result)
+ if (!result) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Adding %s [%s] parent %s\n", dev_name(&device->dev),
+ (char *) buffer.pointer,
+ device->parent ? dev_name(&device->parent->dev) :
+ "(null)"));
+ kfree(buffer.pointer);
*child = device;
- else {
- kfree(device->pnp.cid_list);
- kfree(device);
- }
+ } else
+ acpi_device_release(&device->dev);
return result;
}
-static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
+#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
+ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
+
+static int acpi_bus_type_and_status(acpi_handle handle, int *type,
+ unsigned long long *sta)
{
- acpi_status status = AE_OK;
- struct acpi_device *parent = NULL;
- struct acpi_device *child = NULL;
- acpi_handle phandle = NULL;
- acpi_handle chandle = NULL;
- acpi_object_type type = 0;
- u32 level = 1;
+ acpi_status status;
+ acpi_object_type acpi_type;
+ status = acpi_get_type(handle, &acpi_type);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- if (!start)
- return -EINVAL;
+ switch (acpi_type) {
+ case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
+ case ACPI_TYPE_DEVICE:
+ *type = ACPI_BUS_TYPE_DEVICE;
+ status = acpi_bus_get_status_handle(handle, sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ break;
+ case ACPI_TYPE_PROCESSOR:
+ *type = ACPI_BUS_TYPE_PROCESSOR;
+ status = acpi_bus_get_status_handle(handle, sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ break;
+ case ACPI_TYPE_THERMAL:
+ *type = ACPI_BUS_TYPE_THERMAL;
+ *sta = ACPI_STA_DEFAULT;
+ break;
+ case ACPI_TYPE_POWER:
+ *type = ACPI_BUS_TYPE_POWER;
+ *sta = ACPI_STA_DEFAULT;
+ break;
+ default:
+ return -ENODEV;
+ }
- parent = start;
- phandle = start->handle;
+ return 0;
+}
- /*
- * Parse through the ACPI namespace, identify all 'devices', and
- * create a new 'struct acpi_device' for each.
- */
- while ((level > 0) && parent) {
+static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
+ void *context, void **return_value)
+{
+ struct acpi_bus_ops *ops = context;
+ int type;
+ unsigned long long sta;
+ struct acpi_device *device;
+ acpi_status status;
+ int result;
- status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
- chandle, &chandle);
+ result = acpi_bus_type_and_status(handle, &type, &sta);
+ if (result)
+ return AE_OK;
- /*
- * If this scope is exhausted then move our way back up.
- */
- if (ACPI_FAILURE(status)) {
- level--;
- chandle = phandle;
- acpi_get_parent(phandle, &phandle);
- if (parent->parent)
- parent = parent->parent;
- continue;
- }
+ if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
+ !(sta & ACPI_STA_DEVICE_FUNCTIONING))
+ return AE_CTRL_DEPTH;
- status = acpi_get_type(chandle, &type);
- if (ACPI_FAILURE(status))
- continue;
+ /*
+ * We may already have an acpi_device from a previous enumeration. If
+ * so, we needn't add it again, but we may still have to start it.
+ */
+ device = NULL;
+ acpi_bus_get_device(handle, &device);
+ if (ops->acpi_op_add && !device)
+ acpi_add_single_object(&device, handle, type, sta, ops);
- /*
- * If this is a scope object then parse it (depth-first).
- */
- if (type == ACPI_TYPE_LOCAL_SCOPE) {
- level++;
- phandle = chandle;
- chandle = NULL;
- continue;
- }
+ if (!device)
+ return AE_CTRL_DEPTH;
- /*
- * We're only interested in objects that we consider 'devices'.
- */
- switch (type) {
- case ACPI_TYPE_DEVICE:
- type = ACPI_BUS_TYPE_DEVICE;
- break;
- case ACPI_TYPE_PROCESSOR:
- type = ACPI_BUS_TYPE_PROCESSOR;
- break;
- case ACPI_TYPE_THERMAL:
- type = ACPI_BUS_TYPE_THERMAL;
- break;
- case ACPI_TYPE_POWER:
- type = ACPI_BUS_TYPE_POWER;
- break;
- default:
- continue;
- }
+ if (ops->acpi_op_start && !(ops->acpi_op_add)) {
+ status = acpi_start_single_object(device);
+ if (ACPI_FAILURE(status))
+ return AE_CTRL_DEPTH;
+ }
- if (ops->acpi_op_add)
- status = acpi_add_single_object(&child, parent,
- chandle, type, ops);
- else
- status = acpi_bus_get_device(chandle, &child);
+ if (!*return_value)
+ *return_value = device;
+ return AE_OK;
+}
- if (ACPI_FAILURE(status))
- continue;
+static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
+ struct acpi_device **child)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ void *device = NULL;
- if (ops->acpi_op_start && !(ops->acpi_op_add)) {
- status = acpi_start_single_object(child);
- if (ACPI_FAILURE(status))
- continue;
- }
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
+ (char *) buffer.pointer);
- /*
- * If the device is present, enabled, and functioning then
- * parse its scope (depth-first). Note that we need to
- * represent absent devices to facilitate PnP notifications
- * -- but only the subtree head (not all of its children,
- * which will be enumerated when the parent is inserted).
- *
- * TBD: Need notifications and other detection mechanisms
- * in place before we can fully implement this.
- */
- /*
- * When the device is not present but functional, it is also
- * necessary to scan the children of this device.
- */
- if (child->status.present || (!child->status.present &&
- child->status.functional)) {
- status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
- NULL, NULL);
- if (ACPI_SUCCESS(status)) {
- level++;
- phandle = chandle;
- chandle = NULL;
- parent = child;
- }
- }
- }
+ status = acpi_bus_check_add(handle, 0, ops, &device);
+ if (ACPI_SUCCESS(status))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_check_add, ops, &device);
+ if (child)
+ *child = device;
return 0;
}
@@ -1447,36 +1346,25 @@ int
acpi_bus_add(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type)
{
- int result;
struct acpi_bus_ops ops;
memset(&ops, 0, sizeof(ops));
ops.acpi_op_add = 1;
- result = acpi_add_single_object(child, parent, handle, type, &ops);
- if (!result)
- result = acpi_bus_scan(*child, &ops);
-
- return result;
+ acpi_bus_scan(handle, &ops, child);
+ return 0;
}
EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
{
- int result;
struct acpi_bus_ops ops;
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_start = 1;
- if (!device)
- return -EINVAL;
-
- result = acpi_start_single_object(device);
- if (!result) {
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_start = 1;
- result = acpi_bus_scan(device, &ops);
- }
- return result;
+ acpi_bus_scan(device->handle, &ops, NULL);
+ return 0;
}
EXPORT_SYMBOL(acpi_bus_start);
@@ -1535,15 +1423,12 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice)
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
-static int acpi_bus_scan_fixed(struct acpi_device *root)
+static int acpi_bus_scan_fixed(void)
{
int result = 0;
struct acpi_device *device = NULL;
struct acpi_bus_ops ops;
- if (!root)
- return -ENODEV;
-
memset(&ops, 0, sizeof(ops));
ops.acpi_op_add = 1;
ops.acpi_op_start = 1;
@@ -1552,16 +1437,16 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
* Enumerate all fixed-feature devices.
*/
if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
- result = acpi_add_single_object(&device, acpi_root,
- NULL,
+ result = acpi_add_single_object(&device, NULL,
ACPI_BUS_TYPE_POWER_BUTTON,
+ ACPI_STA_DEFAULT,
&ops);
}
if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
- result = acpi_add_single_object(&device, acpi_root,
- NULL,
+ result = acpi_add_single_object(&device, NULL,
ACPI_BUS_TYPE_SLEEP_BUTTON,
+ ACPI_STA_DEFAULT,
&ops);
}
@@ -1584,24 +1469,15 @@ int __init acpi_scan_init(void)
}
/*
- * Create the root device in the bus's device tree
- */
- result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
- ACPI_BUS_TYPE_SYSTEM, &ops);
- if (result)
- goto Done;
-
- /*
* Enumerate devices in the ACPI namespace.
*/
- result = acpi_bus_scan_fixed(acpi_root);
+ result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
if (!result)
- result = acpi_bus_scan(acpi_root, &ops);
+ result = acpi_bus_scan_fixed();
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
-Done:
return result;
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index feece693d77..a90afcc723a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -405,6 +405,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
+ .callback = init_set_sci_en_on_resume,
+ .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
+ },
+ },
+ {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 9c61ab2177c..d11282975f3 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -31,6 +31,8 @@
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("system");
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 646d39c031c..f336bca7c45 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,6 +213,9 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
+ if (acpi_disabled)
+ return -ENODEV;
+
if (!handler)
return -EINVAL;
@@ -277,6 +280,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
+ if (acpi_disabled)
+ return -ENODEV;
+
if (!handler)
return -EINVAL;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 564ea142428..65f67815902 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -47,6 +47,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
#define ACPI_THERMAL_FILE_STATE "state"
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f844941089b..811fec10462 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -30,6 +30,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include "internal.h"
+
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("utils");
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 60ea984c84a..94b1a4c5aba 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -40,10 +40,12 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/uaccess.h>
-
+#include <linux/dmi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_VIDEO_CLASS "video"
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
@@ -198,7 +200,7 @@ struct acpi_video_device {
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
- struct thermal_cooling_device *cdev;
+ struct thermal_cooling_device *cooling_dev;
struct output_device *output_dev;
};
@@ -387,20 +389,20 @@ static struct output_properties acpi_output_properties = {
/* thermal cooling device callbacks */
-static int video_get_max_state(struct thermal_cooling_device *cdev, unsigned
+static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
long *state)
{
- struct acpi_device *device = cdev->devdata;
+ struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
*state = video->brightness->count - 3;
return 0;
}
-static int video_get_cur_state(struct thermal_cooling_device *cdev, unsigned
+static int video_get_cur_state(struct thermal_cooling_device *cooling_dev, unsigned
long *state)
{
- struct acpi_device *device = cdev->devdata;
+ struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
unsigned long long level;
int offset;
@@ -417,9 +419,9 @@ static int video_get_cur_state(struct thermal_cooling_device *cdev, unsigned
}
static int
-video_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
- struct acpi_device *device = cdev->devdata;
+ struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
int level;
@@ -603,6 +605,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
unsigned long long *level)
{
acpi_status status = AE_OK;
+ int i;
if (device->cap._BQC || device->cap._BCQ) {
char *buf = device->cap._BQC ? "_BQC" : "_BCQ";
@@ -618,8 +621,15 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
}
*level += bqc_offset_aml_bug_workaround;
- device->brightness->curr = *level;
- return 0;
+ for (i = 2; i < device->brightness->count; i++)
+ if (device->brightness->levels[i] == *level) {
+ device->brightness->curr = *level;
+ return 0;
+ }
+ /* BQC returned an invalid level. Stop using it. */
+ ACPI_WARNING((AE_INFO, "%s returned an invalid level",
+ buf));
+ device->cap._BQC = device->cap._BCQ = 0;
} else {
/* Fixme:
* should we return an error or ignore this failure?
@@ -870,7 +880,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
br->flags._BCM_use_index = br->flags._BCL_use_index;
/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
- br->curr = level_old = max_level;
+ br->curr = level = max_level;
if (!device->cap._BQC)
goto set_level;
@@ -892,15 +902,25 @@ acpi_video_init_brightness(struct acpi_video_device *device)
br->flags._BQC_use_index = (level == max_level ? 0 : 1);
- if (!br->flags._BQC_use_index)
+ if (!br->flags._BQC_use_index) {
+ /*
+ * Set the backlight to the initial state.
+ * On some buggy laptops, _BQC returns an uninitialized value
+ * when invoked for the first time, i.e. level_old is invalid.
+ * Set the backlight to max_level in this case.
+ */
+ for (i = 2; i < br->count; i++)
+ if (level_old == br->levels[i])
+ level = level_old;
goto set_level;
+ }
if (br->flags._BCL_reversed)
level_old = (br->count - 1) - level_old;
- level_old = br->levels[level_old];
+ level = br->levels[level_old];
set_level:
- result = acpi_video_device_lcd_set_level(device, level_old);
+ result = acpi_video_device_lcd_set_level(device, level);
if (result)
goto out_free_levels;
@@ -934,9 +954,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
{
acpi_handle h_dummy1;
-
- memset(&device->cap, 0, sizeof(device->cap));
-
if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
device->cap._ADR = 1;
}
@@ -990,19 +1007,29 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
- device->cdev = thermal_cooling_device_register("LCD",
+ device->cooling_dev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
- if (IS_ERR(device->cdev))
+ if (IS_ERR(device->cooling_dev)) {
+ /*
+ * Set cooling_dev to NULL so we don't crash trying to
+ * free it.
+ * Also, why the hell are we returning early and
+ * not attempting to register video output if cooling
+ * device registration failed?
+ * -- dtor
+ */
+ device->cooling_dev = NULL;
return;
+ }
dev_info(&device->dev->dev, "registered as cooling_device%d\n",
- device->cdev->id);
+ device->cooling_dev->id);
result = sysfs_create_link(&device->dev->dev.kobj,
- &device->cdev->device.kobj,
+ &device->cooling_dev->device.kobj,
"thermal_cooling");
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
- result = sysfs_create_link(&device->cdev->device.kobj,
+ result = sysfs_create_link(&device->cooling_dev->device.kobj,
&device->dev->dev.kobj, "device");
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
@@ -1039,7 +1066,6 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
{
acpi_handle h_dummy1;
- memset(&video->cap, 0, sizeof(video->cap));
if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
video->cap._DOS = 1;
}
@@ -2009,13 +2035,13 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
backlight_device_unregister(device->backlight);
device->backlight = NULL;
}
- if (device->cdev) {
+ if (device->cooling_dev) {
sysfs_remove_link(&device->dev->dev.kobj,
"thermal_cooling");
- sysfs_remove_link(&device->cdev->device.kobj,
+ sysfs_remove_link(&device->cooling_dev->device.kobj,
"device");
- thermal_cooling_device_unregister(device->cdev);
- device->cdev = NULL;
+ thermal_cooling_device_unregister(device->cooling_dev);
+ device->cooling_dev = NULL;
}
video_output_unregister(device->output_dev);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 7cd2b63435e..7032f25da9b 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -38,6 +38,8 @@
#include <linux/dmi.h>
#include <linux/pci.h>
+#define PREFIX "ACPI: "
+
ACPI_MODULE_NAME("video");
#define _COMPONENT ACPI_VIDEO_COMPONENT
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 122c786449a..d0a7df2e5ca 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -624,7 +624,7 @@ static struct ata_port_operations hpt374_fn1_port_ops = {
};
/**
- * htp37x_clock_slot - Turn timing to PC clock entry
+ * hpt37x_clock_slot - Turn timing to PC clock entry
* @freq: Reported frequency timing
* @base: Base timing
*
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 2de64065aa1..29e66d603d3 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -790,11 +790,15 @@ he_init_group(struct he_dev *he_dev, int group)
he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
if (he_dev->rbps_base == NULL) {
- hprintk("failed to alloc rbps\n");
- return -ENOMEM;
+ hprintk("failed to alloc rbps_base\n");
+ goto out_destroy_rbps_pool;
}
memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
+ if (he_dev->rbps_virt == NULL) {
+ hprintk("failed to alloc rbps_virt\n");
+ goto out_free_rbps_base;
+ }
for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
dma_addr_t dma_handle;
@@ -802,7 +806,7 @@ he_init_group(struct he_dev *he_dev, int group)
cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
if (cpuaddr == NULL)
- return -ENOMEM;
+ goto out_free_rbps_virt;
he_dev->rbps_virt[i].virt = cpuaddr;
he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
@@ -827,17 +831,21 @@ he_init_group(struct he_dev *he_dev, int group)
CONFIG_RBPL_BUFSIZE, 8, 0);
if (he_dev->rbpl_pool == NULL) {
hprintk("unable to create rbpl pool\n");
- return -ENOMEM;
+ goto out_free_rbps_virt;
}
he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
if (he_dev->rbpl_base == NULL) {
- hprintk("failed to alloc rbpl\n");
- return -ENOMEM;
+ hprintk("failed to alloc rbpl_base\n");
+ goto out_destroy_rbpl_pool;
}
memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
+ if (he_dev->rbpl_virt == NULL) {
+ hprintk("failed to alloc rbpl_virt\n");
+ goto out_free_rbpl_base;
+ }
for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
dma_addr_t dma_handle;
@@ -845,7 +853,7 @@ he_init_group(struct he_dev *he_dev, int group)
cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
if (cpuaddr == NULL)
- return -ENOMEM;
+ goto out_free_rbpl_virt;
he_dev->rbpl_virt[i].virt = cpuaddr;
he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
@@ -870,7 +878,7 @@ he_init_group(struct he_dev *he_dev, int group)
CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
- return -ENOMEM;
+ goto out_free_rbpl_virt;
}
memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
@@ -894,7 +902,7 @@ he_init_group(struct he_dev *he_dev, int group)
CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
- return -ENOMEM;
+ goto out_free_rbpq_base;
}
memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
@@ -906,6 +914,39 @@ he_init_group(struct he_dev *he_dev, int group)
he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
return 0;
+
+out_free_rbpq_base:
+ pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
+ sizeof(struct he_rbrq), he_dev->rbrq_base,
+ he_dev->rbrq_phys);
+ i = CONFIG_RBPL_SIZE;
+out_free_rbpl_virt:
+ while (i--)
+ pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
+ he_dev->rbpl_base[i].phys);
+ kfree(he_dev->rbpl_virt);
+
+out_free_rbpl_base:
+ pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
+ sizeof(struct he_rbp), he_dev->rbpl_base,
+ he_dev->rbpl_phys);
+out_destroy_rbpl_pool:
+ pci_pool_destroy(he_dev->rbpl_pool);
+
+ i = CONFIG_RBPS_SIZE;
+out_free_rbps_virt:
+ while (i--)
+ pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
+ he_dev->rbps_base[i].phys);
+ kfree(he_dev->rbps_virt);
+
+out_free_rbps_base:
+ pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE *
+ sizeof(struct he_rbp), he_dev->rbps_base,
+ he_dev->rbps_phys);
+out_destroy_rbps_pool:
+ pci_pool_destroy(he_dev->rbps_pool);
+ return -ENOMEM;
}
static int __devinit
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index efa2808dd94..1a9332e4efe 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -25,6 +25,10 @@ SOLOS_ATTR_RO(RSCorrectedErrorsUp)
SOLOS_ATTR_RO(RSUnCorrectedErrorsUp)
SOLOS_ATTR_RO(InterleaveRDn)
SOLOS_ATTR_RO(InterleaveRUp)
+SOLOS_ATTR_RO(BisRDn)
+SOLOS_ATTR_RO(BisRUp)
+SOLOS_ATTR_RO(INPdown)
+SOLOS_ATTR_RO(INPup)
SOLOS_ATTR_RO(ShowtimeStart)
SOLOS_ATTR_RO(ATURVendor)
SOLOS_ATTR_RO(ATUCCountry)
@@ -62,6 +66,13 @@ SOLOS_ATTR_RW(Defaults)
SOLOS_ATTR_RW(LineMode)
SOLOS_ATTR_RW(Profile)
SOLOS_ATTR_RW(DetectNoise)
+SOLOS_ATTR_RW(BisAForceSNRMarginDn)
+SOLOS_ATTR_RW(BisMForceSNRMarginDn)
+SOLOS_ATTR_RW(BisAMaxMargin)
+SOLOS_ATTR_RW(BisMMaxMargin)
+SOLOS_ATTR_RW(AnnexAForceSNRMarginDn)
+SOLOS_ATTR_RW(AnnexAMaxMargin)
+SOLOS_ATTR_RW(AnnexMMaxMargin)
SOLOS_ATTR_RO(SupportedAnnexes)
SOLOS_ATTR_RO(Status)
SOLOS_ATTR_RO(TotalStart)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 307321b32cb..c5f5186d62a 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -59,21 +59,29 @@
#define RX_DMA_ADDR(port) (0x30 + (4 * (port)))
#define DATA_RAM_SIZE 32768
-#define BUF_SIZE 4096
+#define BUF_SIZE 2048
+#define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/
#define FPGA_PAGE 528 /* FPGA flash page size*/
#define SOLOS_PAGE 512 /* Solos flash page size*/
#define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/
#define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/
-#define RX_BUF(card, nr) ((card->buffers) + (nr)*BUF_SIZE*2)
-#define TX_BUF(card, nr) ((card->buffers) + (nr)*BUF_SIZE*2 + BUF_SIZE)
+#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
+#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
+#define FLASH_BUF ((card->buffers) + 4*(card->buffer_size)*2)
#define RX_DMA_SIZE 2048
+#define FPGA_VERSION(a,b) (((a) << 8) + (b))
+#define LEGACY_BUFFERS 2
+#define DMA_SUPPORTED 4
+
static int reset = 0;
static int atmdebug = 0;
static int firmware_upgrade = 0;
static int fpga_upgrade = 0;
+static int db_firmware_upgrade = 0;
+static int db_fpga_upgrade = 0;
struct pkt_hdr {
__le16 size;
@@ -116,6 +124,8 @@ struct solos_card {
wait_queue_head_t param_wq;
wait_queue_head_t fw_wq;
int using_dma;
+ int fpga_version;
+ int buffer_size;
};
@@ -136,10 +146,14 @@ MODULE_PARM_DESC(reset, "Reset Solos chips on startup");
MODULE_PARM_DESC(atmdebug, "Print ATM data");
MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade");
MODULE_PARM_DESC(fpga_upgrade, "Initiate FPGA upgrade");
+MODULE_PARM_DESC(db_firmware_upgrade, "Initiate daughter board Solos firmware upgrade");
+MODULE_PARM_DESC(db_fpga_upgrade, "Initiate daughter board FPGA upgrade");
module_param(reset, int, 0444);
module_param(atmdebug, int, 0644);
module_param(firmware_upgrade, int, 0444);
module_param(fpga_upgrade, int, 0444);
+module_param(db_firmware_upgrade, int, 0444);
+module_param(db_fpga_upgrade, int, 0444);
static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
struct atm_vcc *vcc);
@@ -517,10 +531,32 @@ static int flash_upgrade(struct solos_card *card, int chip)
if (chip == 0) {
fw_name = "solos-FPGA.bin";
blocksize = FPGA_BLOCK;
- } else {
+ }
+
+ if (chip == 1) {
fw_name = "solos-Firmware.bin";
blocksize = SOLOS_BLOCK;
}
+
+ if (chip == 2) {
+ if (card->fpga_version > LEGACY_BUFFERS) {
+ fw_name = "solos-db-FPGA.bin";
+ blocksize = FPGA_BLOCK;
+ } else {
+ dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+ return -EPERM;
+ }
+ }
+
+ if (chip == 3) {
+ if (card->fpga_version > LEGACY_BUFFERS) {
+ fw_name = "solos-Firmware.bin";
+ blocksize = SOLOS_BLOCK;
+ } else {
+ dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+ return -EPERM;
+ }
+ }
if (request_firmware(&fw, fw_name, &card->dev->dev))
return -ENOENT;
@@ -536,8 +572,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
data32 = ioread32(card->config_regs + FPGA_MODE);
/* Set mode to Chip Erase */
- dev_info(&card->dev->dev, "Set FPGA Flash mode to %s Chip Erase\n",
- chip?"Solos":"FPGA");
+ if (chip == 0 || chip == 2)
+ dev_info(&card->dev->dev, "Set FPGA Flash mode to FPGA Chip Erase\n");
+ if (chip == 1 || chip == 3)
+ dev_info(&card->dev->dev, "Set FPGA Flash mode to Solos Chip Erase\n");
iowrite32((chip * 2), card->config_regs + FLASH_MODE);
@@ -557,7 +595,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
/* Copy block to buffer, swapping each 16 bits */
for(i = 0; i < blocksize; i += 4) {
uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
- iowrite32(word, RX_BUF(card, 3) + i);
+ if (card->fpga_version > LEGACY_BUFFERS)
+ iowrite32(word, FLASH_BUF + i);
+ else
+ iowrite32(word, RX_BUF(card, 3) + i);
}
/* Specify block number and then trigger flash write */
@@ -630,6 +671,10 @@ void solos_bh(unsigned long card_arg)
memcpy_fromio(header, RX_BUF(card, port), sizeof(*header));
size = le16_to_cpu(header->size);
+ if (size > (card->buffer_size - sizeof(*header))) {
+ dev_warn(&card->dev->dev, "Invalid buffer size\n");
+ continue;
+ }
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
@@ -1094,12 +1139,18 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
fpga_ver = (data32 & 0x0000FFFF);
major_ver = ((data32 & 0xFF000000) >> 24);
minor_ver = ((data32 & 0x00FF0000) >> 16);
+ card->fpga_version = FPGA_VERSION(major_ver, minor_ver);
+ if (card->fpga_version > LEGACY_BUFFERS)
+ card->buffer_size = BUF_SIZE;
+ else
+ card->buffer_size = OLD_BUF_SIZE;
dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
major_ver, minor_ver, fpga_ver);
- if (0 && fpga_ver > 27)
+ if (card->fpga_version >= DMA_SUPPORTED) {
card->using_dma = 1;
- else {
+ } else {
+ card->using_dma = 0;
/* Set RX empty flag for all ports */
iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
}
@@ -1131,6 +1182,12 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (firmware_upgrade)
flash_upgrade(card, 1);
+ if (db_fpga_upgrade)
+ flash_upgrade(card, 2);
+
+ if (db_firmware_upgrade)
+ flash_upgrade(card, 3);
+
err = atm_init(card);
if (err)
goto out_free_irq;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 91d4087b403..1fe5536d404 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -85,6 +85,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d FilePages: %8lu kB\n"
"Node %d Mapped: %8lu kB\n"
"Node %d AnonPages: %8lu kB\n"
+ "Node %d Shmem: %8lu kB\n"
+ "Node %d KernelStack: %8lu kB\n"
"Node %d PageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
@@ -116,6 +118,9 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
nid, K(node_page_state(nid, NR_ANON_PAGES)),
+ nid, K(node_page_state(nid, NR_SHMEM)),
+ nid, node_page_state(nid, NR_KERNEL_STACK) *
+ THREAD_SIZE / 1024,
nid, K(node_page_state(nid, NR_PAGETABLE)),
nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
nid, K(node_page_state(nid, NR_BOUNCE)),
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 1e6b7c14f69..6fa7b0fdbdf 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -152,7 +152,7 @@ static int DAC960_revalidate_disk(struct gendisk *disk)
return 0;
}
-static struct block_device_operations DAC960_BlockDeviceOperations = {
+static const struct block_device_operations DAC960_BlockDeviceOperations = {
.owner = THIS_MODULE,
.open = DAC960_open,
.getgeo = DAC960_getgeo,
@@ -6562,7 +6562,7 @@ static int DAC960_ProcWriteUserCommand(struct file *file,
if (copy_from_user(CommandBuffer, Buffer, Count)) return -EFAULT;
CommandBuffer[Count] = '\0';
Length = strlen(CommandBuffer);
- if (CommandBuffer[Length-1] == '\n')
+ if (Length > 0 && CommandBuffer[Length-1] == '\n')
CommandBuffer[--Length] = '\0';
if (Controller->FirmwareType == DAC960_V1_Controller)
return (DAC960_V1_ExecuteUserCommand(Controller, CommandBuffer)
@@ -6653,7 +6653,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
else ErrorCode = get_user(ControllerNumber,
&UserSpaceControllerInfo->ControllerNumber);
if (ErrorCode != 0)
- break;;
+ break;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1) {
@@ -6661,7 +6661,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
}
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;;
+ break;
memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
ControllerInfo.ControllerNumber = ControllerNumber;
ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -7210,7 +7210,7 @@ static struct pci_driver DAC960_pci_driver = {
.remove = DAC960_Remove,
};
-static int DAC960_init_module(void)
+static int __init DAC960_init_module(void)
{
int ret;
@@ -7222,7 +7222,7 @@ static int DAC960_init_module(void)
return ret;
}
-static void DAC960_cleanup_module(void)
+static void __exit DAC960_cleanup_module(void)
{
int i;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 2f07b7c99a9..05522583902 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1632,7 +1632,7 @@ static int amiga_floppy_change(struct gendisk *disk)
return 0;
}
-static struct block_device_operations floppy_fops = {
+static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index b6cd571adbf..3af97d4da2d 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -237,7 +237,7 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations aoe_bdops = {
+static const struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
.getgeo = aoeblk_getgeo,
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 3ff02941b3d..847a9e57570 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1856,7 +1856,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-static struct block_device_operations floppy_fops = {
+static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4bf8705b3ac..4f688434daf 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -375,7 +375,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
return error;
}
-static struct block_device_operations brd_fops = {
+static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.locked_ioctl = brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d8372b43282..24c3e21ab26 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -205,7 +205,7 @@ static int cciss_compat_ioctl(struct block_device *, fmode_t,
unsigned, unsigned long);
#endif
-static struct block_device_operations cciss_fops = {
+static const struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
.open = cciss_open,
.release = cciss_release,
@@ -363,7 +363,7 @@ static void cciss_seq_stop(struct seq_file *seq, void *v)
h->busy_configuring = 0;
}
-static struct seq_operations cciss_seq_ops = {
+static const struct seq_operations cciss_seq_ops = {
.start = cciss_seq_start,
.show = cciss_seq_show,
.next = cciss_seq_next,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 44fa2018f6b..b82d438e260 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -193,7 +193,7 @@ static inline ctlr_info_t *get_host(struct gendisk *disk)
}
-static struct block_device_operations ida_fops = {
+static const struct block_device_operations ida_fops = {
.owner = THIS_MODULE,
.open = ida_open,
.release = ida_release,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2b387c2260d..5c01f747571 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3907,7 +3907,7 @@ static int floppy_revalidate(struct gendisk *disk)
return res;
}
-static struct block_device_operations floppy_fops = {
+static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index f9d01608cbe..d5cdce08ffd 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -692,7 +692,7 @@ static irqreturn_t hd_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct block_device_operations hd_fops = {
+static const struct block_device_operations hd_fops = {
.getgeo = hd_getgeo,
};
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bbb79441d89..edda9ea7c62 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1438,7 +1438,7 @@ out_unlocked:
return 0;
}
-static struct block_device_operations lo_fops = {
+static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
.open = lo_open,
.release = lo_release,
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 6d7fbaa9224..e0339aaa181 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -775,7 +775,7 @@ static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations mg_disk_ops = {
+static const struct block_device_operations mg_disk_ops = {
.getgeo = mg_getgeo
};
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5d23ffad7c7..cc923a5b430 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -722,7 +722,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
return error;
}
-static struct block_device_operations nbd_fops =
+static const struct block_device_operations nbd_fops =
{
.owner = THIS_MODULE,
.locked_ioctl = nbd_ioctl,
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 13c1aee6aa3..a808b1530b3 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -125,7 +125,7 @@ static struct class *class_osdblk; /* /sys/class/osdblk */
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
static LIST_HEAD(osdblkdev_list);
-static struct block_device_operations osdblk_bd_ops = {
+static const struct block_device_operations osdblk_bd_ops = {
.owner = THIS_MODULE,
};
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 9f3518c515a..8866ca369d5 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -247,7 +247,7 @@ static int pcd_block_media_changed(struct gendisk *disk)
return cdrom_media_changed(&cd->info);
}
-static struct block_device_operations pcd_bdops = {
+static const struct block_device_operations pcd_bdops = {
.owner = THIS_MODULE,
.open = pcd_block_open,
.release = pcd_block_release,
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index bf5955b3d87..569e39e8f11 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -807,7 +807,7 @@ static int pd_revalidate(struct gendisk *p)
return 0;
}
-static struct block_device_operations pd_fops = {
+static const struct block_device_operations pd_fops = {
.owner = THIS_MODULE,
.open = pd_open,
.release = pd_release,
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 68a90834e99..ea54ea39355 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -262,7 +262,7 @@ static char *pf_buf; /* buffer for request in progress */
/* kernel glue structures */
-static struct block_device_operations pf_fops = {
+static const struct block_device_operations pf_fops = {
.owner = THIS_MODULE,
.open = pf_open,
.release = pf_release,
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index fd5bb8ad59a..2ddf03ae034 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2849,7 +2849,7 @@ static int pkt_media_changed(struct gendisk *disk)
return attached_disk->fops->media_changed(attached_disk);
}
-static struct block_device_operations pktcdvd_ops = {
+static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
.release = pkt_close,
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 34cbb7f3efa..03a130dca8a 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -82,7 +82,7 @@ enum lv1_ata_in_out {
static int ps3disk_major;
-static struct block_device_operations ps3disk_fops = {
+static const struct block_device_operations ps3disk_fops = {
.owner = THIS_MODULE,
};
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c8753a9ed29..3bb7c47c869 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -88,7 +88,7 @@ struct ps3vram_priv {
static int ps3vram_major;
-static struct block_device_operations ps3vram_fops = {
+static const struct block_device_operations ps3vram_fops = {
.owner = THIS_MODULE,
};
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index cbfd9c0aef0..411f064760b 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -103,7 +103,7 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations vdc_fops = {
+static const struct block_device_operations vdc_fops = {
.owner = THIS_MODULE,
.getgeo = vdc_getgeo,
};
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index cf7877fb8a7..8f569e3df89 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -748,7 +748,7 @@ static int floppy_revalidate(struct gendisk *disk)
return !fs->disk_in;
}
-static struct block_device_operations floppy_fops = {
+static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 80df93e3cdd..6380ad8d91b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -998,7 +998,7 @@ static int floppy_revalidate(struct gendisk *disk)
return ret;
}
-static struct block_device_operations floppy_fops = {
+static const struct block_device_operations floppy_fops = {
.open = floppy_open,
.release = floppy_release,
.locked_ioctl = floppy_ioctl,
@@ -1062,7 +1062,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
goto out_release;
}
fs->swim3_intr = macio_irq(mdev, 0);
- fs->dma_intr = macio_irq(mdev, 1);;
+ fs->dma_intr = macio_irq(mdev, 1);
fs->cur_cyl = -1;
fs->cur_sector = -1;
fs->secpercyl = 36;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index f5cd2e83ebc..a7c4184f4a6 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -423,7 +423,7 @@ static struct pci_driver carm_driver = {
.remove = carm_remove_one,
};
-static struct block_device_operations carm_bd_ops = {
+static const struct block_device_operations carm_bd_ops = {
.owner = THIS_MODULE,
.getgeo = carm_bdev_getgeo,
};
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index cc54473b8e7..c739b203fe9 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1789,7 +1789,7 @@ static int ub_bd_media_changed(struct gendisk *disk)
return lun->changed;
}
-static struct block_device_operations ub_bd_fops = {
+static const struct block_device_operations ub_bd_fops = {
.owner = THIS_MODULE,
.open = ub_bd_open,
.release = ub_bd_release,
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 858c34dd032..ad1ba393801 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -140,7 +140,6 @@ struct cardinfo {
};
static struct cardinfo cards[MM_MAXCARDS];
-static struct block_device_operations mm_fops;
static struct timer_list battery_timer;
static int num_cards;
@@ -789,7 +788,7 @@ static int mm_check_change(struct gendisk *disk)
return 0;
}
-static struct block_device_operations mm_fops = {
+static const struct block_device_operations mm_fops = {
.owner = THIS_MODULE,
.getgeo = mm_getgeo,
.revalidate_disk = mm_revalidate,
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index b441ce3832e..a8c8b56b275 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -219,7 +219,7 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/*
* Our file operations table
*/
-static struct block_device_operations viodasd_fops = {
+static const struct block_device_operations viodasd_fops = {
.owner = THIS_MODULE,
.open = viodasd_open,
.release = viodasd_release,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index aa1a3d5a3e2..43f19389647 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -3,6 +3,7 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
@@ -91,15 +92,26 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
return false;
vbr->req = req;
- if (blk_fs_request(vbr->req)) {
+ switch (req->cmd_type) {
+ case REQ_TYPE_FS:
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = blk_rq_pos(vbr->req);
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- } else if (blk_pc_request(vbr->req)) {
+ break;
+ case REQ_TYPE_BLOCK_PC:
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
- } else {
+ break;
+ case REQ_TYPE_LINUX_BLOCK:
+ if (req->cmd[0] == REQ_LB_OP_FLUSH) {
+ vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
+ }
+ /*FALLTHRU*/
+ default:
/* We don't put anything else in the queue. */
BUG();
}
@@ -139,7 +151,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
+ if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -199,6 +211,12 @@ out:
return err;
}
+static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
+{
+ req->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ req->cmd[0] = REQ_LB_OP_FLUSH;
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
@@ -243,7 +261,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations virtblk_fops = {
+static const struct block_device_operations virtblk_fops = {
.locked_ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
@@ -337,7 +355,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
index++;
/* If barriers are supported, tell block layer that queue is ordered */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
+ blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ virtblk_prepare_flush);
+ else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
/* If disk is read-only in the host, the guest should obey */
@@ -424,7 +445,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
+ VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH
};
/*
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index ce242921992..0877d3628fd 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -130,7 +130,7 @@ static struct gendisk *xd_gendisk[2];
static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static struct block_device_operations xd_fops = {
+static const struct block_device_operations xd_fops = {
.owner = THIS_MODULE,
.locked_ioctl = xd_ioctl,
.getgeo = xd_getgeo,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e53284767f7..b8578bb3f4c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ struct blk_shadow {
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
-static struct block_device_operations xlvbd_block_fops;
+static const struct block_device_operations xlvbd_block_fops;
#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
@@ -1039,7 +1039,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-static struct block_device_operations xlvbd_block_fops =
+static const struct block_device_operations xlvbd_block_fops =
{
.owner = THIS_MODULE,
.open = blkif_open,
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index b20abe102a2..e5c5415eb45 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -941,7 +941,7 @@ static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations ace_fops = {
+static const struct block_device_operations ace_fops = {
.owner = THIS_MODULE,
.open = ace_open,
.release = ace_release,
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index b2590409f25..64f941e0f14 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -64,7 +64,6 @@ static int current_device = -1;
static DEFINE_SPINLOCK(z2ram_lock);
-static struct block_device_operations z2_fops;
static struct gendisk *z2ram_gendisk;
static void do_z2_request(struct request_queue *q)
@@ -315,7 +314,7 @@ z2_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-static struct block_device_operations z2_fops =
+static const struct block_device_operations z2_fops =
{
.owner = THIS_MODULE,
.open = z2_open,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 71d1b9bab70..614da5b8613 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3412,7 +3412,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
return 0;
}
-static int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp,
+static int cdrom_sysctl_info(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
@@ -3489,7 +3489,7 @@ static int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp,
goto done;
doit:
mutex_unlock(&cdrom_mutex);
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, buffer, lenp, ppos);
done:
printk(KERN_INFO "cdrom: info buffer too small\n");
goto doit;
@@ -3525,12 +3525,12 @@ static void cdrom_update_settings(void)
mutex_unlock(&cdrom_mutex);
}
-static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
+static int cdrom_sysctl_handler(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write) {
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index b5621f27c4b..a762283d2a2 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -512,7 +512,7 @@ static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
}
-static struct block_device_operations gdrom_bdops = {
+static const struct block_device_operations gdrom_bdops = {
.owner = THIS_MODULE,
.open = gdrom_bdops_open,
.release = gdrom_bdops_release,
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 0fff646cc2f..57ca69e0ac5 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -177,7 +177,7 @@ static int viocd_blk_media_changed(struct gendisk *disk)
return cdrom_media_changed(&di->viocd_info);
}
-struct block_device_operations viocd_fops = {
+static const struct block_device_operations viocd_fops = {
.owner = THIS_MODULE,
.open = viocd_blk_open,
.release = viocd_blk_release,
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 6a06913b01d..08a6f50ae79 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1087,6 +1087,14 @@ config MMTIMER
The mmtimer device allows direct userspace access to the
Altix system timer.
+config UV_MMTIMER
+ tristate "UV_MMTIMER Memory mapped RTC for SGI UV"
+ depends on X86_UV
+ default m
+ help
+ The uv_mmtimer device allows direct userspace access to the
+ UV system timer.
+
source "drivers/char/tpm/Kconfig"
config TELCLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 66f779ad4f4..19a79dd79ee 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
+obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_VIOTAPE) += viotape.o
obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_IBM_BSR) += bsr.o
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index ad87753f6de..a56ca080e10 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -114,9 +114,9 @@ static int agp_find_max(void)
long memory, index, result;
#if PAGE_SHIFT < 20
- memory = num_physpages >> (20 - PAGE_SHIFT);
+ memory = totalram_pages >> (20 - PAGE_SHIFT);
#else
- memory = num_physpages << (PAGE_SHIFT - 20);
+ memory = totalram_pages << (PAGE_SHIFT - 20);
#endif
index = 1;
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 501e293e5ad..9047b271465 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -476,7 +476,6 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
{
acpi_handle handle, parent;
acpi_status status;
- struct acpi_buffer buffer;
struct acpi_device_info *info;
u64 lba_hpa, sba_hpa, length;
int match;
@@ -488,13 +487,11 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
/* Look for an enclosing IOC scope and find its CSR space */
handle = obj;
do {
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_get_object_info(handle, &buffer);
+ status = acpi_get_object_info(handle, &info);
if (ACPI_SUCCESS(status)) {
/* TBD check _CID also */
- info = buffer.pointer;
- info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
- match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
+ info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
+ match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
kfree(info);
if (match) {
status = hp_acpi_csr_space(handle, &sba_hpa, &length);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 1540e693d91..4068467ce7b 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,8 @@
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
@@ -91,6 +93,7 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
@@ -804,23 +807,39 @@ static void intel_i830_setup_flush(void)
if (!intel_private.i8xx_page)
return;
- /* make page uncached */
- map_page_into_agp(intel_private.i8xx_page);
-
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
intel_i830_fini_flush();
}
+static void
+do_wbinvd(void *null)
+{
+ wbinvd();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out. It appears to work.
+ */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
unsigned int *pg = intel_private.i8xx_flush_page;
- int i;
- for (i = 0; i < 256; i += 2)
- *(pg + i) = i;
+ memset(pg, 0, 1024);
- wmb();
+ if (cpu_has_clflush) {
+ clflush_cache_range(pg, 1024);
+ } else {
+ if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
}
/* The intel i830 automatically initializes the agp aperture during POST.
@@ -1341,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
case PCI_DEVICE_ID_INTEL_G41_HB:
+ case PCI_DEVICE_ID_INTEL_B43_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
@@ -2335,6 +2355,8 @@ static const struct intel_driver_description {
"Q45/Q43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
"G45/G43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+ "B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
@@ -2535,6 +2557,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 20ef1bf5e72..703959eba45 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -270,7 +270,7 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
/*
- * We need to to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
+ * We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
* 2.2 and 2.3, Darwin do so.
*/
if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index 0a01329451e..e3dd24bff51 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -1,8 +1,7 @@
/*
* Blackfin On-Chip OTP Memory Interface
- * Supports BF52x/BF54x
*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -17,8 +16,10 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include <mtd/mtd-abi.h>
#include <asm/blackfin.h>
+#include <asm/bfrom.h>
#include <asm/uaccess.h>
#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
@@ -30,39 +31,6 @@
static DEFINE_MUTEX(bfin_otp_lock);
-/* OTP Boot ROM functions */
-#define _BOOTROM_OTP_COMMAND 0xEF000018
-#define _BOOTROM_OTP_READ 0xEF00001A
-#define _BOOTROM_OTP_WRITE 0xEF00001C
-
-static u32 (* const otp_command)(u32 command, u32 value) = (void *)_BOOTROM_OTP_COMMAND;
-static u32 (* const otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)_BOOTROM_OTP_READ;
-static u32 (* const otp_write)(u32 page, u32 flags, u64 *page_content) = (void *)_BOOTROM_OTP_WRITE;
-
-/* otp_command(): defines for "command" */
-#define OTP_INIT 0x00000001
-#define OTP_CLOSE 0x00000002
-
-/* otp_{read,write}(): defines for "flags" */
-#define OTP_LOWER_HALF 0x00000000 /* select upper/lower 64-bit half (bit 0) */
-#define OTP_UPPER_HALF 0x00000001
-#define OTP_NO_ECC 0x00000010 /* do not use ECC */
-#define OTP_LOCK 0x00000020 /* sets page protection bit for page */
-#define OTP_ACCESS_READ 0x00001000
-#define OTP_ACCESS_READWRITE 0x00002000
-
-/* Return values for all functions */
-#define OTP_SUCCESS 0x00000000
-#define OTP_MASTER_ERROR 0x001
-#define OTP_WRITE_ERROR 0x003
-#define OTP_READ_ERROR 0x005
-#define OTP_ACC_VIO_ERROR 0x009
-#define OTP_DATA_MULT_ERROR 0x011
-#define OTP_ECC_MULT_ERROR 0x021
-#define OTP_PREV_WR_ERROR 0x041
-#define OTP_DATA_SB_WARN 0x100
-#define OTP_ECC_SB_WARN 0x200
-
/**
* bfin_otp_read - Read OTP pages
*
@@ -86,9 +54,11 @@ static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count,
page = *pos / (sizeof(u64) * 2);
while (bytes_done < count) {
flags = (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF);
- stamp("processing page %i (%s)", page, (flags == OTP_UPPER_HALF ? "upper" : "lower"));
- ret = otp_read(page, flags, &content);
+ stamp("processing page %i (0x%x:%s)", page, flags,
+ (flags & OTP_UPPER_HALF ? "upper" : "lower"));
+ ret = bfrom_OtpRead(page, flags, &content);
if (ret & OTP_MASTER_ERROR) {
+ stamp("error from otp: 0x%x", ret);
bytes_done = -EIO;
break;
}
@@ -96,7 +66,7 @@ static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count,
bytes_done = -EFAULT;
break;
}
- if (flags == OTP_UPPER_HALF)
+ if (flags & OTP_UPPER_HALF)
++page;
bytes_done += sizeof(content);
*pos += sizeof(content);
@@ -108,14 +78,53 @@ static ssize_t bfin_otp_read(struct file *file, char __user *buff, size_t count,
}
#ifdef CONFIG_BFIN_OTP_WRITE_ENABLE
+static bool allow_writes;
+
+/**
+ * bfin_otp_init_timing - setup OTP timing parameters
+ *
+ * Required before doing any write operation. Algorithms from HRM.
+ */
+static u32 bfin_otp_init_timing(void)
+{
+ u32 tp1, tp2, tp3, timing;
+
+ tp1 = get_sclk() / 1000000;
+ tp2 = (2 * get_sclk() / 10000000) << 8;
+ tp3 = (0x1401) << 15;
+ timing = tp1 | tp2 | tp3;
+ if (bfrom_OtpCommand(OTP_INIT, timing))
+ return 0;
+
+ return timing;
+}
+
+/**
+ * bfin_otp_deinit_timing - set timings to only allow reads
+ *
+ * Should be called after all writes are done.
+ */
+static void bfin_otp_deinit_timing(u32 timing)
+{
+ /* mask bits [31:15] so that any attempts to write fail */
+ bfrom_OtpCommand(OTP_CLOSE, 0);
+ bfrom_OtpCommand(OTP_INIT, timing & ~(-1 << 15));
+ bfrom_OtpCommand(OTP_CLOSE, 0);
+}
+
/**
- * bfin_otp_write - Write OTP pages
+ * bfin_otp_write - write OTP pages
*
* All writes must be in half page chunks (half page == 64 bits).
*/
static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t count, loff_t *pos)
{
- stampit();
+ ssize_t bytes_done;
+ u32 timing, page, base_flags, flags, ret;
+ u64 content;
+
+ if (!allow_writes)
+ return -EACCES;
if (count % sizeof(u64))
return -EMSGSIZE;
@@ -123,20 +132,96 @@ static ssize_t bfin_otp_write(struct file *filp, const char __user *buff, size_t
if (mutex_lock_interruptible(&bfin_otp_lock))
return -ERESTARTSYS;
- /* need otp_init() documentation before this can be implemented */
+ stampit();
+
+ timing = bfin_otp_init_timing();
+ if (timing == 0) {
+ mutex_unlock(&bfin_otp_lock);
+ return -EIO;
+ }
+
+ base_flags = OTP_CHECK_FOR_PREV_WRITE;
+
+ bytes_done = 0;
+ page = *pos / (sizeof(u64) * 2);
+ while (bytes_done < count) {
+ flags = base_flags | (*pos % (sizeof(u64) * 2) ? OTP_UPPER_HALF : OTP_LOWER_HALF);
+ stamp("processing page %i (0x%x:%s) from %p", page, flags,
+ (flags & OTP_UPPER_HALF ? "upper" : "lower"), buff + bytes_done);
+ if (copy_from_user(&content, buff + bytes_done, sizeof(content))) {
+ bytes_done = -EFAULT;
+ break;
+ }
+ ret = bfrom_OtpWrite(page, flags, &content);
+ if (ret & OTP_MASTER_ERROR) {
+ stamp("error from otp: 0x%x", ret);
+ bytes_done = -EIO;
+ break;
+ }
+ if (flags & OTP_UPPER_HALF)
+ ++page;
+ bytes_done += sizeof(content);
+ *pos += sizeof(content);
+ }
+
+ bfin_otp_deinit_timing(timing);
mutex_unlock(&bfin_otp_lock);
+ return bytes_done;
+}
+
+static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
+{
+ stampit();
+
+ switch (cmd) {
+ case OTPLOCK: {
+ u32 timing;
+ int ret = -EIO;
+
+ if (!allow_writes)
+ return -EACCES;
+
+ if (mutex_lock_interruptible(&bfin_otp_lock))
+ return -ERESTARTSYS;
+
+ timing = bfin_otp_init_timing();
+ if (timing) {
+ u32 otp_result = bfrom_OtpWrite(arg, OTP_LOCK, NULL);
+ stamp("locking page %lu resulted in 0x%x", arg, otp_result);
+ if (!(otp_result & OTP_MASTER_ERROR))
+ ret = 0;
+
+ bfin_otp_deinit_timing(timing);
+ }
+
+ mutex_unlock(&bfin_otp_lock);
+
+ return ret;
+ }
+
+ case MEMLOCK:
+ allow_writes = false;
+ return 0;
+
+ case MEMUNLOCK:
+ allow_writes = true;
+ return 0;
+ }
+
return -EINVAL;
}
#else
# define bfin_otp_write NULL
+# define bfin_otp_ioctl NULL
#endif
static struct file_operations bfin_otp_fops = {
- .owner = THIS_MODULE,
- .read = bfin_otp_read,
- .write = bfin_otp_write,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = bfin_otp_ioctl,
+ .read = bfin_otp_read,
+ .write = bfin_otp_write,
};
static struct miscdevice bfin_otp_misc_device = {
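The rewritten write path above only accepts 64-bit (half-page) chunks, refuses writes until MEMUNLOCK has been issued, and re-arms read-only timing after every operation; OTPLOCK sets the protection bit of the page given in the ioctl argument. A hedged userspace sketch of that flow follows; the /dev node name is an assumption (it is not shown in this hunk), the MEMLOCK/MEMUNLOCK/OTPLOCK ioctls come from <mtd/mtd-abi.h> as the new include suggests, and write/ioctl are only provided when CONFIG_BFIN_OTP_WRITE_ENABLE is set.

/* Hedged userspace sketch; "/dev/bfin-otp" is an assumed node name. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>        /* MEMLOCK, MEMUNLOCK, OTPLOCK */

int main(void)
{
        uint64_t half_page = 0x1122334455667788ULL;
        int fd = open("/dev/bfin-otp", O_RDWR);

        if (fd < 0)
                return 1;
        if (ioctl(fd, MEMUNLOCK, 0) < 0)        /* allow writes (sets allow_writes) */
                perror("MEMUNLOCK");
        if (write(fd, &half_page, sizeof(half_page)) != sizeof(half_page))
                perror("write");                /* count must be a multiple of 8 bytes */
        /* ioctl(fd, OTPLOCK, page) would set that page's protection bit */
        if (ioctl(fd, MEMLOCK, 0) < 0)          /* back to read-only */
                perror("MEMLOCK");
        close(fd);
        return 0;
}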
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index ff647ca1c48..9d589e3144d 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -2239,7 +2239,7 @@ static void do_softint(struct work_struct *work)
struct channel *ch = container_of(work, struct channel, tqueue);
/* Called in response to a modem change event */
if (ch && ch->magic == EPCA_MAGIC) {
- struct tty_struct *tty = tty_port_tty_get(&ch->port);;
+ struct tty_struct *tty = tty_port_tty_get(&ch->port);
if (tty && tty->driver_data) {
if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4a9f3492b92..70a770ac013 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -166,9 +166,8 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
unsigned long m, t;
t = devp->hd_ireqfreq;
- m = read_counter(&devp->hd_hpet->hpet_mc);
- write_counter(t + m + devp->hd_hpets->hp_delta,
- &devp->hd_timer->hpet_compare);
+ m = read_counter(&devp->hd_timer->hpet_compare);
+ write_counter(t + m, &devp->hd_timer->hpet_compare);
}
if (devp->hd_flags & HPET_SHARED_IRQ)
@@ -504,21 +503,25 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp)
g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
if (devp->hd_flags & HPET_PERIODIC) {
- write_counter(t, &timer->hpet_compare);
g |= Tn_TYPE_CNF_MASK;
- v |= Tn_TYPE_CNF_MASK;
- writeq(v, &timer->hpet_config);
- v |= Tn_VAL_SET_CNF_MASK;
+ v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
writeq(v, &timer->hpet_config);
local_irq_save(flags);
- /* NOTE: what we modify here is a hidden accumulator
+ /*
+ * NOTE: First we modify the hidden accumulator
* register supported by periodic-capable comparators.
* We never want to modify the (single) counter; that
- * would affect all the comparators.
+ * would affect all the comparators. The value written
+ * is the counter value when the first interrupt is due.
*/
m = read_counter(&hpet->hpet_mc);
write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
+ /*
+ * Then we modify the comparator, indicating the period
+ * for subsequent interrupts.
+ */
+ write_counter(t, &timer->hpet_compare);
} else {
local_irq_save(flags);
m = read_counter(&hpet->hpet_mc);
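Read as one sequence, the reordered periodic setup above arms Tn_VAL_SET_CNF once, writes the absolute counter value at which the first interrupt is due (the hidden accumulator), and only then writes the period for subsequent interrupts. A consolidated sketch of that order, assuming the driver's read_counter()/write_counter() helpers and hpet structures; it is not a drop-in replacement for the hunk:

static void hpet_program_periodic_sketch(struct hpet_timer __iomem *timer,
                                         struct hpet __iomem *hpet,
                                         struct hpets *hpetp,
                                         unsigned long period)
{
        unsigned long v, m;

        v = readq(&timer->hpet_config);
        v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;    /* periodic + VAL_SET */
        writeq(v, &timer->hpet_config);

        /* 1) hidden accumulator: counter value when the first interrupt is due */
        m = read_counter(&hpet->hpet_mc);
        write_counter(period + m + hpetp->hp_delta, &timer->hpet_compare);

        /* 2) comparator: the period for subsequent interrupts */
        write_counter(period, &timer->hpet_compare);
}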
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 25ce15bb1c0..a632f25f144 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -678,7 +678,7 @@ int hvc_poll(struct hvc_struct *hp)
EXPORT_SYMBOL_GPL(hvc_poll);
/**
- * hvc_resize() - Update terminal window size information.
+ * __hvc_resize() - Update terminal window size information.
* @hp: HVC console pointer
* @ws: Terminal window size structure
*
@@ -687,12 +687,12 @@ EXPORT_SYMBOL_GPL(hvc_poll);
*
* Locking: Locking free; the function MUST be called holding hp->lock
*/
-void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
{
hp->ws = ws;
schedule_work(&hp->tty_resize);
}
-EXPORT_SYMBOL_GPL(hvc_resize);
+EXPORT_SYMBOL_GPL(__hvc_resize);
/*
* This kthread is either polling or interrupt driven. This is determined by
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 3c85d78c975..10950ca706d 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -28,6 +28,7 @@
#define HVC_CONSOLE_H
#include <linux/kref.h>
#include <linux/tty.h>
+#include <linux/spinlock.h>
/*
* This is the max number of console adapters that can/will be found as
@@ -88,7 +89,16 @@ int hvc_poll(struct hvc_struct *hp);
void hvc_kick(void);
/* Resize hvc tty terminal window */
-extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
+extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
+
+static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hp->lock, flags);
+ __hvc_resize(hp, ws);
+ spin_unlock_irqrestore(&hp->lock, flags);
+}
/* default notifier for irq based notification */
extern int notifier_add_irq(struct hvc_struct *hp, int data);
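The split above leaves two entry points: __hvc_resize() for callers that already hold hp->lock, and the inline hvc_resize() wrapper for everyone else. A short illustration of the two call patterns; the surrounding function names are made up:

#include "hvc_console.h"        /* struct hvc_struct, hvc_resize(), __hvc_resize() */

/* Typical backend path: no lock held, let the wrapper take hp->lock. */
static void example_report_winsize(struct hvc_struct *hp, int rows, int cols)
{
        struct winsize ws = { .ws_row = rows, .ws_col = cols };

        hvc_resize(hp, ws);
}

/* Inside a get_chars()-style callback that already runs under hp->lock. */
static void example_report_winsize_locked(struct hvc_struct *hp, struct winsize ws)
{
        __hvc_resize(hp, ws);   /* caller must hold hp->lock */
}

The hvc_iucv hunk below is exactly this second case: it switches to __hvc_resize() because its caller already holds the lock.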
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 0ecac7e532f..b8a5d654d3d 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -273,7 +273,9 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
case MSG_TYPE_WINSIZE:
if (rb->mbuf->datalen != sizeof(struct winsize))
break;
- hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
+ /* The caller must ensure that the hvc is locked, which
+ * is the case when called from hvc_iucv_get_chars() */
+ __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
break;
case MSG_TYPE_ERROR: /* ignored ... */
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 32216b62324..962968f05b9 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -21,6 +21,7 @@
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
#include <linux/virtio_rng.h>
/* The host will fill any buffer we give it with sweet, sweet randomness. We
@@ -51,7 +52,7 @@ static void register_buffer(void)
sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0)
+ if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) < 0)
BUG();
vq->vq_ops->kick(vq);
}
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index a261bd735df..2e66b5f773d 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -691,7 +691,7 @@ static struct ctl_table_header *ipmi_table_header;
/*
* Startup and shutdown functions.
*/
-static int ipmi_poweroff_init(void)
+static int __init ipmi_poweroff_init(void)
{
int rv;
@@ -725,7 +725,7 @@ static int ipmi_poweroff_init(void)
}
#ifdef MODULE
-static __exit void ipmi_poweroff_cleanup(void)
+static void __exit ipmi_poweroff_cleanup(void)
{
int rv;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 0aede1d6a9e..6c8b65d069e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -690,7 +690,7 @@ static ssize_t read_zero(struct file * file, char __user * buf,
if (chunk > PAGE_SIZE)
chunk = PAGE_SIZE; /* Just for latency reasons */
- unwritten = clear_user(buf, chunk);
+ unwritten = __clear_user(buf, chunk);
written += chunk - unwritten;
if (unwritten)
break;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 1ee27cc2342..07fa612a58d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -91,7 +91,7 @@ static int misc_seq_show(struct seq_file *seq, void *v)
}
-static struct seq_operations misc_seq_ops = {
+static const struct seq_operations misc_seq_ops = {
.start = misc_seq_start,
.next = misc_seq_next,
.stop = misc_seq_stop,
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 94ad2c3bfc4..a4ec50c9507 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -281,12 +281,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
case IOCTL_MW_REGISTER_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x entry usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
-
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::mwave_ioctl:"
@@ -295,6 +289,12 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
+ " ipcnum %x entry usIntCount %x\n",
+ ipcnum,
+ pDrvData->IPCs[ipcnum].usIntCount);
+
lock_kernel();
pDrvData->IPCs[ipcnum].bIsHere = FALSE;
pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
@@ -310,11 +310,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
case IOCTL_MW_GET_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
- " ipcnum %x, usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::mwave_ioctl:"
@@ -322,6 +317,11 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
" Invalid ipcnum %x\n", ipcnum);
return -EINVAL;
}
+ PRINTK_3(TRACE_MWAVE,
+ "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
+ " ipcnum %x, usIntCount %x\n",
+ ipcnum,
+ pDrvData->IPCs[ipcnum].usIntCount);
lock_kernel();
if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 881934c068c..c250a31efa5 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1017,7 +1017,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
}
}
- if (dev->proto == 0 && count > dev->rlen - dev->rpos) {
+ if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) {
DEBUGP(4, dev, "T=0 and count > buffer\n");
dev->rbuf[i] = dev->rbuf[i - 1];
dev->rbuf[i - 1] = dev->procbyte;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d8a9255e1a3..04b505e5a5e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1231,7 +1231,7 @@ static char sysctl_bootid[16];
* as an ASCII string in the standard UUID format. If accesses via the
* sysctl system call, it is returned as 16 bytes of binary data.
*/
-static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
+static int proc_do_uuid(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
ctl_table fake_table;
@@ -1254,7 +1254,7 @@ static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
- return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
+ return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
static int uuid_strategy(ctl_table *table,
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index eecee0f576d..74339559f0b 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -873,7 +873,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
/*
** It is important that the product code is an unsigned object!
*/
- if (DownLoad.ProductCode > MAX_PRODUCT) {
+ if (DownLoad.ProductCode >= MAX_PRODUCT) {
rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Bad product code %d passed\n", DownLoad.ProductCode);
p->RIOError.Error = NO_SUCH_PRODUCT;
return -ENXIO;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 50eecfe1d72..44203ff599d 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -26,7 +26,7 @@
#include <linux/proc_fs.h>
#include <linux/nmi.h>
#include <linux/quotaops.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
@@ -252,7 +252,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
struct pt_regs *regs = get_irq_regs();
if (regs)
show_regs(regs);
- perf_counter_print_debug();
+ perf_event_print_debug();
}
static struct sysrq_key_op sysrq_showregs_op = {
.handler = sysrq_handle_showregs,
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index b0603b2e568..45d58002b06 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -696,7 +696,7 @@ int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
- BUILD_BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE);
+ BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE);
rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
"attempting to read a pcr value");
@@ -742,7 +742,7 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
* the module usage count.
*/
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
-#define EXTEND_PCR_SIZE 34
+#define EXTEND_PCR_RESULT_SIZE 34
static struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
@@ -760,10 +760,9 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
return -ENODEV;
cmd.header.in = pcrextend_header;
- BUILD_BUG_ON(be32_to_cpu(cmd.header.in.length) > EXTEND_PCR_SIZE);
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
- rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
+ rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"attempting extend a PCR value");
module_put(chip->dev->driver->owner);
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index 0c2f55a38b9..bf2170fb1cd 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -343,14 +343,14 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations tpm_ascii_b_measurments_seqops = {
+static const struct seq_operations tpm_ascii_b_measurments_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
.show = tpm_ascii_bios_measurements_show,
};
-static struct seq_operations tpm_binary_b_measurments_seqops = {
+static const struct seq_operations tpm_binary_b_measurments_seqops = {
.start = tpm_bios_measurements_start,
.next = tpm_bios_measurements_next,
.stop = tpm_bios_measurements_stop,
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
new file mode 100644
index 00000000000..867b67be9f0
--- /dev/null
+++ b/drivers/char/uv_mmtimer.c
@@ -0,0 +1,216 @@
+/*
+ * Timer device implementation for SGI UV platform.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2009 Silicon Graphics, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mmtimer.h>
+#include <linux/miscdevice.h>
+#include <linux/posix-timers.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/smp_lock.h>
+
+#include <asm/genapic.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+
+MODULE_AUTHOR("Dimitri Sivanich <sivanich@sgi.com>");
+MODULE_DESCRIPTION("SGI UV Memory Mapped RTC Timer");
+MODULE_LICENSE("GPL");
+
+/* name of the device, usually in /dev */
+#define UV_MMTIMER_NAME "mmtimer"
+#define UV_MMTIMER_DESC "SGI UV Memory Mapped RTC Timer"
+#define UV_MMTIMER_VERSION "1.0"
+
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
+
+/*
+ * Period in femtoseconds (10^-15 s)
+ */
+static unsigned long uv_mmtimer_femtoperiod;
+
+static const struct file_operations uv_mmtimer_fops = {
+ .owner = THIS_MODULE,
+ .mmap = uv_mmtimer_mmap,
+ .unlocked_ioctl = uv_mmtimer_ioctl,
+};
+
+/**
+ * uv_mmtimer_ioctl - ioctl interface for /dev/uv_mmtimer
+ * @file: file structure for the device
+ * @cmd: command to execute
+ * @arg: optional argument to command
+ *
+ * Executes the command specified by @cmd. Returns 0 for success, < 0 for
+ * failure.
+ *
+ * Valid commands:
+ *
+ * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
+ * of the page where the registers are mapped) for the counter in question.
+ *
+ * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
+ * seconds
+ *
+ * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
+ * specified by @arg
+ *
+ * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
+ *
+ * %MMTIMER_MMAPAVAIL - Returns 1 if registers can be mmap'd into userspace
+ *
+ * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
+ * in the address specified by @arg.
+ */
+static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case MMTIMER_GETOFFSET: /* offset of the counter */
+ /*
+ * UV RTC register is on its own page
+ */
+ if (PAGE_SIZE <= (1 << 16))
+ ret = ((UV_LOCAL_MMR_BASE | UVH_RTC) & (PAGE_SIZE-1))
+ / 8;
+ else
+ ret = -ENOSYS;
+ break;
+
+ case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
+ if (copy_to_user((unsigned long __user *)arg,
+ &uv_mmtimer_femtoperiod, sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETFREQ: /* frequency in Hz */
+ if (copy_to_user((unsigned long __user *)arg,
+ &sn_rtc_cycles_per_second,
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+
+ case MMTIMER_GETBITS: /* number of bits in the clock */
+ ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
+ break;
+
+ case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
+ ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
+ break;
+
+ case MMTIMER_GETCOUNTER:
+ if (copy_to_user((unsigned long __user *)arg,
+ (unsigned long *)uv_local_mmr_address(UVH_RTC),
+ sizeof(unsigned long)))
+ ret = -EFAULT;
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * uv_mmtimer_mmap - maps the clock's registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * Calls remap_pfn_range() to map the clock's registers into
+ * the calling process' address space.
+ */
+static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long uv_mmtimer_addr;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (PAGE_SIZE > (1 << 16))
+ return -ENOSYS;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ uv_mmtimer_addr = UV_LOCAL_MMR_BASE | UVH_RTC;
+ uv_mmtimer_addr &= ~(PAGE_SIZE - 1);
+ uv_mmtimer_addr &= 0xfffffffffffffffUL;
+
+ if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR "remap_pfn_range failed in uv_mmtimer_mmap\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static struct miscdevice uv_mmtimer_miscdev = {
+ MISC_DYNAMIC_MINOR,
+ UV_MMTIMER_NAME,
+ &uv_mmtimer_fops
+};
+
+
+/**
+ * uv_mmtimer_init - device initialization routine
+ *
+ * Does initial setup for the uv_mmtimer device.
+ */
+static int __init uv_mmtimer_init(void)
+{
+ if (!is_uv_system()) {
+ printk(KERN_ERR "%s: Hardware unsupported\n", UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ /*
+ * Sanity check the cycles/sec variable
+ */
+ if (sn_rtc_cycles_per_second < 100000) {
+ printk(KERN_ERR "%s: unable to determine clock frequency\n",
+ UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ uv_mmtimer_femtoperiod = ((unsigned long)1E15 +
+ sn_rtc_cycles_per_second / 2) /
+ sn_rtc_cycles_per_second;
+
+ if (misc_register(&uv_mmtimer_miscdev)) {
+ printk(KERN_ERR "%s: failed to register device\n",
+ UV_MMTIMER_NAME);
+ return -1;
+ }
+
+ printk(KERN_INFO "%s: v%s, %ld MHz\n", UV_MMTIMER_DESC,
+ UV_MMTIMER_VERSION,
+ sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+ return 0;
+}
+
+module_init(uv_mmtimer_init);
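Since the misc device registers as "mmtimer", the driver answers the <linux/mmtimer.h> ioctls documented in the kernel-doc above. A hedged userspace sketch that queries the frequency, checks that mmap is available, and reads the counter through the mapped page (error handling trimmed; MMTIMER_GETOFFSET returns the counter's offset in 8-byte words, per the ioctl handler above):

/* Userspace sketch for /dev/mmtimer on UV; not an official example. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/mmtimer.h>

int main(void)
{
        unsigned long freq;
        volatile unsigned long *page;
        int fd, offset;

        fd = open("/dev/mmtimer", O_RDONLY);
        if (fd < 0 || ioctl(fd, MMTIMER_GETFREQ, &freq) < 0)
                return 1;
        if (ioctl(fd, MMTIMER_MMAPAVAIL, 0) != 1)
                return 1;
        offset = ioctl(fd, MMTIMER_GETOFFSET, 0);       /* in 8-byte words */
        if (offset < 0)
                return 1;

        page = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
        if (page == MAP_FAILED)
                return 1;
        printf("UV RTC: %lu ticks at %lu Hz\n", page[offset], freq);
        return 0;
}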
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index c74dacfa679..0d328b59568 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
#include <linux/virtio_console.h>
#include "hvc_console.h"
@@ -65,7 +66,7 @@ static int put_chars(u32 vtermno, const char *buf, int count)
/* add_buf wants a token to identify this buffer: we hand it any
* non-NULL pointer, since there's only ever one buffer. */
- if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) {
+ if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) {
/* Tell Host to go! */
out_vq->vq_ops->kick(out_vq);
/* Chill out until it's done with the buffer. */
@@ -85,7 +86,7 @@ static void add_inbuf(void)
sg_init_one(sg, inbuf, PAGE_SIZE);
/* We should always be able to add one buffer to an empty queue. */
- if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) != 0)
+ if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0)
BUG();
in_vq->vq_ops->kick(in_vq);
}
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 85e5dc0431f..abf4a2529f8 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -139,6 +139,31 @@ void proc_id_connector(struct task_struct *task, int which_id)
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
+void proc_sid_connector(struct task_struct *task)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+ __u8 buffer[CN_PROC_MSG_SIZE];
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_SID;
+ ev->event_data.sid.process_pid = task->pid;
+ ev->event_data.sid.process_tgid = task->tgid;
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f1df59f59a3..68104434ebb 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -2,8 +2,12 @@
* menu.c - the menu idle governor
*
* Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
+ * Copyright (C) 2009 Intel Corporation
+ * Author:
+ * Arjan van de Ven <arjan@linux.intel.com>
*
- * This code is licenced under the GPL.
+ * This code is licensed under the GPL version 2 as described
+ * in the COPYING file that accompanies the Linux Kernel.
*/
#include <linux/kernel.h>
@@ -13,22 +17,158 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
+#include <linux/sched.h>
-#define BREAK_FUZZ 4 /* 4 us */
-#define PRED_HISTORY_PCT 50
+#define BUCKETS 12
+#define RESOLUTION 1024
+#define DECAY 4
+#define MAX_INTERESTING 50000
+
+/*
+ * Concepts and ideas behind the menu governor
+ *
+ * For the menu governor, there are 3 decision factors for picking a C
+ * state:
+ * 1) Energy break even point
+ * 2) Performance impact
+ * 3) Latency tolerance (from pmqos infrastructure)
+ * These three factors are treated independently.
+ *
+ * Energy break even point
+ * -----------------------
+ * C state entry and exit have an energy cost, and a certain amount of time in
+ * the C state is required to actually break even on this cost. CPUIDLE
+ * provides us this duration in the "target_residency" field. So all that we
+ * need is a good prediction of how long we'll be idle. Like the traditional
+ * menu governor, we start with the actual known "next timer event" time.
+ *
+ * Since there are other sources of wakeups (interrupts for example) than
+ * the next timer event, this estimation is rather optimistic. To get a
+ * more realistic estimate, a correction factor is applied to the estimate,
+ * that is based on historic behavior. For example, if in the past the actual
+ * duration always was 50% of the next timer tick, the correction factor will
+ * be 0.5.
+ *
+ * menu uses a running average for this correction factor, however it uses a
+ * set of factors, not just a single factor. This stems from the realization
+ * that the ratio is dependent on the order of magnitude of the expected
+ * duration; if we expect 500 milliseconds of idle time the likelihood of
+ * getting an interrupt very early is much higher than if we expect 50
+ * microseconds of idle time. A second independent factor that has a big impact on
+ * the actual factor is if there is (disk) IO outstanding or not.
+ * (as a special twist, we consider every sleep longer than 50 milliseconds
+ * as perfect; there are no power gains for sleeping longer than this)
+ *
+ * For these two reasons we keep an array of 12 independent factors, that gets
+ * indexed based on the magnitude of the expected duration as well as the
+ * "is IO outstanding" property.
+ *
+ * Limiting Performance Impact
+ * ---------------------------
+ * C states, especially those with large exit latencies, can have a real
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
+ * and in addition, less performance has a power price of its own.
+ *
+ * As a general rule of thumb, menu assumes that the following heuristic
+ * holds:
+ * The busier the system, the less C-state impact is acceptable.
+ *
+ * This rule-of-thumb is implemented using a performance-multiplier:
+ * If the exit latency times the performance multiplier is longer than
+ * the predicted duration, the C state is not considered a candidate
+ * for selection due to a too high performance impact. So the higher
+ * this multiplier is, the longer we need to be idle to pick a deep C
+ * state, and thus the less likely a busy CPU will hit such a deep
+ * C state.
+ *
+ * Two factors are used in determining this multiplier:
+ * a value of 10 is added for each point of "per cpu load average" we have.
+ * a value of 5 points is added for each process that is waiting for
+ * IO on this CPU.
+ * (these values are experimentally determined)
+ *
+ * The load average factor gives a longer term (few seconds) input to the
+ * decision, while the iowait value gives a CPU-local instantaneous input.
+ * The iowait factor may look low, but realize that this is also already
+ * represented in the system load average.
+ *
+ */
struct menu_device {
int last_state_idx;
+ int needs_update;
unsigned int expected_us;
- unsigned int predicted_us;
- unsigned int current_predicted_us;
- unsigned int last_measured_us;
- unsigned int elapsed_us;
+ u64 predicted_us;
+ unsigned int measured_us;
+ unsigned int exit_us;
+ unsigned int bucket;
+ u64 correction_factor[BUCKETS];
};
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+static int get_loadavg(void)
+{
+ unsigned long this = this_cpu_load();
+
+
+ return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
+}
+
+static inline int which_bucket(unsigned int duration)
+{
+ int bucket = 0;
+
+ /*
+ * We keep two groups of stats: one for when no
+ * IO is pending, one for when it is.
+ * This allows us to calculate
+ * E(duration)|iowait
+ */
+ if (nr_iowait_cpu())
+ bucket = BUCKETS/2;
+
+ if (duration < 10)
+ return bucket;
+ if (duration < 100)
+ return bucket + 1;
+ if (duration < 1000)
+ return bucket + 2;
+ if (duration < 10000)
+ return bucket + 3;
+ if (duration < 100000)
+ return bucket + 4;
+ return bucket + 5;
+}
+
+/*
+ * Return a multiplier for the exit latency that is intended
+ * to take performance requirements into account.
+ * The more performance critical we estimate the system
+ * to be, the higher this multiplier, and thus the higher
+ * the barrier to go to an expensive C state.
+ */
+static inline int performance_multiplier(void)
+{
+ int mult = 1;
+
+ /* for higher loadavg, we are more reluctant */
+
+ mult += 2 * get_loadavg();
+
+ /* for IO wait tasks (per cpu!) we add 5x each */
+ mult += 10 * nr_iowait_cpu();
+
+ return mult;
+}
+
static DEFINE_PER_CPU(struct menu_device, menu_devices);
+static void menu_update(struct cpuidle_device *dev);
+
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
@@ -38,41 +178,68 @@ static int menu_select(struct cpuidle_device *dev)
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
int i;
+ int multiplier;
+
+ data->last_state_idx = 0;
+ data->exit_us = 0;
+
+ if (data->needs_update) {
+ menu_update(dev);
+ data->needs_update = 0;
+ }
/* Special case when user has set very strict latency requirement */
- if (unlikely(latency_req == 0)) {
- data->last_state_idx = 0;
+ if (unlikely(latency_req == 0))
return 0;
- }
- /* determine the expected residency time */
+ /* determine the expected residency time, round up */
data->expected_us =
- (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
+ DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+
+
+ data->bucket = which_bucket(data->expected_us);
+
+ multiplier = performance_multiplier();
+
+ /*
+ * if the correction factor is 0 (eg first time init or cpu hotplug
+ * etc), we actually want to start out with a unity factor.
+ */
+ if (data->correction_factor[data->bucket] == 0)
+ data->correction_factor[data->bucket] = RESOLUTION * DECAY;
+
+ /* Make sure to round up for half microseconds */
+ data->predicted_us = DIV_ROUND_CLOSEST(
+ data->expected_us * data->correction_factor[data->bucket],
+ RESOLUTION * DECAY);
+
+ /*
+ * We want to default to C1 (hlt), not to busy polling
+ * unless the timer is happening really really soon.
+ */
+ if (data->expected_us > 5)
+ data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
- /* Recalculate predicted_us based on prediction_history_pct */
- data->predicted_us *= PRED_HISTORY_PCT;
- data->predicted_us += (100 - PRED_HISTORY_PCT) *
- data->current_predicted_us;
- data->predicted_us /= 100;
/* find the deepest idle state that satisfies our constraints */
- for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
- if (s->target_residency > data->expected_us)
- break;
if (s->target_residency > data->predicted_us)
break;
if (s->exit_latency > latency_req)
break;
+ if (s->exit_latency * multiplier > data->predicted_us)
+ break;
+ data->exit_us = s->exit_latency;
+ data->last_state_idx = i;
}
- data->last_state_idx = i - 1;
- return i - 1;
+ return data->last_state_idx;
}
/**
- * menu_reflect - attempts to guess what happened after entry
+ * menu_reflect - records that data structures need update
* @dev: the CPU
*
* NOTE: it's important to be fast here because this operation will add to
@@ -81,39 +248,63 @@ static int menu_select(struct cpuidle_device *dev)
static void menu_reflect(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
+ data->needs_update = 1;
+}
+
+/**
+ * menu_update - attempts to guess what happened after entry
+ * @dev: the CPU
+ */
+static void menu_update(struct cpuidle_device *dev)
+{
+ struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &dev->states[last_idx];
unsigned int measured_us;
+ u64 new_factor;
/*
* Ugh, this idle state doesn't support residency measurements, so we
* are basically lost in the dark. As a compromise, assume we slept
- * for one full standard timer tick. However, be aware that this
- * could potentially result in a suboptimal state transition.
+ * for the whole expected time.
*/
if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
- last_idle_us = USEC_PER_SEC / HZ;
+ last_idle_us = data->expected_us;
+
+
+ measured_us = last_idle_us;
/*
- * measured_us and elapsed_us are the cumulative idle time, since the
- * last time we were woken out of idle by an interrupt.
+ * We correct for the exit latency; we are assuming here that the
+ * exit latency happens after the event that we're interested in.
*/
- if (data->elapsed_us <= data->elapsed_us + last_idle_us)
- measured_us = data->elapsed_us + last_idle_us;
+ if (measured_us > data->exit_us)
+ measured_us -= data->exit_us;
+
+
+ /* update our correction ratio */
+
+ new_factor = data->correction_factor[data->bucket]
+ * (DECAY - 1) / DECAY;
+
+ if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
+ new_factor += RESOLUTION * measured_us / data->expected_us;
else
- measured_us = -1;
+ /*
+ * we were idle so long that we count it as a perfect
+ * prediction
+ */
+ new_factor += RESOLUTION;
- /* Predict time until next break event */
- data->current_predicted_us = max(measured_us, data->last_measured_us);
+ /*
+ * We don't want 0 as factor; we always want at least
+ * a tiny bit of estimated time.
+ */
+ if (new_factor == 0)
+ new_factor = 1;
- if (last_idle_us + BREAK_FUZZ <
- data->expected_us - target->exit_latency) {
- data->last_measured_us = measured_us;
- data->elapsed_us = 0;
- } else {
- data->elapsed_us = measured_us;
- }
+ data->correction_factor[data->bucket] = new_factor;
}
/**
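The prediction in menu_select() is the next-timer estimate scaled by the bucket's correction factor, which is kept in RESOLUTION*DECAY fixed point and decays by (DECAY-1)/DECAY on every menu_update(). A standalone sketch of that arithmetic, with the two constants copied from the hunk and everything else purely illustrative:

/* Standalone sketch of the menu governor's fixed-point prediction math. */
#include <stdio.h>

#define RESOLUTION      1024
#define DECAY           4

static unsigned long long factor = RESOLUTION * DECAY; /* unity at start */

static unsigned long long predict(unsigned int expected_us)
{
        /* round-to-closest division, as DIV_ROUND_CLOSEST does */
        return (expected_us * factor + (RESOLUTION * DECAY) / 2) /
               (RESOLUTION * DECAY);
}

static void update(unsigned int expected_us, unsigned int measured_us)
{
        /* old factor decays, the measured/expected ratio is blended in */
        factor = factor * (DECAY - 1) / DECAY +
                 (unsigned long long)RESOLUTION * measured_us / expected_us;
        if (factor == 0)
                factor = 1;     /* never predict "no idle time at all" */
}

int main(void)
{
        update(1000, 500);      /* we kept waking after half the estimate */
        update(1000, 500);
        printf("next prediction for a 1000us estimate: %lluus\n",
               predict(1000)); /* trends toward ~500us */
        return 0;
}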
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 25b743abfb5..52e6bb70a49 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,7 +28,7 @@
#include <linux/device.h>
#include <linux/dca.h>
-#define DCA_VERSION "1.8"
+#define DCA_VERSION "1.12.1"
MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
@@ -36,20 +36,92 @@ MODULE_AUTHOR("Intel Corporation");
static DEFINE_SPINLOCK(dca_lock);
-static LIST_HEAD(dca_providers);
+static LIST_HEAD(dca_domains);
-static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
+static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
- struct dca_provider *dca, *ret = NULL;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->bus;
- list_for_each_entry(dca, &dca_providers, node) {
- if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
- ret = dca;
- break;
- }
+ while (bus->parent)
+ bus = bus->parent;
+
+ return bus;
+}
+
+static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
+{
+ struct dca_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
+ if (!domain)
+ return NULL;
+
+ INIT_LIST_HEAD(&domain->dca_providers);
+ domain->pci_rc = rc;
+
+ return domain;
+}
+
+static void dca_free_domain(struct dca_domain *domain)
+{
+ list_del(&domain->node);
+ kfree(domain);
+}
+
+static struct dca_domain *dca_find_domain(struct pci_bus *rc)
+{
+ struct dca_domain *domain;
+
+ list_for_each_entry(domain, &dca_domains, node)
+ if (domain->pci_rc == rc)
+ return domain;
+
+ return NULL;
+}
+
+static struct dca_domain *dca_get_domain(struct device *dev)
+{
+ struct pci_bus *rc;
+ struct dca_domain *domain;
+
+ rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(rc);
+
+ if (!domain) {
+ domain = dca_allocate_domain(rc);
+ if (domain)
+ list_add(&domain->node, &dca_domains);
+ }
+
+ return domain;
+}
+
+static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
+{
+ struct dca_provider *dca;
+ struct pci_bus *rc;
+ struct dca_domain *domain;
+
+ if (dev) {
+ rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(rc);
+ if (!domain)
+ return NULL;
+ } else {
+ if (!list_empty(&dca_domains))
+ domain = list_first_entry(&dca_domains,
+ struct dca_domain,
+ node);
+ else
+ return NULL;
}
- return ret;
+ list_for_each_entry(dca, &domain->dca_providers, node)
+ if ((!dev) || (dca->ops->dev_managed(dca, dev)))
+ return dca;
+
+ return NULL;
}
/**
@@ -61,6 +133,8 @@ int dca_add_requester(struct device *dev)
struct dca_provider *dca;
int err, slot = -ENODEV;
unsigned long flags;
+ struct pci_bus *pci_rc;
+ struct dca_domain *domain;
if (!dev)
return -EFAULT;
@@ -74,7 +148,14 @@ int dca_add_requester(struct device *dev)
return -EEXIST;
}
- list_for_each_entry(dca, &dca_providers, node) {
+ pci_rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(pci_rc);
+ if (!domain) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+
+ list_for_each_entry(dca, &domain->dca_providers, node) {
slot = dca->ops->add_requester(dca, dev);
if (slot >= 0)
break;
@@ -222,13 +303,19 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
int err;
unsigned long flags;
+ struct dca_domain *domain;
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
spin_lock_irqsave(&dca_lock, flags);
- list_add(&dca->node, &dca_providers);
+ domain = dca_get_domain(dev);
+ if (!domain) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ list_add(&dca->node, &domain->dca_providers);
spin_unlock_irqrestore(&dca_lock, flags);
blocking_notifier_call_chain(&dca_provider_chain,
@@ -241,15 +328,24 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
* unregister_dca_provider - remove a dca provider
* @dca - struct created by alloc_dca_provider()
*/
-void unregister_dca_provider(struct dca_provider *dca)
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
unsigned long flags;
+ struct pci_bus *pci_rc;
+ struct dca_domain *domain;
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_REMOVE, NULL);
spin_lock_irqsave(&dca_lock, flags);
+
list_del(&dca->node);
+
+ pci_rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(pci_rc);
+ if (list_empty(&domain->dca_providers))
+ dca_free_domain(domain);
+
spin_unlock_irqrestore(&dca_lock, flags);
dca_sysfs_remove_provider(dca);
@@ -276,7 +372,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
static int __init dca_init(void)
{
- printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
+ pr_info("dca service started, version %s\n", DCA_VERSION);
return dca_sysfs_init();
}
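Because providers now live on per-root-complex domain lists, unregister_dca_provider() gains a struct device argument so the core can locate (and, when it empties, free) the matching domain. Existing providers therefore have to pass their PCI device at teardown; a hedged sketch of the call-site change, where the function and variable names are assumptions about a typical caller such as ioatdma:

/* Illustrative provider teardown after this API change. */
static void example_dca_teardown(struct dca_provider *dca, struct pci_dev *pdev)
{
        /* was: unregister_dca_provider(dca); */
        unregister_dca_provider(dca, &pdev->dev);
        free_dca_provider(dca);
}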
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 81e1020fb51..5903a88351b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -17,11 +17,15 @@ if DMADEVICES
comment "DMA Devices"
+config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ bool
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
select DCA
+ select ASYNC_TX_DISABLE_CHANNEL_SWITCH
help
Enable support for the Intel(R) I/OAT DMA engine present
in recent Intel Xeon chipsets.
@@ -97,6 +101,14 @@ config TXX9_DMAC
Support the TXx9 SoC internal DMA controller. This can be
integrated in chips such as the Toshiba TX4927/38/39.
+config SH_DMAE
+ tristate "Renesas SuperH DMAC support"
+ depends on SUPERH && SH_DMA
+ depends on !SH_DMA_API
+ select DMA_ENGINE
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
config DMA_ENGINE
bool
@@ -116,7 +128,7 @@ config NET_DMA
config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
- depends on DMA_ENGINE && !HIGHMEM64G
+ depends on DMA_ENGINE
help
This allows the async_tx api to take advantage of offload engines for
memcpy, memset, xor, and raid6 p+q operations. If your platform has
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 40e1e008357..eca71ba78ae 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,8 +1,7 @@
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
@@ -10,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_SH_DMAE) += shdma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c8522e6f1ad..7585c4164bd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -87,6 +87,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
if (desc) {
memset(desc, 0, sizeof(struct at_desc));
+ INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
/* txd.flags will be overwritten in prep functions */
desc->txd.flags = DMA_CTRL_ACK;
@@ -150,11 +151,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
struct at_desc *child;
spin_lock_bh(&atchan->lock);
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n",
child);
- list_splice_init(&desc->txd.tx_list, &atchan->free_list);
+ list_splice_init(&desc->tx_list, &atchan->free_list);
dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list);
@@ -247,30 +248,33 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
param = txd->callback_param;
/* move children to free_list */
- list_splice_init(&txd->tx_list, &atchan->free_list);
+ list_splice_init(&desc->tx_list, &atchan->free_list);
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
/* unmap dma addresses */
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(chan2parent(&atchan->chan_common),
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(chan2parent(&atchan->chan_common),
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(chan2parent(&atchan->chan_common),
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- else
- dma_unmap_page(chan2parent(&atchan->chan_common),
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
+ if (!atchan->chan_common.private) {
+ struct device *parent = chan2parent(&atchan->chan_common);
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(parent,
+ desc->lli.daddr,
+ desc->len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(parent,
+ desc->lli.daddr,
+ desc->len, DMA_FROM_DEVICE);
+ }
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(parent,
+ desc->lli.saddr,
+ desc->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(parent,
+ desc->lli.saddr,
+ desc->len, DMA_TO_DEVICE);
+ }
}
/*
@@ -334,7 +338,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
/* This one is currently in progress */
return;
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
if (!(child->lli.ctrla & ATC_DONE))
/* Currently in progress */
return;
@@ -407,7 +411,7 @@ static void atc_handle_error(struct at_dma_chan *atchan)
dev_crit(chan2dev(&atchan->chan_common),
" cookie: %d\n", bad_desc->txd.cookie);
atc_dump_lli(atchan, &bad_desc->lli);
- list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &bad_desc->tx_list, desc_node)
atc_dump_lli(atchan, &child->lli);
/* Pretend the descriptor completed successfully */
@@ -587,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
prev->lli.dscr = desc->txd.phys;
/* insert the link descriptor to the LD ring */
list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ &first->tx_list);
}
prev = desc;
}
@@ -646,8 +650,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = atslave->reg_width;
- sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
-
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
@@ -687,7 +689,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev->lli.dscr = desc->txd.phys;
/* insert the link descriptor to the LD ring */
list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ &first->tx_list);
}
prev = desc;
total_len += len;
@@ -729,7 +731,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev->lli.dscr = desc->txd.phys;
/* insert the link descriptor to the LD ring */
list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ &first->tx_list);
}
prev = desc;
total_len += len;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 4c972afc49e..495457e3dc4 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -165,6 +165,7 @@ struct at_desc {
struct at_lli lli;
/* THEN values for driver housekeeping */
+ struct list_head tx_list;
struct dma_async_tx_descriptor txd;
struct list_head desc_node;
size_t len;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5a87384ea4f..bd0b248de2c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -608,6 +608,40 @@ void dmaengine_put(void)
}
EXPORT_SYMBOL(dmaengine_put);
+static bool device_has_all_tx_types(struct dma_device *device)
+{
+ /* A device that satisfies this test has channels that will never cause
+ * an async_tx channel switch event as all possible operation types can
+ * be handled.
+ */
+ #ifdef CONFIG_ASYNC_TX_DMA
+ if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+ return false;
+ #endif
+
+ #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+ if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
+ return false;
+ #endif
+
+ #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
+ if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
+ return false;
+ #endif
+
+ #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+ if (!dma_has_cap(DMA_XOR, device->cap_mask))
+ return false;
+ #endif
+
+ #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+ if (!dma_has_cap(DMA_PQ, device->cap_mask))
+ return false;
+ #endif
+
+ return true;
+}
+
static int get_dma_id(struct dma_device *device)
{
int rc;
@@ -644,8 +678,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_memcpy);
BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
!device->device_prep_dma_xor);
- BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
- !device->device_prep_dma_zero_sum);
+ BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
+ !device->device_prep_dma_xor_val);
+ BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
+ !device->device_prep_dma_pq);
+ BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
+ !device->device_prep_dma_pq_val);
BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
!device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
@@ -661,6 +699,12 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
+ /* note: this only matters in the
+ * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+ */
+ if (device_has_all_tx_types(device))
+ dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
if (!idr_ref)
return -ENOMEM;
@@ -933,55 +977,29 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
{
tx->chan = chan;
spin_lock_init(&tx->lock);
- INIT_LIST_HEAD(&tx->tx_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
* @tx: in-flight transaction to wait on
- *
- * This routine assumes that tx was obtained from a call to async_memcpy,
- * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
- * and submitted). Walking the parent chain is only meant to cover for DMA
- * drivers that do not implement the DMA_INTERRUPT capability and may race with
- * the driver's descriptor cleanup routine.
*/
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
- enum dma_status status;
- struct dma_async_tx_descriptor *iter;
- struct dma_async_tx_descriptor *parent;
+ unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
if (!tx)
return DMA_SUCCESS;
- WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
- " %s\n", __func__, dma_chan_name(tx->chan));
-
- /* poll through the dependency chain, return when tx is complete */
- do {
- iter = tx;
-
- /* find the root of the unsubmitted dependency chain */
- do {
- parent = iter->parent;
- if (!parent)
- break;
- else
- iter = parent;
- } while (parent);
-
- /* there is a small window for ->parent == NULL and
- * ->cookie == -EBUSY
- */
- while (iter->cookie == -EBUSY)
- cpu_relax();
-
- status = dma_sync_wait(iter->chan, iter->cookie);
- } while (status == DMA_IN_PROGRESS || (iter != tx));
-
- return status;
+ while (tx->cookie == -EBUSY) {
+ if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+ pr_err("%s timeout waiting for descriptor submission\n",
+ __func__);
+ return DMA_ERROR;
+ }
+ cpu_relax();
+ }
+ return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
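/*
 * Editor's sketch (not part of this patch): with the simplified
 * dma_wait_for_async_tx() above, a caller that holds an in-flight
 * descriptor just waits and checks the result; "tx" is assumed to come
 * from an async_tx helper such as async_memcpy() or async_xor().
 */
if (dma_wait_for_async_tx(tx) == DMA_ERROR)
	pr_err("transaction failed or timed out\n");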
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d93017fc787..a32a4cf7b1e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -48,6 +48,11 @@ module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
+static unsigned int pq_sources = 3;
+module_param(pq_sources, uint, S_IRUGO);
+MODULE_PARM_DESC(pq_sources,
+ "Number of p+q source buffers (default: 3)");
+
/*
* Initialization patterns. All bytes in the source buffer have bit 7
* set, all bytes in the destination buffer have bit 7 cleared.
@@ -232,6 +237,7 @@ static int dmatest_func(void *data)
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
+ u8 pq_coefs[pq_sources];
int ret;
int src_cnt;
int dst_cnt;
@@ -248,6 +254,11 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_XOR) {
src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 1;
+ } else if (thread->type == DMA_PQ) {
+ src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+ dst_cnt = 2;
+ for (i = 0; i < pq_sources; i++)
+ pq_coefs[i] = 1;
} else
goto err_srcs;
@@ -283,6 +294,7 @@ static int dmatest_func(void *data)
dma_addr_t dma_dsts[dst_cnt];
struct completion cmp;
unsigned long tmo = msecs_to_jiffies(3000);
+ u8 align = 0;
total_tests++;
@@ -290,6 +302,18 @@ static int dmatest_func(void *data)
src_off = dmatest_random() % (test_buf_size - len + 1);
dst_off = dmatest_random() % (test_buf_size - len + 1);
+ /* honor alignment restrictions */
+ if (thread->type == DMA_MEMCPY)
+ align = dev->copy_align;
+ else if (thread->type == DMA_XOR)
+ align = dev->xor_align;
+ else if (thread->type == DMA_PQ)
+ align = dev->pq_align;
+
+ len = (len >> align) << align;
+ src_off = (src_off >> align) << align;
+ dst_off = (dst_off >> align) << align;
+
dmatest_init_srcs(thread->srcs, src_off, len);
dmatest_init_dsts(thread->dsts, dst_off, len);
@@ -306,6 +330,7 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL);
}
+
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dma_dsts[0] + dst_off,
@@ -316,6 +341,15 @@ static int dmatest_func(void *data)
dma_dsts[0] + dst_off,
dma_srcs, xor_sources,
len, flags);
+ else if (thread->type == DMA_PQ) {
+ dma_addr_t dma_pq[dst_cnt];
+
+ for (i = 0; i < dst_cnt; i++)
+ dma_pq[i] = dma_dsts[i] + dst_off;
+ tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
+ pq_sources, pq_coefs,
+ len, flags);
+ }
if (!tx) {
for (i = 0; i < src_cnt; i++)
@@ -459,6 +493,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty
op = "copy";
else if (type == DMA_XOR)
op = "xor";
+ else if (type == DMA_PQ)
+ op = "pq";
else
return -EINVAL;
@@ -514,6 +550,10 @@ static int dmatest_add_channel(struct dma_chan *chan)
cnt = dmatest_add_threads(dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
}
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ cnt = dmatest_add_threads(dtc, DMA_PQ);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
pr_info("dmatest: Started %u threads using %s\n",
thread_count, dma_chan_name(chan));
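/*
 * Editor's illustration (not part of the patch): the alignment fixup in
 * the hunk above rounds the random length and offsets down to a multiple
 * of (1 << align).  For a device advertising pq_align == 3 (8-byte units):
 */
unsigned int align = 3;
unsigned int len = 1001;

len = (len >> align) << align;	/* 1001 -> 1000, same as len & ~((1 << align) - 1) */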
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 933c143b6a7..2eea823516a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -116,7 +116,7 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
struct dw_desc *child;
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
dma_sync_single_for_cpu(chan2parent(&dwc->chan),
child->txd.phys, sizeof(child->lli),
DMA_TO_DEVICE);
@@ -137,11 +137,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
dwc_sync_desc_for_cpu(dwc, desc);
spin_lock_bh(&dwc->lock);
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
child);
- list_splice_init(&desc->txd.tx_list, &dwc->free_list);
+ list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
spin_unlock_bh(&dwc->lock);
@@ -209,19 +209,28 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
param = txd->callback_param;
dwc_sync_desc_for_cpu(dwc, desc);
- list_splice_init(&txd->tx_list, &dwc->free_list);
+ list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
- /*
- * We use dma_unmap_page() regardless of how the buffers were
- * mapped before they were submitted...
- */
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
- dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
- desc->len, DMA_FROM_DEVICE);
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
- dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
- desc->len, DMA_TO_DEVICE);
+ if (!dwc->chan.private) {
+ struct device *parent = chan2parent(&dwc->chan);
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(parent, desc->lli.dar,
+ desc->len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(parent, desc->lli.dar,
+ desc->len, DMA_FROM_DEVICE);
+ }
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(parent, desc->lli.sar,
+ desc->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(parent, desc->lli.sar,
+ desc->len, DMA_TO_DEVICE);
+ }
+ }
/*
* The API requires that no submissions are done from a
@@ -289,7 +298,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* This one is currently in progress */
return;
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
if (child->lli.llp == llp)
/* Currently in progress */
return;
@@ -356,7 +365,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
- list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
/* Pretend the descriptor completed successfully */
@@ -608,7 +617,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ &first->tx_list);
}
prev = desc;
}
@@ -658,8 +667,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = dws->reg_width;
prev = first = NULL;
- sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
-
switch (direction) {
case DMA_TO_DEVICE:
ctllo = (DWC_DEFAULT_CTLLO
@@ -700,7 +707,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
sizeof(prev->lli),
DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ &first->tx_list);
}
prev = desc;
total_len += len;
@@ -746,7 +753,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
sizeof(prev->lli),
DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ &first->tx_list);
}
prev = desc;
total_len += len;
@@ -902,6 +909,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
break;
}
+ INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 13a58076703..d9a939f67f4 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -217,6 +217,7 @@ struct dw_desc {
/* THEN values for driver housekeeping */
struct list_head desc_node;
+ struct list_head tx_list;
struct dma_async_tx_descriptor txd;
size_t len;
};
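/*
 * Editor's sketch of the pattern applied across these drivers (struct and
 * function names here are illustrative, not from the patch): the chain of
 * child descriptors now lives in each driver's private descriptor instead
 * of in dma_async_tx_descriptor, so the driver initializes and splices the
 * list itself.
 */
struct my_desc {
	struct list_head		tx_list;	/* children of this transaction */
	struct dma_async_tx_descriptor	txd;
	struct list_head		desc_node;
};

static void my_desc_init(struct my_desc *desc, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&desc->tx_list);		/* previously done by the core */
	dma_async_tx_descriptor_init(&desc->txd, chan);
}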
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ef87a898414..296f9e747fa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -34,6 +34,7 @@
#include <linux/dmapool.h>
#include <linux/of_platform.h>
+#include <asm/fsldma.h>
#include "fsldma.h"
static void dma_init(struct fsl_dma_chan *fsl_chan)
@@ -280,28 +281,40 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
}
/**
- * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * fsl_chan_set_request_count - Set DMA Request Count for external control
* @fsl_chan : Freescale DMA channel
- * @size : Pause control size, 0 for disable external pause control.
- * The maximum is 1024.
+ * @size : Number of bytes to transfer in a single request
+ *
+ * The Freescale DMA channel can be controlled by the external signal DREQ#.
+ * The DMA request count is how many bytes are allowed to transfer before
+ * pausing the channel, after which a new assertion of DREQ# resumes channel
+ * operation.
*
- * The Freescale DMA channel can be controlled by the external
- * signal DREQ#. The pause control size is how many bytes are allowed
- * to transfer before pausing the channel, after which a new assertion
- * of DREQ# resumes channel operation.
+ * A size of 0 disables external pause control. The maximum size is 1024.
*/
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
{
- if (size > 1024)
- return;
+ BUG_ON(size > 1024);
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+ | ((__ilog2(size) << 24) & 0x0f000000),
+ 32);
+}
- if (size) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | ((__ilog2(size) << 24) & 0x0f000000),
- 32);
+/**
+ * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * @fsl_chan : Freescale DMA channel
+ * @enable : 0 is disabled, 1 is enabled.
+ *
+ * The Freescale DMA channel can be controlled by the external signal DREQ#.
+ * The DMA Request Count feature should be used in addition to this feature
+ * to set the number of bytes to transfer before pausing the channel.
+ */
+static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+{
+ if (enable)
fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
- } else
+ else
fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
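/*
 * Editor's illustration (not part of the patch): fsl_chan_set_request_count()
 * above programs the request count as a power-of-two exponent into bits
 * 24-27 of the mode register, so a 64-byte request count is written as
 * __ilog2(64) == 6:
 */
u32 mr = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

mr |= (__ilog2(64) << 24) & 0x0f000000;
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mr, 32);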
@@ -326,7 +339,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
- struct fsl_desc_sw *desc;
+ struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
+ struct fsl_desc_sw *child;
unsigned long flags;
dma_cookie_t cookie;
@@ -334,7 +348,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
cookie = fsl_chan->common.cookie;
- list_for_each_entry(desc, &tx->tx_list, node) {
+ list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
if (cookie < 0)
cookie = 1;
@@ -343,8 +357,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
}
fsl_chan->common.cookie = cookie;
- append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
- list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
+ append_ld_queue(fsl_chan, desc);
+ list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
@@ -366,6 +380,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
if (desc_sw) {
memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
+ INIT_LIST_HEAD(&desc_sw->tx_list);
dma_async_tx_descriptor_init(&desc_sw->async_tx,
&fsl_chan->common);
desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
@@ -455,7 +470,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
new->async_tx.flags = flags;
/* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &new->async_tx.tx_list);
+ list_add_tail(&new->node, &new->tx_list);
/* Set End-of-link to the last link descriptor of new list*/
set_ld_eol(fsl_chan, new);
@@ -513,7 +528,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
dma_dest += copy;
/* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &first->async_tx.tx_list);
+ list_add_tail(&new->node, &first->tx_list);
} while (len);
new->async_tx.flags = flags; /* client is in control of this ack */
@@ -528,7 +543,7 @@ fail:
if (!first)
return NULL;
- list = &first->async_tx.tx_list;
+ list = &first->tx_list;
list_for_each_entry_safe_reverse(new, prev, list, node) {
list_del(&new->node);
dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
@@ -538,6 +553,229 @@ fail:
}
/**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct fsl_dma_chan *fsl_chan;
+ struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+ struct fsl_dma_slave *slave;
+ struct list_head *tx_list;
+ size_t copy;
+
+ int i;
+ struct scatterlist *sg;
+ size_t sg_used;
+ size_t hw_used;
+ struct fsl_dma_hw_addr *hw;
+ dma_addr_t dma_dst, dma_src;
+
+ if (!chan)
+ return NULL;
+
+ if (!chan->private)
+ return NULL;
+
+ fsl_chan = to_fsl_chan(chan);
+ slave = chan->private;
+
+ if (list_empty(&slave->addresses))
+ return NULL;
+
+ hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
+ hw_used = 0;
+
+ /*
+ * Build the hardware transaction to copy from the scatterlist to
+ * the hardware, or from the hardware to the scatterlist
+ *
+ * If you are copying from the hardware to the scatterlist and it
+ * takes two hardware entries to fill an entire page, then both
+ * hardware entries will be coalesced into the same page
+ *
+ * If you are copying from the scatterlist to the hardware and a
+ * single page can fill two hardware entries, then the data will
+ * be read out of the page into the first hardware entry, and so on
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+
+ /*
+ * If we've used up the current hardware address/length
+ * pair, we need to load a new one
+ *
+ * This is done in a while loop so that descriptors with
+ * length == 0 will be skipped
+ */
+ while (hw_used >= hw->length) {
+
+ /*
+ * If the current hardware entry is the last
+ * entry in the list, we're finished
+ */
+ if (list_is_last(&hw->entry, &slave->addresses))
+ goto finished;
+
+ /* Get the next hardware address/length pair */
+ hw = list_entry(hw->entry.next,
+ struct fsl_dma_hw_addr, entry);
+ hw_used = 0;
+ }
+
+ /* Allocate the link descriptor from DMA pool */
+ new = fsl_dma_alloc_descriptor(fsl_chan);
+ if (!new) {
+ dev_err(fsl_chan->dev, "No free memory for "
+ "link descriptor\n");
+ goto fail;
+ }
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+#endif
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the DMA controller limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ hw->length - hw_used);
+ copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
+
+ /*
+ * DMA_FROM_DEVICE
+ * from the hardware to the scatterlist
+ *
+ * DMA_TO_DEVICE
+ * from the scatterlist to the hardware
+ */
+ if (direction == DMA_FROM_DEVICE) {
+ dma_src = hw->address + hw_used;
+ dma_dst = sg_dma_address(sg) + sg_used;
+ } else {
+ dma_src = sg_dma_address(sg) + sg_used;
+ dma_dst = hw->address + hw_used;
+ }
+
+ /* Fill in the descriptor */
+ set_desc_cnt(fsl_chan, &new->hw, copy);
+ set_desc_src(fsl_chan, &new->hw, dma_src);
+ set_desc_dest(fsl_chan, &new->hw, dma_dst);
+
+ /*
+ * If this is not the first descriptor, chain the
+ * current descriptor after the previous descriptor
+ */
+ if (!first) {
+ first = new;
+ } else {
+ set_desc_next(fsl_chan, &prev->hw,
+ new->async_tx.phys);
+ }
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ sg_used += copy;
+ hw_used += copy;
+
+ /* Insert the link descriptor into the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ }
+ }
+
+finished:
+
+ /* All of the hardware address/length pairs had length == 0 */
+ if (!first || !new)
+ return NULL;
+
+ new->async_tx.flags = flags;
+ new->async_tx.cookie = -EBUSY;
+
+ /* Set End-of-link to the last link descriptor of new list */
+ set_ld_eol(fsl_chan, new);
+
+ /* Enable extra controller features */
+ if (fsl_chan->set_src_loop_size)
+ fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+
+ if (fsl_chan->set_dest_loop_size)
+ fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+
+ if (fsl_chan->toggle_ext_start)
+ fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+
+ if (fsl_chan->toggle_ext_pause)
+ fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+
+ if (fsl_chan->set_request_count)
+ fsl_chan->set_request_count(fsl_chan, slave->request_count);
+
+ return &first->async_tx;
+
+fail:
+ /* If first was not set, then we failed to allocate the very first
+ * descriptor, and we're done */
+ if (!first)
+ return NULL;
+
+ /*
+ * First is set, so all of the descriptors we allocated have been added
+ * to first->tx_list, INCLUDING "first" itself. Therefore we
+ * must traverse the list backwards freeing each descriptor in turn
+ *
+ * We're re-using variables for the loop, oh well
+ */
+ tx_list = &first->tx_list;
+ list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
+ list_del_init(&new->node);
+ dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+ }
+
+ return NULL;
+}
+
+static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan;
+ struct fsl_desc_sw *desc, *tmp;
+ unsigned long flags;
+
+ if (!chan)
+ return;
+
+ fsl_chan = to_fsl_chan(chan);
+
+ /* Halt the DMA engine */
+ dma_halt(fsl_chan);
+
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+ /* Remove and free all of the descriptors in the LD queue */
+ list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
+ list_del(&desc->node);
+ dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+ }
+
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+}
+
+/**
* fsl_dma_update_completed_cookie - Update the completed cookie.
* @fsl_chan : Freescale DMA channel
*/
@@ -883,6 +1121,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
+ new_fsl_chan->set_request_count = fsl_chan_set_request_count;
}
spin_lock_init(&new_fsl_chan->desc_lock);
@@ -962,12 +1201,15 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
fdev->common.device_is_tx_complete = fsl_dma_is_complete;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
+ fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
+ fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
fdev->common.dev = &dev->dev;
fdev->irq = irq_of_parse_and_map(dev->node, 0);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index dc7f2686579..0df14cbb8ca 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -90,6 +90,7 @@ struct fsl_dma_ld_hw {
struct fsl_desc_sw {
struct fsl_dma_ld_hw hw;
struct list_head node;
+ struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
struct list_head *ld;
void *priv;
@@ -143,10 +144,11 @@ struct fsl_dma_chan {
struct tasklet_struct tasklet;
u32 feature;
- void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
};
#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
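/*
 * Editor's sketch (not part of the patch): a client drives the new
 * DMA_SLAVE path by pointing chan->private at a struct fsl_dma_slave
 * before preparing the transfer.  Field names follow the code above;
 * fifo_phys, nbytes, sgl and sg_len are assumed, and sgl is assumed to be
 * already mapped with dma_map_sg().
 */
struct fsl_dma_slave slave = {
	.external_pause = 1,	/* pace transfers with DREQ# */
	.request_count  = 64,	/* bytes per DREQ# assertion */
};
struct fsl_dma_hw_addr hw = {
	.address = fifo_phys,	/* device FIFO bus address */
	.length  = nbytes,
};
struct dma_async_tx_descriptor *tx;

INIT_LIST_HEAD(&slave.addresses);
list_add_tail(&hw.entry, &slave.addresses);

chan->private = &slave;
tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
					DMA_TO_DEVICE, DMA_PREP_INTERRUPT);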
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
deleted file mode 100644
index 2225bb6ba3d..00000000000
--- a/drivers/dma/ioat.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-#include "ioatdma_hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
- /* I/OAT v1 platforms */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
- /* I/OAT v2 platforms */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
- /* I/OAT v3 platforms */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
- { 0, }
-};
-
-struct ioat_device {
- struct pci_dev *pdev;
- void __iomem *iobase;
- struct ioatdma_device *dma;
- struct dca_provider *dca;
-};
-
-static int __devinit ioat_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-static void __devexit ioat_remove(struct pci_dev *pdev);
-
-static int ioat_dca_enabled = 1;
-module_param(ioat_dca_enabled, int, 0644);
-MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-
-static struct pci_driver ioat_pci_driver = {
- .name = "ioatdma",
- .id_table = ioat_pci_tbl,
- .probe = ioat_probe,
- .remove = __devexit_p(ioat_remove),
-};
-
-static int __devinit ioat_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- void __iomem *iobase;
- struct ioat_device *device;
- unsigned long mmio_start, mmio_len;
- int err;
-
- err = pci_enable_device(pdev);
- if (err)
- goto err_enable_device;
-
- err = pci_request_regions(pdev, ioat_pci_driver.name);
- if (err)
- goto err_request_regions;
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err)
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err)
- goto err_set_dma_mask;
-
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err)
- goto err_set_dma_mask;
-
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
- iobase = ioremap(mmio_start, mmio_len);
- if (!iobase) {
- err = -ENOMEM;
- goto err_ioremap;
- }
-
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- err = -ENOMEM;
- goto err_kzalloc;
- }
- device->pdev = pdev;
- pci_set_drvdata(pdev, device);
- device->iobase = iobase;
-
- pci_set_master(pdev);
-
- switch (readb(iobase + IOAT_VER_OFFSET)) {
- case IOAT_VER_1_2:
- device->dma = ioat_dma_probe(pdev, iobase);
- if (device->dma && ioat_dca_enabled)
- device->dca = ioat_dca_init(pdev, iobase);
- break;
- case IOAT_VER_2_0:
- device->dma = ioat_dma_probe(pdev, iobase);
- if (device->dma && ioat_dca_enabled)
- device->dca = ioat2_dca_init(pdev, iobase);
- break;
- case IOAT_VER_3_0:
- device->dma = ioat_dma_probe(pdev, iobase);
- if (device->dma && ioat_dca_enabled)
- device->dca = ioat3_dca_init(pdev, iobase);
- break;
- default:
- err = -ENODEV;
- break;
- }
- if (!device->dma)
- err = -ENODEV;
-
- if (err)
- goto err_version;
-
- return 0;
-
-err_version:
- kfree(device);
-err_kzalloc:
- iounmap(iobase);
-err_ioremap:
-err_set_dma_mask:
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-err_request_regions:
-err_enable_device:
- return err;
-}
-
-static void __devexit ioat_remove(struct pci_dev *pdev)
-{
- struct ioat_device *device = pci_get_drvdata(pdev);
-
- dev_err(&pdev->dev, "Removing dma and dca services\n");
- if (device->dca) {
- unregister_dca_provider(device->dca);
- free_dca_provider(device->dca);
- device->dca = NULL;
- }
-
- if (device->dma) {
- ioat_dma_remove(device->dma);
- device->dma = NULL;
- }
-
- kfree(device);
-}
-
-static int __init ioat_init_module(void)
-{
- return pci_register_driver(&ioat_pci_driver);
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
- pci_unregister_driver(&ioat_pci_driver);
-}
-module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
new file mode 100644
index 00000000000..8997d3fb905
--- /dev/null
+++ b/drivers/dma/ioat/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
+ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat/dca.c
index c012a1e1504..69d02615c4d 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -33,8 +33,8 @@
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
+#include "dma.h"
+#include "registers.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = {
};
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
@@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
return slots;
}
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider * __devinit
+ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
struct dca_provider *dca;
struct ioat_dca_priv *ioatdca;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
new file mode 100644
index 00000000000..c524d36d3c2
--- /dev/null
+++ b/drivers/dma/ioat/dma.c
@@ -0,0 +1,1238 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous
+ * copy operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+ "high-water mark for pushing ioat descriptors (default: 4)");
+
+/* internal functions */
+static void ioat1_cleanup(struct ioat_dma_chan *ioat);
+static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
+
+/**
+ * ioat_dma_do_interrupt - handler used for single vector interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+{
+ struct ioatdma_device *instance = data;
+ struct ioat_chan_common *chan;
+ unsigned long attnstatus;
+ int bit;
+ u8 intrctrl;
+
+ intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
+
+ if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
+ return IRQ_NONE;
+
+ if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+ return IRQ_NONE;
+ }
+
+ attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+ for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+ chan = ioat_chan_by_index(instance, bit);
+ tasklet_schedule(&chan->cleanup_task);
+ }
+
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+{
+ struct ioat_chan_common *chan = data;
+
+ tasklet_schedule(&chan->cleanup_task);
+
+ return IRQ_HANDLED;
+}
+
+static void ioat1_cleanup_tasklet(unsigned long data);
+
+/* common channel initialization */
+void ioat_init_channel(struct ioatdma_device *device,
+ struct ioat_chan_common *chan, int idx,
+ void (*timer_fn)(unsigned long),
+ void (*tasklet)(unsigned long),
+ unsigned long ioat)
+{
+ struct dma_device *dma = &device->common;
+
+ chan->device = device;
+ chan->reg_base = device->reg_base + (0x80 * (idx + 1));
+ spin_lock_init(&chan->cleanup_lock);
+ chan->common.device = dma;
+ list_add_tail(&chan->common.device_node, &dma->channels);
+ device->idx[idx] = chan;
+ init_timer(&chan->timer);
+ chan->timer.function = timer_fn;
+ chan->timer.data = ioat;
+ tasklet_init(&chan->cleanup_task, tasklet, ioat);
+ tasklet_disable(&chan->cleanup_task);
+}
+
+static void ioat1_timer_event(unsigned long data);
+
+/**
+ * ioat1_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
+static int ioat1_enumerate_channels(struct ioatdma_device *device)
+{
+ u8 xfercap_scale;
+ u32 xfercap;
+ int i;
+ struct ioat_dma_chan *ioat;
+ struct device *dev = &device->pdev->dev;
+ struct dma_device *dma = &device->common;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+ dma->chancnt &= 0x1f; /* bits [4:0] valid */
+ if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+ dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+ dma->chancnt, ARRAY_SIZE(device->idx));
+ dma->chancnt = ARRAY_SIZE(device->idx);
+ }
+ xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+ xfercap_scale &= 0x1f; /* bits [4:0] valid */
+ xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+ dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
+
+#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
+ if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
+ dma->chancnt--;
+#endif
+ for (i = 0; i < dma->chancnt; i++) {
+ ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
+ if (!ioat)
+ break;
+
+ ioat_init_channel(device, &ioat->base, i,
+ ioat1_timer_event,
+ ioat1_cleanup_tasklet,
+ (unsigned long) ioat);
+ ioat->xfercap = xfercap;
+ spin_lock_init(&ioat->desc_lock);
+ INIT_LIST_HEAD(&ioat->free_desc);
+ INIT_LIST_HEAD(&ioat->used_desc);
+ }
+ dma->chancnt = i;
+ return i;
+}
+
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ * descriptors to hw
+ * @chan: DMA channel handle
+ */
+static inline void
+__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
+{
+ void __iomem *reg_base = ioat->base.reg_base;
+
+ dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
+ __func__, ioat->pending);
+ ioat->pending = 0;
+ writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(chan);
+
+ if (ioat->pending > 0) {
+ spin_lock_bh(&ioat->desc_lock);
+ __ioat1_dma_memcpy_issue_pending(ioat);
+ spin_unlock_bh(&ioat->desc_lock);
+ }
+}
+
+/**
+ * ioat1_reset_channel - restart a channel
+ * @ioat: IOAT DMA channel handle
+ */
+static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ void __iomem *reg_base = chan->reg_base;
+ u32 chansts, chanerr;
+
+ dev_warn(to_dev(chan), "reset\n");
+ chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
+ chansts = *chan->completion & IOAT_CHANSTS_STATUS;
+ if (chanerr) {
+ dev_err(to_dev(chan),
+ "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+ chan_num(chan), chansts, chanerr);
+ writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
+ }
+
+ /*
+ * whack it upside the head with a reset
+ * and wait for things to settle out.
+ * force the pending count to a really big negative
+ * to make sure no one forces an issue_pending
+ * while we're waiting.
+ */
+
+ ioat->pending = INT_MIN;
+ writeb(IOAT_CHANCMD_RESET,
+ reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+ set_bit(IOAT_RESET_PENDING, &chan->state);
+ mod_timer(&chan->timer, jiffies + RESET_DELAY);
+}
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *c = tx->chan;
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_desc_sw *first;
+ struct ioat_desc_sw *chain_tail;
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&ioat->desc_lock);
+ /* cookie incr and addition to used_list must be atomic */
+ cookie = c->cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ c->cookie = cookie;
+ tx->cookie = cookie;
+ dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
+
+ /* write address into NextDescriptor field of last desc in chain */
+ first = to_ioat_desc(desc->tx_list.next);
+ chain_tail = to_ioat_desc(ioat->used_desc.prev);
+ /* make descriptor updates globally visible before chaining */
+ wmb();
+ chain_tail->hw->next = first->txd.phys;
+ list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
+ dump_desc_dbg(ioat, chain_tail);
+ dump_desc_dbg(ioat, first);
+
+ if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ ioat->active += desc->hw->tx_cnt;
+ ioat->pending += desc->hw->tx_cnt;
+ if (ioat->pending >= ioat_pending_level)
+ __ioat1_dma_memcpy_issue_pending(ioat);
+ spin_unlock_bh(&ioat->desc_lock);
+
+ return cookie;
+}
+
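/*
 * Editor's aside (not part of the patch): the cookie handling in
 * ioat1_tx_submit() above is the usual dmaengine idiom; as a stand-alone
 * helper (hypothetical name) it would read:
 */
static dma_cookie_t ioat_assign_cookie(struct dma_chan *c,
				       struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie = c->cookie + 1;

	if (cookie < 0)		/* negative cookies are reserved for errors */
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	return cookie;
}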
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
+static struct ioat_desc_sw *
+ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
+{
+ struct ioat_dma_descriptor *desc;
+ struct ioat_desc_sw *desc_sw;
+ struct ioatdma_device *ioatdma_device;
+ dma_addr_t phys;
+
+ ioatdma_device = ioat->base.device;
+ desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
+ if (unlikely(!desc))
+ return NULL;
+
+ desc_sw = kzalloc(sizeof(*desc_sw), flags);
+ if (unlikely(!desc_sw)) {
+ pci_pool_free(ioatdma_device->dma_pool, desc, phys);
+ return NULL;
+ }
+
+ memset(desc, 0, sizeof(*desc));
+
+ INIT_LIST_HEAD(&desc_sw->tx_list);
+ dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
+ desc_sw->txd.tx_submit = ioat1_tx_submit;
+ desc_sw->hw = desc;
+ desc_sw->txd.phys = phys;
+ set_desc_id(desc_sw, -1);
+
+ return desc_sw;
+}
+
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+ "ioat1: initial descriptors per channel (default: 256)");
+/**
+ * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
+static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_desc_sw *desc;
+ u32 chanerr;
+ int i;
+ LIST_HEAD(tmp_list);
+
+ /* have we already been set up? */
+ if (!list_empty(&ioat->free_desc))
+ return ioat->desccount;
+
+ /* Setup register to interrupt and write completion status on error */
+ writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ if (chanerr) {
+ dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ }
+
+ /* Allocate descriptors */
+ for (i = 0; i < ioat_initial_desc_count; i++) {
+ desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
+ if (!desc) {
+ dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
+ break;
+ }
+ set_desc_id(desc, i);
+ list_add_tail(&desc->node, &tmp_list);
+ }
+ spin_lock_bh(&ioat->desc_lock);
+ ioat->desccount = i;
+ list_splice(&tmp_list, &ioat->free_desc);
+ spin_unlock_bh(&ioat->desc_lock);
+
+ /* allocate a completion writeback area */
+ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+ chan->completion = pci_pool_alloc(chan->device->completion_pool,
+ GFP_KERNEL, &chan->completion_dma);
+ memset(chan->completion, 0, sizeof(*chan->completion));
+ writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
+ chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(((u64) chan->completion_dma) >> 32,
+ chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+ tasklet_enable(&chan->cleanup_task);
+ ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
+ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+ __func__, ioat->desccount);
+ return ioat->desccount;
+}
+
+/**
+ * ioat1_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *ioatdma_device = chan->device;
+ struct ioat_desc_sw *desc, *_desc;
+ int in_use_descs = 0;
+
+ /* Before freeing channel resources first check
+ * if they have been previously allocated for this channel.
+ */
+ if (ioat->desccount == 0)
+ return;
+
+ tasklet_disable(&chan->cleanup_task);
+ del_timer_sync(&chan->timer);
+ ioat1_cleanup(ioat);
+
+ /* Delay 100ms after reset to allow internal DMA logic to quiesce
+ * before removing DMA descriptor resources.
+ */
+ writeb(IOAT_CHANCMD_RESET,
+ chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+ mdelay(100);
+
+ spin_lock_bh(&ioat->desc_lock);
+ list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+ dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
+ __func__, desc_id(desc));
+ dump_desc_dbg(ioat, desc);
+ in_use_descs++;
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->txd.phys);
+ kfree(desc);
+ }
+ list_for_each_entry_safe(desc, _desc,
+ &ioat->free_desc, node) {
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->txd.phys);
+ kfree(desc);
+ }
+ spin_unlock_bh(&ioat->desc_lock);
+
+ pci_pool_free(ioatdma_device->completion_pool,
+ chan->completion,
+ chan->completion_dma);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+
+ chan->last_completion = 0;
+ chan->completion_dma = 0;
+ ioat->pending = 0;
+ ioat->desccount = 0;
+}
+
+/**
+ * ioat1_dma_get_next_descriptor - return the next available descriptor
+ * @ioat: IOAT DMA channel handle
+ *
+ * Gets the next descriptor from the chain, and must be called with the
+ * channel's desc_lock held. Allocates more descriptors if the channel
+ * has run out.
+ */
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
+{
+ struct ioat_desc_sw *new;
+
+ if (!list_empty(&ioat->free_desc)) {
+ new = to_ioat_desc(ioat->free_desc.next);
+ list_del(&new->node);
+ } else {
+ /* try to get another desc */
+ new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
+ if (!new) {
+ dev_err(to_dev(&ioat->base), "alloc failed\n");
+ return NULL;
+ }
+ }
+ dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
+ __func__, desc_id(new));
+ prefetch(new->hw);
+ return new;
+}
+
+static struct dma_async_tx_descriptor *
+ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_desc_sw *desc;
+ size_t copy;
+ LIST_HEAD(chain);
+ dma_addr_t src = dma_src;
+ dma_addr_t dest = dma_dest;
+ size_t total_len = len;
+ struct ioat_dma_descriptor *hw = NULL;
+ int tx_cnt = 0;
+
+ spin_lock_bh(&ioat->desc_lock);
+ desc = ioat1_dma_get_next_descriptor(ioat);
+ do {
+ if (!desc)
+ break;
+
+ tx_cnt++;
+ copy = min_t(size_t, len, ioat->xfercap);
+
+ hw = desc->hw;
+ hw->size = copy;
+ hw->ctl = 0;
+ hw->src_addr = src;
+ hw->dst_addr = dest;
+
+ list_add_tail(&desc->node, &chain);
+
+ len -= copy;
+ dest += copy;
+ src += copy;
+ if (len) {
+ struct ioat_desc_sw *next;
+
+ async_tx_ack(&desc->txd);
+ next = ioat1_dma_get_next_descriptor(ioat);
+ hw->next = next ? next->txd.phys : 0;
+ dump_desc_dbg(ioat, desc);
+ desc = next;
+ } else
+ hw->next = 0;
+ } while (len);
+
+ if (!desc) {
+ struct ioat_chan_common *chan = &ioat->base;
+
+ dev_err(to_dev(chan),
+ "chan%d - get_next_desc failed\n", chan_num(chan));
+ list_splice(&chain, &ioat->free_desc);
+ spin_unlock_bh(&ioat->desc_lock);
+ return NULL;
+ }
+ spin_unlock_bh(&ioat->desc_lock);
+
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ list_splice(&chain, &desc->tx_list);
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->tx_cnt = tx_cnt;
+ dump_desc_dbg(ioat, desc);
+
+ return &desc->txd;
+}
+
+static void ioat1_cleanup_tasklet(unsigned long data)
+{
+ struct ioat_dma_chan *chan = (void *)data;
+
+ ioat1_cleanup(chan);
+ writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+ size_t len, struct ioat_dma_descriptor *hw)
+{
+ struct pci_dev *pdev = chan->device->pdev;
+ size_t offset = len - hw->size;
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+ ioat_unmap(pdev, hw->dst_addr - offset, len,
+ PCI_DMA_FROMDEVICE, flags, 1);
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ ioat_unmap(pdev, hw->src_addr - offset, len,
+ PCI_DMA_TODEVICE, flags, 0);
+}
+
+unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+{
+ unsigned long phys_complete;
+ u64 completion;
+
+ completion = *chan->completion;
+ phys_complete = ioat_chansts_to_addr(completion);
+
+ dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+ (unsigned long long) phys_complete);
+
+ if (is_ioat_halted(completion)) {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
+ chanerr);
+
+ /* TODO do something to salvage the situation */
+ }
+
+ return phys_complete;
+}
+
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+ unsigned long *phys_complete)
+{
+ *phys_complete = ioat_get_current_completion(chan);
+ if (*phys_complete == chan->last_completion)
+ return false;
+ clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ return true;
+}
+
+static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct list_head *_desc, *n;
+ struct dma_async_tx_descriptor *tx;
+
+ dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+ __func__, phys_complete);
+ list_for_each_safe(_desc, n, &ioat->used_desc) {
+ struct ioat_desc_sw *desc;
+
+ prefetch(n);
+ desc = list_entry(_desc, typeof(*desc), node);
+ tx = &desc->txd;
+ /*
+ * Incoming DMA requests may use multiple descriptors,
+ * due to exceeding xfercap, perhaps. If so, only the
+ * last one will have a cookie, and require unmapping.
+ */
+ dump_desc_dbg(ioat, desc);
+ if (tx->cookie) {
+ chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+ ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+ ioat->active -= desc->hw->tx_cnt;
+ if (tx->callback) {
+ tx->callback(tx->callback_param);
+ tx->callback = NULL;
+ }
+ }
+
+ if (tx->phys != phys_complete) {
+ /*
+ * a completed entry, but not the last, so clean
+ * up if the client is done with the descriptor
+ */
+ if (async_tx_test_ack(tx))
+ list_move_tail(&desc->node, &ioat->free_desc);
+ } else {
+ /*
+ * last used desc. Do not remove, so we can
+ * append from it.
+ */
+
+ /* if nothing else is pending, cancel the
+ * completion timeout
+ */
+ if (n == &ioat->used_desc) {
+ dev_dbg(to_dev(chan),
+ "%s cancel completion timeout\n",
+ __func__);
+ clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+ }
+
+ /* TODO check status bits? */
+ break;
+ }
+ }
+
+ chan->last_completion = phys_complete;
+}
+
+/**
+ * ioat1_cleanup - clean up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ *
+ * To prevent lock contention we defer cleanup when the locks are
+ * contended with a terminal timeout that forces cleanup and catches
+ * completion notification errors.
+ */
+static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ prefetch(chan->completion);
+
+ if (!spin_trylock_bh(&chan->cleanup_lock))
+ return;
+
+ if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+
+ if (!spin_trylock_bh(&ioat->desc_lock)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+
+ __cleanup(ioat, phys_complete);
+
+ spin_unlock_bh(&ioat->desc_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat1_timer_event(unsigned long data)
+{
+ struct ioat_dma_chan *ioat = (void *) data;
+ struct ioat_chan_common *chan = &ioat->base;
+
+ dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
+ struct ioat_desc_sw *desc;
+
+ spin_lock_bh(&ioat->desc_lock);
+
+ /* restart active descriptors */
+ desc = to_ioat_desc(ioat->used_desc.prev);
+ ioat_set_chainaddr(ioat, desc->txd.phys);
+ ioat_start(chan);
+
+ ioat->pending = 0;
+ set_bit(IOAT_COMPLETION_PENDING, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ spin_unlock_bh(&ioat->desc_lock);
+ } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+ unsigned long phys_complete;
+
+ spin_lock_bh(&ioat->desc_lock);
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ ioat1_reset_channel(ioat);
+ else {
+ u64 status = ioat_chansts(chan);
+
+ /* manually update the last completion address */
+ if (ioat_chansts_to_addr(status) != 0)
+ *chan->completion = status;
+
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+ spin_unlock_bh(&ioat->desc_lock);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static enum dma_status
+ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+
+ if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ return DMA_SUCCESS;
+
+ ioat1_cleanup(ioat);
+
+ return ioat_is_complete(c, cookie, done, used);
+}
+
+static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_desc_sw *desc;
+ struct ioat_dma_descriptor *hw;
+
+ spin_lock_bh(&ioat->desc_lock);
+
+ desc = ioat1_dma_get_next_descriptor(ioat);
+
+ if (!desc) {
+ dev_err(to_dev(chan),
+ "Unable to start null desc - get next desc failed\n");
+ spin_unlock_bh(&ioat->desc_lock);
+ return;
+ }
+
+ hw = desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = 1;
+ hw->ctl_f.compl_write = 1;
+ /* set size to non-zero value (channel returns error when size is 0) */
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ hw->src_addr = 0;
+ hw->dst_addr = 0;
+ async_tx_ack(&desc->txd);
+ hw->next = 0;
+ list_add_tail(&desc->node, &ioat->used_desc);
+ dump_desc_dbg(ioat, desc);
+
+ ioat_set_chainaddr(ioat, desc->txd.phys);
+ ioat_start(chan);
+ spin_unlock_bh(&ioat->desc_lock);
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void __devinit ioat_dma_test_callback(void *dma_async_param)
+{
+ struct completion *cmp = dma_async_param;
+
+ complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @device: device to be tested
+ */
+int __devinit ioat_dma_self_test(struct ioatdma_device *device)
+{
+ int i;
+ u8 *src;
+ u8 *dest;
+ struct dma_device *dma = &device->common;
+ struct device *dev = &device->pdev->dev;
+ struct dma_chan *dma_chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dest, dma_src;
+ dma_cookie_t cookie;
+ int err = 0;
+ struct completion cmp;
+ unsigned long tmo;
+ unsigned long flags;
+
+ src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+ dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < IOAT_TEST_SIZE; i++)
+ src[i] = (u8)i;
+
+ /* Start copy, using first DMA channel */
+ dma_chan = container_of(dma->channels.next, struct dma_chan,
+ device_node);
+ if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+ dev_err(dev, "selftest cannot allocate chan resource\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+ flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+ DMA_PREP_INTERRUPT;
+ tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+ IOAT_TEST_SIZE, flags);
+ if (!tx) {
+ dev_err(dev, "Self-test prep failed, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test setup failed, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (tmo == 0 ||
+ dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ != DMA_SUCCESS) {
+ dev_err(dev, "Self-test copy timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+ dev_err(dev, "Self-test copy failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ dma->device_free_chan_resources(dma_chan);
+out:
+ kfree(src);
+ kfree(dest);
+ return err;
+}
+
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+ sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+ "set ioat interrupt style: msix (default), "
+ "msix-single-vector, msi, intx)");
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @device: ioat device
+ */
+static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+{
+ struct ioat_chan_common *chan;
+ struct pci_dev *pdev = device->pdev;
+ struct device *dev = &pdev->dev;
+ struct msix_entry *msix;
+ int i, j, msixcnt;
+ int err = -EINVAL;
+ u8 intrctrl = 0;
+
+ if (!strcmp(ioat_interrupt_style, "msix"))
+ goto msix;
+ if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
+ goto msix_single_vector;
+ if (!strcmp(ioat_interrupt_style, "msi"))
+ goto msi;
+ if (!strcmp(ioat_interrupt_style, "intx"))
+ goto intx;
+ dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
+ goto err_no_irq;
+
+msix:
+ /* The number of MSI-X vectors should equal the number of channels */
+ msixcnt = device->common.chancnt;
+ for (i = 0; i < msixcnt; i++)
+ device->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+ if (err < 0)
+ goto msi;
+ if (err > 0)
+ goto msix_single_vector;
+
+ for (i = 0; i < msixcnt; i++) {
+ msix = &device->msix_entries[i];
+ chan = ioat_chan_by_index(device, i);
+ err = devm_request_irq(dev, msix->vector,
+ ioat_dma_do_interrupt_msix, 0,
+ "ioat-msix", chan);
+ if (err) {
+ for (j = 0; j < i; j++) {
+ msix = &device->msix_entries[j];
+ chan = ioat_chan_by_index(device, j);
+ devm_free_irq(dev, msix->vector, chan);
+ }
+ goto msix_single_vector;
+ }
+ }
+ intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+ goto done;
+
+msix_single_vector:
+ msix = &device->msix_entries[0];
+ msix->entry = 0;
+ err = pci_enable_msix(pdev, device->msix_entries, 1);
+ if (err)
+ goto msi;
+
+ err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
+ "ioat-msix", device);
+ if (err) {
+ pci_disable_msix(pdev);
+ goto msi;
+ }
+ goto done;
+
+msi:
+ err = pci_enable_msi(pdev);
+ if (err)
+ goto intx;
+
+ err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+ "ioat-msi", device);
+ if (err) {
+ pci_disable_msi(pdev);
+ goto intx;
+ }
+ goto done;
+
+intx:
+ err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+ IRQF_SHARED, "ioat-intx", device);
+ if (err)
+ goto err_no_irq;
+
+done:
+ if (device->intr_quirk)
+ device->intr_quirk(device);
+ intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+ writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+ return 0;
+
+err_no_irq:
+ /* Disable all interrupt generation */
+ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+ dev_err(dev, "no usable interrupts\n");
+ return err;
+}
+
+static void ioat_disable_interrupts(struct ioatdma_device *device)
+{
+ /* Disable all interrupt generation */
+ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+}
+
+int __devinit ioat_probe(struct ioatdma_device *device)
+{
+ int err = -ENODEV;
+ struct dma_device *dma = &device->common;
+ struct pci_dev *pdev = device->pdev;
+ struct device *dev = &pdev->dev;
+
+ /* DMA coherent memory pool for DMA descriptor allocations */
+ device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+ sizeof(struct ioat_dma_descriptor),
+ 64, 0);
+ if (!device->dma_pool) {
+ err = -ENOMEM;
+ goto err_dma_pool;
+ }
+
+ device->completion_pool = pci_pool_create("completion_pool", pdev,
+ sizeof(u64), SMP_CACHE_BYTES,
+ SMP_CACHE_BYTES);
+
+ if (!device->completion_pool) {
+ err = -ENOMEM;
+ goto err_completion_pool;
+ }
+
+ device->enumerate_channels(device);
+
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma->dev = &pdev->dev;
+
+ if (!dma->chancnt) {
+ dev_err(dev, "zero channels detected\n");
+ goto err_setup_interrupts;
+ }
+
+ err = ioat_dma_setup_interrupts(device);
+ if (err)
+ goto err_setup_interrupts;
+
+ err = device->self_test(device);
+ if (err)
+ goto err_self_test;
+
+ return 0;
+
+err_self_test:
+ ioat_disable_interrupts(device);
+err_setup_interrupts:
+ pci_pool_destroy(device->completion_pool);
+err_completion_pool:
+ pci_pool_destroy(device->dma_pool);
+err_dma_pool:
+ return err;
+}
+
+int __devinit ioat_register(struct ioatdma_device *device)
+{
+ int err = dma_async_device_register(&device->common);
+
+ if (err) {
+ ioat_disable_interrupts(device);
+ pci_pool_destroy(device->completion_pool);
+ pci_pool_destroy(device->dma_pool);
+ }
+
+ return err;
+}
+
+/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
+static void ioat1_intr_quirk(struct ioatdma_device *device)
+{
+ struct pci_dev *pdev = device->pdev;
+ u32 dmactrl;
+
+ pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+ if (pdev->msi_enabled)
+ dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+ else
+ dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
+ pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+
+ return sprintf(page, "%d\n", ioat->desccount);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+ struct ioat_dma_chan *ioat = to_ioat_chan(c);
+
+ return sprintf(page, "%d\n", ioat->active);
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+ struct dma_device *dma = c->device;
+
+ return sprintf(page, "copy%s%s%s%s%s%s\n",
+ dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+ dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+ dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+ dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+ dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
+ dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+
+}
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
+
+static ssize_t version_show(struct dma_chan *c, char *page)
+{
+ struct dma_device *dma = c->device;
+ struct ioatdma_device *device = to_ioatdma_device(dma);
+
+ return sprintf(page, "%d.%d\n",
+ device->version >> 4, device->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static struct attribute *ioat1_attrs[] = {
+ &ring_size_attr.attr,
+ &ring_active_attr.attr,
+ &ioat_cap_attr.attr,
+ &ioat_version_attr.attr,
+ NULL,
+};
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct ioat_sysfs_entry *entry;
+ struct ioat_chan_common *chan;
+
+ entry = container_of(attr, struct ioat_sysfs_entry, attr);
+ chan = container_of(kobj, struct ioat_chan_common, kobj);
+
+ if (!entry->show)
+ return -EIO;
+ return entry->show(&chan->common, page);
+}
+
+struct sysfs_ops ioat_sysfs_ops = {
+ .show = ioat_attr_show,
+};
+
+static struct kobj_type ioat1_ktype = {
+ .sysfs_ops = &ioat_sysfs_ops,
+ .default_attrs = ioat1_attrs,
+};
+
+void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
+{
+ struct dma_device *dma = &device->common;
+ struct dma_chan *c;
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ struct ioat_chan_common *chan = to_chan_common(c);
+ struct kobject *parent = &c->dev->device.kobj;
+ int err;
+
+ err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
+ if (err) {
+ dev_warn(to_dev(chan),
+ "sysfs init error (%d), continuing...\n", err);
+ kobject_put(&chan->kobj);
+ set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
+ }
+ }
+}
+
+void ioat_kobject_del(struct ioatdma_device *device)
+{
+ struct dma_device *dma = &device->common;
+ struct dma_chan *c;
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ struct ioat_chan_common *chan = to_chan_common(c);
+
+ if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
+ kobject_del(&chan->kobj);
+ kobject_put(&chan->kobj);
+ }
+ }
+}
+
+int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
+{
+ struct pci_dev *pdev = device->pdev;
+ struct dma_device *dma;
+ int err;
+
+ device->intr_quirk = ioat1_intr_quirk;
+ device->enumerate_channels = ioat1_enumerate_channels;
+ device->self_test = ioat_dma_self_test;
+ dma = &device->common;
+ dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+ dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
+ dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
+ dma->device_is_tx_complete = ioat1_dma_is_complete;
+
+ err = ioat_probe(device);
+ if (err)
+ return err;
+ ioat_set_tcp_copy_break(4096);
+ err = ioat_register(device);
+ if (err)
+ return err;
+ ioat_kobject_add(device, &ioat1_ktype);
+
+ if (dca)
+ device->dca = ioat_dca_init(pdev, device->reg_base);
+
+ return err;
+}
+
+void __devexit ioat_dma_remove(struct ioatdma_device *device)
+{
+ struct dma_device *dma = &device->common;
+
+ ioat_disable_interrupts(device);
+
+ ioat_kobject_del(device);
+
+ dma_async_device_unregister(dma);
+
+ pci_pool_destroy(device->dma_pool);
+ pci_pool_destroy(device->completion_pool);
+
+ INIT_LIST_HEAD(&dma->channels);
+}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
new file mode 100644
index 00000000000..c14fdfeb7f3
--- /dev/null
+++ b/drivers/dma/ioat/dma.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef IOATDMA_H
+#define IOATDMA_H
+
+#include <linux/dmaengine.h>
+#include "hw.h"
+#include "registers.h"
+#include <linux/init.h>
+#include <linux/dmapool.h>
+#include <linux/cache.h>
+#include <linux/pci_ids.h>
+#include <net/tcp.h>
+
+#define IOAT_DMA_VERSION "4.00"
+
+#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
+#define IOAT_DMA_DCA_ANY_CPU ~0
+
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
+#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
+#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
+#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
+
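+/* each channel's register bank is 0x80 bytes wide, so the channel index can
+ * be recovered from the offset of its reg_base within the device's MMIO */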
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
+/**
+ * struct ioatdma_device - internal representation of an IOAT device
+ * @pdev: PCI-Express device
+ * @reg_base: MMIO register space base address
+ * @dma_pool: for allocating DMA descriptors
+ * @common: embedded struct dma_device
+ * @version: version of ioatdma device
+ * @msix_entries: irq handlers
+ * @idx: per channel data
+ * @dca: direct cache access context
+ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
+ * @enumerate_channels: hw version specific channel enumeration
+ * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @timer_fn: select between the v2 and v3 timer watchdog routines
+ * @self_test: hardware version specific self test for each supported op type
+ *
+ * Note: the v3 cleanup routine supports raid operations
+ */
+struct ioatdma_device {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ struct pci_pool *dma_pool;
+ struct pci_pool *completion_pool;
+ struct dma_device common;
+ u8 version;
+ struct msix_entry msix_entries[4];
+ struct ioat_chan_common *idx[4];
+ struct dca_provider *dca;
+ void (*intr_quirk)(struct ioatdma_device *device);
+ int (*enumerate_channels)(struct ioatdma_device *device);
+ void (*cleanup_tasklet)(unsigned long data);
+ void (*timer_fn)(unsigned long data);
+ int (*self_test)(struct ioatdma_device *device);
+};
+
+struct ioat_chan_common {
+ struct dma_chan common;
+ void __iomem *reg_base;
+ unsigned long last_completion;
+ spinlock_t cleanup_lock;
+ dma_cookie_t completed_cookie;
+ unsigned long state;
+ #define IOAT_COMPLETION_PENDING 0
+ #define IOAT_COMPLETION_ACK 1
+ #define IOAT_RESET_PENDING 2
+ #define IOAT_KOBJ_INIT_FAIL 3
+ struct timer_list timer;
+ #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
+ #define IDLE_TIMEOUT msecs_to_jiffies(2000)
+ #define RESET_DELAY msecs_to_jiffies(100)
+ struct ioatdma_device *device;
+ dma_addr_t completion_dma;
+ u64 *completion;
+ struct tasklet_struct cleanup_task;
+ struct kobject kobj;
+};
+
+struct ioat_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct dma_chan *, char *);
+};
+
+/**
+ * struct ioat_dma_chan - internal representation of a DMA channel
+ */
+struct ioat_dma_chan {
+ struct ioat_chan_common base;
+
+ size_t xfercap; /* XFERCAP register value expanded out */
+
+ spinlock_t desc_lock;
+ struct list_head free_desc;
+ struct list_head used_desc;
+
+ int pending;
+ u16 desccount;
+ u16 active;
+};
+
+static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
+{
+ return container_of(c, struct ioat_chan_common, common);
+}
+
+static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
+{
+ struct ioat_chan_common *chan = to_chan_common(c);
+
+ return container_of(chan, struct ioat_dma_chan, base);
+}
+
+/**
+ * ioat_is_complete - poll the status of an ioat transaction
+ * @c: channel handle
+ * @cookie: transaction identifier
+ * @done: if set, updated with last completed transaction
+ * @used: if set, updated with last used transaction
+ */
+static inline enum dma_status
+ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct ioat_chan_common *chan = to_chan_common(c);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ last_used = c->cookie;
+ last_complete = chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/* wrapper around hardware descriptor format + additional software fields */
+
+/**
+ * struct ioat_desc_sw - wrapper around hardware descriptor
+ * @hw: hardware DMA descriptor (for memcpy)
+ * @node: this descriptor will either be on the free list,
+ * or attached to a transaction list (tx_list)
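+ * @len: total transaction length (used at unmap time)
+ * @tx_list: list of descriptors that make up a single transaction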
+ * @txd: the generic software descriptor for all engines
+ * @id: identifier for debug
+ */
+struct ioat_desc_sw {
+ struct ioat_dma_descriptor *hw;
+ struct list_head node;
+ size_t len;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor txd;
+ #ifdef DEBUG
+ int id;
+ #endif
+};
+
+#ifdef DEBUG
+#define set_desc_id(desc, i) ((desc)->id = (i))
+#define desc_id(desc) ((desc)->id)
+#else
+#define set_desc_id(desc, i)
+#define desc_id(desc) (0)
+#endif
+
+static inline void
+__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+ struct dma_async_tx_descriptor *tx, int id)
+{
+ struct device *dev = to_dev(chan);
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
+ " ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+ (unsigned long long) tx->phys,
+ (unsigned long long) hw->next, tx->cookie, tx->flags,
+ hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
+}
+
+#define dump_desc_dbg(c, d) \
+ ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+
+static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
+{
+ #ifdef CONFIG_NET_DMA
+ sysctl_tcp_dma_copybreak = copybreak;
+ #endif
+}
+
+static inline struct ioat_chan_common *
+ioat_chan_by_index(struct ioatdma_device *device, int index)
+{
+ return device->idx[index];
+}
+
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u64 status;
+ u32 status_lo;
+
+ /* We need to read the low address first as this causes the
+ * chipset to latch the upper bits for the subsequent read
+ */
+ status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+ status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+ status <<= 32;
+ status |= status_lo;
+
+ return status;
+}
+
+static inline void ioat_start(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+
+ writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline u64 ioat_chansts_to_addr(u64 status)
+{
+ return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+}
+
+static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+{
+ return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+}
+
+static inline void ioat_suspend(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+
+ writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+
+ writel(addr & 0x00000000FFFFFFFF,
+ chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+ writel(addr >> 32,
+ chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+}
+
+static inline bool is_ioat_active(unsigned long status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
+}
+
+static inline bool is_ioat_idle(unsigned long status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
+}
+
+static inline bool is_ioat_halted(unsigned long status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
+}
+
+static inline bool is_ioat_suspended(unsigned long status)
+{
+ return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
+}
+
+/* channel was fatally programmed */
+static inline bool is_ioat_bug(unsigned long err)
+{
+ return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
+ IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
+ IOAT_CHANERR_LENGTH_ERR));
+}
+
+static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
+ int direction, enum dma_ctrl_flags flags, bool dst)
+{
+ if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
+ (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
+ pci_unmap_single(pdev, addr, len, direction);
+ else
+ pci_unmap_page(pdev, addr, len, direction);
+}
+
+int __devinit ioat_probe(struct ioatdma_device *device);
+int __devinit ioat_register(struct ioatdma_device *device);
+int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
+int __devinit ioat_dma_self_test(struct ioatdma_device *device);
+void __devexit ioat_dma_remove(struct ioatdma_device *device);
+struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
+ void __iomem *iobase);
+unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+void ioat_init_channel(struct ioatdma_device *device,
+ struct ioat_chan_common *chan, int idx,
+ void (*timer_fn)(unsigned long),
+ void (*tasklet)(unsigned long),
+ unsigned long ioat);
+void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+ size_t len, struct ioat_dma_descriptor *hw);
+bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+ unsigned long *phys_complete);
+void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *device);
+extern struct sysfs_ops ioat_sysfs_ops;
+extern struct ioat_sysfs_entry ioat_version_attr;
+extern struct ioat_sysfs_entry ioat_cap_attr;
+#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
new file mode 100644
index 00000000000..96ffab7d37a
--- /dev/null
+++ b/drivers/dma/ioat/dma_v2.c
@@ -0,0 +1,871 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
+ * does asynchronous data movement and checksumming operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
+#include "dma.h"
+#include "dma_v2.h"
+#include "registers.h"
+#include "hw.h"
+
+int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+ "ioat2+: allocate 2^n descriptors per channel"
+ " (default: 8 max: 16)");
+static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+ "ioat2+: upper limit for ring size (default: 16)");
+
+void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
+{
+ void __iomem *reg_base = ioat->base.reg_base;
+
+ ioat->pending = 0;
+ ioat->dmacount += ioat2_ring_pending(ioat);
+ ioat->issued = ioat->head;
+ /* make descriptor updates globally visible before notifying channel */
+ wmb();
+ writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+ dev_dbg(to_dev(&ioat->base),
+ "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+ __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
+}
+
+void ioat2_issue_pending(struct dma_chan *chan)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+
+ spin_lock_bh(&ioat->ring_lock);
+ if (ioat->pending == 1)
+ __ioat2_issue_pending(ioat);
+ spin_unlock_bh(&ioat->ring_lock);
+}
+
+/**
+ * ioat2_update_pending - log pending descriptors
+ * @ioat: ioat2+ channel
+ *
+ * Set pending to '1' unless it is already '2'; pending == 2 indicates
+ * that submission is temporarily blocked due to an in-flight reset.
+ * If we are already above the ioat_pending_level threshold then just
+ * issue the pending descriptors.
+ *
+ * called with ring_lock held
+ */
+static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
+{
+ if (unlikely(ioat->pending == 2))
+ return;
+ else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+ __ioat2_issue_pending(ioat);
+ else
+ ioat->pending = 1;
+}
+
+static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_ring_ent *desc;
+ struct ioat_dma_descriptor *hw;
+ int idx;
+
+ if (ioat2_ring_space(ioat) < 1) {
+ dev_err(to_dev(&ioat->base),
+ "Unable to start null desc - ring full\n");
+ return;
+ }
+
+ dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
+ __func__, ioat->head, ioat->tail, ioat->issued);
+ idx = ioat2_desc_alloc(ioat, 1);
+ desc = ioat2_get_ring_ent(ioat, idx);
+
+ hw = desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = 1;
+ hw->ctl_f.compl_write = 1;
+ /* set size to non-zero value (channel returns error when size is 0) */
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ hw->src_addr = 0;
+ hw->dst_addr = 0;
+ async_tx_ack(&desc->txd);
+ ioat2_set_chainaddr(ioat, desc->txd.phys);
+ dump_desc_dbg(ioat, desc);
+ __ioat2_issue_pending(ioat);
+}
+
+static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
+{
+ spin_lock_bh(&ioat->ring_lock);
+ __ioat2_start_null_desc(ioat);
+ spin_unlock_bh(&ioat->ring_lock);
+}
+
+static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct dma_async_tx_descriptor *tx;
+ struct ioat_ring_ent *desc;
+ bool seen_current = false;
+ u16 active;
+ int i;
+
+ dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
+ __func__, ioat->head, ioat->tail, ioat->issued);
+
+ active = ioat2_ring_active(ioat);
+ for (i = 0; i < active && !seen_current; i++) {
+ prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
+ desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ tx = &desc->txd;
+ dump_desc_dbg(ioat, desc);
+ if (tx->cookie) {
+ ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
+ chan->completed_cookie = tx->cookie;
+ tx->cookie = 0;
+ if (tx->callback) {
+ tx->callback(tx->callback_param);
+ tx->callback = NULL;
+ }
+ }
+
+ if (tx->phys == phys_complete)
+ seen_current = true;
+ }
+ ioat->tail += i;
+ BUG_ON(!seen_current); /* no active descs have written a completion? */
+
+ chan->last_completion = phys_complete;
+ if (ioat->head == ioat->tail) {
+ dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
+ __func__);
+ clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ }
+}
+
+/**
+ * ioat2_cleanup - clean finished descriptors (advance tail pointer)
+ * @ioat: ioat2+ channel to be cleaned up
+ */
+static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ prefetch(chan->completion);
+
+ if (!spin_trylock_bh(&chan->cleanup_lock))
+ return;
+
+ if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+
+ if (!spin_trylock_bh(&ioat->ring_lock)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+
+ __cleanup(ioat, phys_complete);
+
+ spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+void ioat2_cleanup_tasklet(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = (void *) data;
+
+ ioat2_cleanup(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+
+ /* set the tail to be re-issued */
+ ioat->issued = ioat->tail;
+ ioat->dmacount = 0;
+ set_bit(IOAT_COMPLETION_PENDING, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ dev_dbg(to_dev(chan),
+ "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+ __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
+
+ if (ioat2_ring_pending(ioat)) {
+ struct ioat_ring_ent *desc;
+
+ desc = ioat2_get_ring_ent(ioat, ioat->tail);
+ ioat2_set_chainaddr(ioat, desc->txd.phys);
+ __ioat2_issue_pending(ioat);
+ } else
+ __ioat2_start_null_desc(ioat);
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+ u32 status;
+
+ status = ioat_chansts(chan);
+ if (is_ioat_active(status) || is_ioat_idle(status))
+ ioat_suspend(chan);
+ while (is_ioat_active(status) || is_ioat_idle(status)) {
+ status = ioat_chansts(chan);
+ cpu_relax();
+ }
+
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+
+ __ioat2_restart_chan(ioat);
+}
+
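+/*
+ * The timer plays two roles: while descriptors are outstanding it acts as a
+ * completion/error watchdog (COMPLETION_TIMEOUT), and once the ring drains
+ * it becomes an idle timer (IDLE_TIMEOUT) that steps an oversized ring back
+ * down toward the default allocation order.
+ */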
+void ioat2_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat_chan_common *chan = &ioat->base;
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+ unsigned long phys_complete;
+ u64 status;
+
+ spin_lock_bh(&ioat->ring_lock);
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors, check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ BUG_ON(is_ioat_bug(chanerr));
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ ioat2_restart_channel(ioat);
+ else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+ spin_unlock_bh(&ioat->ring_lock);
+ } else {
+ u16 active;
+
+ /* if the ring is idle, empty, and oversized, try to step
+ * down the size
+ */
+ spin_lock_bh(&ioat->ring_lock);
+ active = ioat2_ring_active(ioat);
+ if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
+ reshape_ring(ioat, ioat->alloc_order-1);
+ spin_unlock_bh(&ioat->ring_lock);
+
+ /* keep shrinking until we get back to our minimum
+ * default size
+ */
+ if (ioat->alloc_order > ioat_get_alloc_order())
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+/**
+ * ioat2_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
+int ioat2_enumerate_channels(struct ioatdma_device *device)
+{
+ struct ioat2_dma_chan *ioat;
+ struct device *dev = &device->pdev->dev;
+ struct dma_device *dma = &device->common;
+ u8 xfercap_log;
+ int i;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+ dma->chancnt &= 0x1f; /* bits [4:0] valid */
+ if (dma->chancnt > ARRAY_SIZE(device->idx)) {
+ dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+ dma->chancnt, ARRAY_SIZE(device->idx));
+ dma->chancnt = ARRAY_SIZE(device->idx);
+ }
+ xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+ xfercap_log &= 0x1f; /* bits [4:0] valid */
+ if (xfercap_log == 0)
+ return 0;
+ dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+ /* FIXME which i/oat version is i7300? */
+#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
+ if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
+ dma->chancnt--;
+#endif
+ for (i = 0; i < dma->chancnt; i++) {
+ ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
+ if (!ioat)
+ break;
+
+ ioat_init_channel(device, &ioat->base, i,
+ device->timer_fn,
+ device->cleanup_tasklet,
+ (unsigned long) ioat);
+ ioat->xfercap_log = xfercap_log;
+ spin_lock_init(&ioat->ring_lock);
+ }
+ dma->chancnt = i;
+ return i;
+}
+
+static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_chan *c = tx->chan;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ dma_cookie_t cookie = c->cookie;
+
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ tx->cookie = cookie;
+ c->cookie = cookie;
+ dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
+
+ if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ ioat2_update_pending(ioat);
+ spin_unlock_bh(&ioat->ring_lock);
+
+ return cookie;
+}
+
+static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+{
+ struct ioat_dma_descriptor *hw;
+ struct ioat_ring_ent *desc;
+ struct ioatdma_device *dma;
+ dma_addr_t phys;
+
+ dma = to_ioatdma_device(chan->device);
+ hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
+ if (!hw)
+ return NULL;
+ memset(hw, 0, sizeof(*hw));
+
+ desc = kmem_cache_alloc(ioat2_cache, flags);
+ if (!desc) {
+ pci_pool_free(dma->dma_pool, hw, phys);
+ return NULL;
+ }
+ memset(desc, 0, sizeof(*desc));
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = ioat2_tx_submit_unlock;
+ desc->hw = hw;
+ desc->txd.phys = phys;
+ return desc;
+}
+
+static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+ struct ioatdma_device *dma;
+
+ dma = to_ioatdma_device(chan->device);
+ pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
+ kmem_cache_free(ioat2_cache, desc);
+}
+
+static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+{
+ struct ioat_ring_ent **ring;
+ int descs = 1 << order;
+ int i;
+
+ if (order > ioat_get_max_alloc_order())
+ return NULL;
+
+ /* allocate the array to hold the software ring */
+ ring = kcalloc(descs, sizeof(*ring), flags);
+ if (!ring)
+ return NULL;
+ for (i = 0; i < descs; i++) {
+ ring[i] = ioat2_alloc_ring_ent(c, flags);
+ if (!ring[i]) {
+ while (i--)
+ ioat2_free_ring_ent(ring[i], c);
+ kfree(ring);
+ return NULL;
+ }
+ set_desc_id(ring[i], i);
+ }
+
+ /* link descs */
+ for (i = 0; i < descs-1; i++) {
+ struct ioat_ring_ent *next = ring[i+1];
+ struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+ hw->next = next->txd.phys;
+ }
+ ring[i]->hw->next = ring[0]->txd.phys;
+
+ return ring;
+}
+
+/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
+ * @chan: channel to be initialized
+ */
+int ioat2_alloc_chan_resources(struct dma_chan *c)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_ring_ent **ring;
+ u32 chanerr;
+ int order;
+
+ /* have we already been set up? */
+ if (ioat->ring)
+ return 1 << ioat->alloc_order;
+
+ /* Setup register to interrupt and write completion status on error */
+ writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ if (chanerr) {
+ dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ }
+
+ /* allocate a completion writeback area */
+ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
+ chan->completion = pci_pool_alloc(chan->device->completion_pool,
+ GFP_KERNEL, &chan->completion_dma);
+ if (!chan->completion)
+ return -ENOMEM;
+
+ memset(chan->completion, 0, sizeof(*chan->completion));
+ writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
+ chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(((u64) chan->completion_dma) >> 32,
+ chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+ order = ioat_get_alloc_order();
+ ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
+ if (!ring)
+ return -ENOMEM;
+
+ spin_lock_bh(&ioat->ring_lock);
+ ioat->ring = ring;
+ ioat->head = 0;
+ ioat->issued = 0;
+ ioat->tail = 0;
+ ioat->pending = 0;
+ ioat->alloc_order = order;
+ spin_unlock_bh(&ioat->ring_lock);
+
+ tasklet_enable(&chan->cleanup_task);
+ ioat2_start_null_desc(ioat);
+
+ return 1 << ioat->alloc_order;
+}
+
+bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
+{
+ /* reshape differs from normal ring allocation in that we want
+ * to allocate a new software ring while only
+ * extending/truncating the hardware ring
+ */
+ struct ioat_chan_common *chan = &ioat->base;
+ struct dma_chan *c = &chan->common;
+ const u16 curr_size = ioat2_ring_mask(ioat) + 1;
+ const u16 active = ioat2_ring_active(ioat);
+ const u16 new_size = 1 << order;
+ struct ioat_ring_ent **ring;
+ u16 i;
+
+ if (order > ioat_get_max_alloc_order())
+ return false;
+
+ /* double check that we have at least 1 free descriptor */
+ if (active == curr_size)
+ return false;
+
+ /* when shrinking, verify that we can hold the current active
+ * set in the new ring
+ */
+ if (active >= new_size)
+ return false;
+
+ /* allocate the array to hold the software ring */
+ ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+ if (!ring)
+ return false;
+
+ /* allocate/trim descriptors as needed */
+ if (new_size > curr_size) {
+ /* copy current descriptors to the new ring */
+ for (i = 0; i < curr_size; i++) {
+ u16 curr_idx = (ioat->tail+i) & (curr_size-1);
+ u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+ ring[new_idx] = ioat->ring[curr_idx];
+ set_desc_id(ring[new_idx], new_idx);
+ }
+
+ /* add new descriptors to the ring */
+ for (i = curr_size; i < new_size; i++) {
+ u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+ ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
+ if (!ring[new_idx]) {
+ while (i--) {
+ u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+ ioat2_free_ring_ent(ring[new_idx], c);
+ }
+ kfree(ring);
+ return false;
+ }
+ set_desc_id(ring[new_idx], new_idx);
+ }
+
+ /* hw link new descriptors */
+ for (i = curr_size-1; i < new_size; i++) {
+ u16 new_idx = (ioat->tail+i) & (new_size-1);
+ struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
+ struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
+
+ hw->next = next->txd.phys;
+ }
+ } else {
+ struct ioat_dma_descriptor *hw;
+ struct ioat_ring_ent *next;
+
+ /* copy current descriptors to the new ring, dropping the
+ * removed descriptors
+ */
+ for (i = 0; i < new_size; i++) {
+ u16 curr_idx = (ioat->tail+i) & (curr_size-1);
+ u16 new_idx = (ioat->tail+i) & (new_size-1);
+
+ ring[new_idx] = ioat->ring[curr_idx];
+ set_desc_id(ring[new_idx], new_idx);
+ }
+
+ /* free deleted descriptors */
+ for (i = new_size; i < curr_size; i++) {
+ struct ioat_ring_ent *ent;
+
+ ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
+ ioat2_free_ring_ent(ent, c);
+ }
+
+ /* fix up hardware ring */
+ hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
+ next = ring[(ioat->tail+new_size) & (new_size-1)];
+ hw->next = next->txd.phys;
+ }
+
+ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+ __func__, new_size);
+
+ kfree(ioat->ring);
+ ioat->ring = ring;
+ ioat->alloc_order = order;
+
+ return true;
+}
+
+/**
+ * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
+ * @idx: gets starting descriptor index on successful allocation
+ * @ioat: ioat2,3 channel (ring) to operate on
+ * @num_descs: allocation length
+ */
+int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+
+ spin_lock_bh(&ioat->ring_lock);
+ /* never allow the last descriptor to be consumed; we need at
+ * least one free at all times to allow for on-the-fly ring
+ * resizing.
+ */
+ while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
+ if (reshape_ring(ioat, ioat->alloc_order + 1) &&
+ ioat2_ring_space(ioat) > num_descs)
+ break;
+
+ if (printk_ratelimit())
+ dev_dbg(to_dev(chan),
+ "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail,
+ ioat->issued);
+ spin_unlock_bh(&ioat->ring_lock);
+
+ /* progress reclaim in the allocation failure case; we
+ * may be called under bh_disabled so we need to trigger
+ * the timer event directly
+ */
+ spin_lock_bh(&chan->cleanup_lock);
+ if (jiffies > chan->timer.expires &&
+ timer_pending(&chan->timer)) {
+ struct ioatdma_device *device = chan->device;
+
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ spin_unlock_bh(&chan->cleanup_lock);
+ device->timer_fn((unsigned long) ioat);
+ } else
+ spin_unlock_bh(&chan->cleanup_lock);
+ return -ENOMEM;
+ }
+
+ dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+
+ *idx = ioat2_desc_alloc(ioat, num_descs);
+ return 0; /* with ioat->ring_lock held */
+}
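+/*
+ * Note: a successful ioat2_alloc_and_lock() returns with ring_lock held; the
+ * prep routine below relies on this to keep submission in order, and the
+ * lock is released in ioat2_tx_submit_unlock() when the descriptor is
+ * submitted.
+ */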
+
+struct dma_async_tx_descriptor *
+ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_dma_descriptor *hw;
+ struct ioat_ring_ent *desc;
+ dma_addr_t dst = dma_dest;
+ dma_addr_t src = dma_src;
+ size_t total_len = len;
+ int num_descs;
+ u16 idx;
+ int i;
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+ if (likely(num_descs) &&
+ ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
+ /* pass */;
+ else
+ return NULL;
+ i = 0;
+ do {
+ size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ hw = desc->hw;
+
+ hw->size = copy;
+ hw->ctl = 0;
+ hw->src_addr = src;
+ hw->dst_addr = dst;
+
+ len -= copy;
+ dst += copy;
+ src += copy;
+ dump_desc_dbg(ioat, desc);
+ } while (++i < num_descs);
+
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ hw->ctl_f.compl_write = 1;
+ dump_desc_dbg(ioat, desc);
+ /* we leave the channel locked to ensure in order submission */
+
+ return &desc->txd;
+}
+
+/**
+ * ioat2_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+void ioat2_free_chan_resources(struct dma_chan *c)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
+ struct ioat_ring_ent *desc;
+ const u16 total_descs = 1 << ioat->alloc_order;
+ int descs;
+ int i;
+
+ /* Before freeing channel resources first check
+ * if they have been previously allocated for this channel.
+ */
+ if (!ioat->ring)
+ return;
+
+ tasklet_disable(&chan->cleanup_task);
+ del_timer_sync(&chan->timer);
+ device->cleanup_tasklet((unsigned long) ioat);
+
+ /* Delay 100ms after reset to allow internal DMA logic to quiesce
+ * before removing DMA descriptor resources.
+ */
+ writeb(IOAT_CHANCMD_RESET,
+ chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
+ mdelay(100);
+
+ spin_lock_bh(&ioat->ring_lock);
+ descs = ioat2_ring_space(ioat);
+ dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
+ for (i = 0; i < descs; i++) {
+ desc = ioat2_get_ring_ent(ioat, ioat->head + i);
+ ioat2_free_ring_ent(desc, c);
+ }
+
+ if (descs < total_descs)
+ dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
+ total_descs - descs);
+
+ for (i = 0; i < total_descs - descs; i++) {
+ desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ dump_desc_dbg(ioat, desc);
+ ioat2_free_ring_ent(desc, c);
+ }
+
+ kfree(ioat->ring);
+ ioat->ring = NULL;
+ ioat->alloc_order = 0;
+ pci_pool_free(device->completion_pool, chan->completion,
+ chan->completion_dma);
+ spin_unlock_bh(&ioat->ring_lock);
+
+ chan->last_completion = 0;
+ chan->completion_dma = 0;
+ ioat->pending = 0;
+ ioat->dmacount = 0;
+}
+
+enum dma_status
+ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioatdma_device *device = ioat->base.device;
+
+ if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ return DMA_SUCCESS;
+
+ device->cleanup_tasklet((unsigned long) ioat);
+
+ return ioat_is_complete(c, cookie, done, used);
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+
+ return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+
+ /* ...taken outside the lock, no need to be precise */
+ return sprintf(page, "%d\n", ioat2_ring_active(ioat));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat2_attrs[] = {
+ &ring_size_attr.attr,
+ &ring_active_attr.attr,
+ &ioat_cap_attr.attr,
+ &ioat_version_attr.attr,
+ NULL,
+};
+
+struct kobj_type ioat2_ktype = {
+ .sysfs_ops = &ioat_sysfs_ops,
+ .default_attrs = ioat2_attrs,
+};
+
+int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
+{
+ struct pci_dev *pdev = device->pdev;
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioat_chan_common *chan;
+ int err;
+
+ device->enumerate_channels = ioat2_enumerate_channels;
+ device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ device->timer_fn = ioat2_timer_event;
+ device->self_test = ioat_dma_self_test;
+ dma = &device->common;
+ dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+ dma->device_issue_pending = ioat2_issue_pending;
+ dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
+ dma->device_free_chan_resources = ioat2_free_chan_resources;
+ dma->device_is_tx_complete = ioat2_is_complete;
+
+ err = ioat_probe(device);
+ if (err)
+ return err;
+ ioat_set_tcp_copy_break(2048);
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ chan = to_chan_common(c);
+ writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
+ chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
+
+ err = ioat_register(device);
+ if (err)
+ return err;
+
+ ioat_kobject_add(device, &ioat2_ktype);
+
+ if (dca)
+ device->dca = ioat2_dca_init(pdev, device->reg_base);
+
+ return err;
+}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
new file mode 100644
index 00000000000..1d849ef74d5
--- /dev/null
+++ b/drivers/dma/ioat/dma_v2.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef IOATDMA_V2_H
+#define IOATDMA_V2_H
+
+#include <linux/dmaengine.h>
+#include "dma.h"
+#include "hw.h"
+
+
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+ (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+ (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
+ * @base: common ioat channel parameters
+ * @xfercap_log: log2 of channel max transfer length (for fast division)
+ * @head: allocated index
+ * @issued: hardware notification point
+ * @tail: cleanup index
+ * @pending: lock free indicator for issued != head
+ * @dmacount: identical to 'head' except for occasionally resetting to zero
+ * @alloc_order: log2 of the number of allocated descriptors
+ * @ring: software ring buffer implementation of hardware ring
+ * @ring_lock: protects ring attributes
+ */
+struct ioat2_dma_chan {
+ struct ioat_chan_common base;
+ size_t xfercap_log;
+ u16 head;
+ u16 issued;
+ u16 tail;
+ u16 dmacount;
+ u16 alloc_order;
+ int pending;
+ struct ioat_ring_ent **ring;
+ spinlock_t ring_lock;
+};
+
+static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
+{
+ struct ioat_chan_common *chan = to_chan_common(c);
+
+ return container_of(chan, struct ioat2_dma_chan, base);
+}
+
+static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+{
+ return (1 << ioat->alloc_order) - 1;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
+{
+ return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
+{
+ return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+}
+
+static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+{
+ u16 num_descs = ioat2_ring_mask(ioat) + 1;
+ u16 active = ioat2_ring_active(ioat);
+
+ BUG_ON(active > num_descs);
+
+ return num_descs - active;
+}
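+/*
+ * e.g. with alloc_order = 2 (mask 3), head = 5 and tail = 3 give
+ * active = (5 - 3) & 3 = 2 and space = 4 - 2 = 2; the unsigned arithmetic
+ * stays correct across the u16 wrap of head/tail.
+ */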
+
+/* assumes caller already checked space */
+static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
+{
+ ioat->head += len;
+ return ioat->head - len;
+}
+
+static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
+{
+ u16 num_descs = len >> ioat->xfercap_log;
+
+ num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
+ return num_descs;
+}
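+/*
+ * e.g. with xfercap_log = 20 (1 MiB per descriptor) a 2.5 MiB request
+ * needs (len >> 20) = 2 full descriptors plus one for the remainder,
+ * i.e. three descriptors in total.
+ */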
+
+/**
+ * struct ioat_ring_ent - wrapper around hardware descriptor
+ * @hw: hardware DMA descriptor (for memcpy)
+ * @fill: hardware fill descriptor
+ * @xor: hardware xor descriptor
+ * @xor_ex: hardware xor extension descriptor
+ * @pq: hardware pq descriptor
+ * @pq_ex: hardware pq extension descriptor
+ * @pqu: hardware pq update descriptor
+ * @raw: hardware raw (un-typed) descriptor
+ * @txd: the generic software descriptor for all engines
+ * @len: total transaction length for unmap
+ * @result: asynchronous result of validate operations
+ * @id: identifier for debug
+ */
+
+struct ioat_ring_ent {
+ union {
+ struct ioat_dma_descriptor *hw;
+ struct ioat_fill_descriptor *fill;
+ struct ioat_xor_descriptor *xor;
+ struct ioat_xor_ext_descriptor *xor_ex;
+ struct ioat_pq_descriptor *pq;
+ struct ioat_pq_ext_descriptor *pq_ex;
+ struct ioat_pq_update_descriptor *pqu;
+ struct ioat_raw_descriptor *raw;
+ };
+ size_t len;
+ struct dma_async_tx_descriptor txd;
+ enum sum_check_flags *result;
+ #ifdef DEBUG
+ int id;
+ #endif
+};
+
+static inline struct ioat_ring_ent *
+ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
+{
+ return ioat->ring[idx & ioat2_ring_mask(ioat)];
+}
+
+static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+
+ writel(addr & 0x00000000FFFFFFFF,
+ chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+ writel(addr >> 32,
+ chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
+
+int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
+int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_enumerate_channels(struct ioatdma_device *device);
+struct dma_async_tx_descriptor *
+ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags);
+void ioat2_issue_pending(struct dma_chan *chan);
+int ioat2_alloc_chan_resources(struct dma_chan *c);
+void ioat2_free_chan_resources(struct dma_chan *c);
+enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used);
+void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
+bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
+void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
+void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_timer_event(unsigned long data);
+extern struct kobj_type ioat2_ktype;
+extern struct kmem_cache *ioat2_cache;
+#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
new file mode 100644
index 00000000000..35d1e33afd5
--- /dev/null
+++ b/drivers/dma/ioat/dma_v3.c
@@ -0,0 +1,1223 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Support routines for v3+ hardware
+ */
+
+#include <linux/pci.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+#include "dma_v2.h"
+
+/* ioat hardware assumes at least two sources for raid operations */
+#define src_cnt_to_sw(x) ((x) + 2)
+#define src_cnt_to_hw(x) ((x) - 2)
+
+/* provide a lookup table for setting the source address in the base or
+ * extended descriptor of an xor or pq descriptor
+ */
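+/*
+ * Bit i of {xor,pq}_idx_to_desc selects whether source i lives in the base
+ * descriptor (bit clear) or in the extended descriptor (bit set), and
+ * {xor,pq}_idx_to_field gives the raw field slot within that descriptor.
+ */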
+static const u8 xor_idx_to_desc __read_mostly = 0xe0;
+static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc __read_mostly = 0xf8;
+static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
+
+static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+ struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+ return raw->field[xor_idx_to_field[idx]];
+}
+
+static void xor_set_src(struct ioat_raw_descriptor *descs[2],
+ dma_addr_t addr, u32 offset, int idx)
+{
+ struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+ raw->field[xor_idx_to_field[idx]] = addr + offset;
+}
+
+static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+ struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+ return raw->field[pq_idx_to_field[idx]];
+}
+
+static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+ dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+ struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
+ struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+ raw->field[pq_idx_to_field[idx]] = addr + offset;
+ pq->coef[idx] = coef;
+}
+
+static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
+ struct ioat_ring_ent *desc, int idx)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct pci_dev *pdev = chan->device->pdev;
+ size_t len = desc->len;
+ size_t offset = len - desc->hw->size;
+ struct dma_async_tx_descriptor *tx = &desc->txd;
+ enum dma_ctrl_flags flags = tx->flags;
+
+ switch (desc->hw->ctl_f.op) {
+ case IOAT_OP_COPY:
+ if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
+ ioat_dma_unmap(chan, flags, len, desc->hw);
+ break;
+ case IOAT_OP_FILL: {
+ struct ioat_fill_descriptor *hw = desc->fill;
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+ ioat_unmap(pdev, hw->dst_addr - offset, len,
+ PCI_DMA_FROMDEVICE, flags, 1);
+ break;
+ }
+ case IOAT_OP_XOR_VAL:
+ case IOAT_OP_XOR: {
+ struct ioat_xor_descriptor *xor = desc->xor;
+ struct ioat_ring_ent *ext;
+ struct ioat_xor_ext_descriptor *xor_ex = NULL;
+ int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
+ struct ioat_raw_descriptor *descs[2];
+ int i;
+
+ if (src_cnt > 5) {
+ ext = ioat2_get_ring_ent(ioat, idx + 1);
+ xor_ex = ext->xor_ex;
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ descs[0] = (struct ioat_raw_descriptor *) xor;
+ descs[1] = (struct ioat_raw_descriptor *) xor_ex;
+ for (i = 0; i < src_cnt; i++) {
+ dma_addr_t src = xor_get_src(descs, i);
+
+ ioat_unmap(pdev, src - offset, len,
+ PCI_DMA_TODEVICE, flags, 0);
+ }
+
+ /* dest is a source in xor validate operations */
+ if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
+ ioat_unmap(pdev, xor->dst_addr - offset, len,
+ PCI_DMA_TODEVICE, flags, 1);
+ break;
+ }
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
+ ioat_unmap(pdev, xor->dst_addr - offset, len,
+ PCI_DMA_FROMDEVICE, flags, 1);
+ break;
+ }
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ: {
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_ring_ent *ext;
+ struct ioat_pq_ext_descriptor *pq_ex = NULL;
+ int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
+ struct ioat_raw_descriptor *descs[2];
+ int i;
+
+ if (src_cnt > 3) {
+ ext = ioat2_get_ring_ent(ioat, idx + 1);
+ pq_ex = ext->pq_ex;
+ }
+
+ /* in the 'continue' case don't unmap the dests as sources */
+ if (dmaf_p_disabled_continue(flags))
+ src_cnt--;
+ else if (dmaf_continue(flags))
+ src_cnt -= 3;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+ descs[1] = (struct ioat_raw_descriptor *) pq_ex;
+ for (i = 0; i < src_cnt; i++) {
+ dma_addr_t src = pq_get_src(descs, i);
+
+ ioat_unmap(pdev, src - offset, len,
+ PCI_DMA_TODEVICE, flags, 0);
+ }
+
+ /* the dests are sources in pq validate operations */
+ if (pq->ctl_f.op == IOAT_OP_PQ_VAL) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset,
+ len, PCI_DMA_TODEVICE, flags, 0);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset,
+ len, PCI_DMA_TODEVICE, flags, 0);
+ break;
+ }
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ }
+ break;
+ }
+ default:
+ dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
+ __func__, desc->hw->ctl_f.op);
+ }
+}
+
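+/* true when the operation required a second (extended) descriptor, so
+ * that __cleanup() can account for it and skip over it
+ */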
+static bool desc_has_ext(struct ioat_ring_ent *desc)
+{
+ struct ioat_dma_descriptor *hw = desc->hw;
+
+ if (hw->ctl_f.op == IOAT_OP_XOR ||
+ hw->ctl_f.op == IOAT_OP_XOR_VAL) {
+ struct ioat_xor_descriptor *xor = desc->xor;
+
+ if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
+ return true;
+ } else if (hw->ctl_f.op == IOAT_OP_PQ ||
+ hw->ctl_f.op == IOAT_OP_PQ_VAL) {
+ struct ioat_pq_descriptor *pq = desc->pq;
+
+ if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __cleanup - reclaim used descriptors
+ * @ioat: channel (ring) to clean
+ * @phys_complete: descriptor address reported complete by the channel's
+ * completion writeback
+ *
+ * The difference from the dma_v2.c __cleanup() is that this routine
+ * handles extended descriptors and dma-unmapping raid operations.
+ */
+static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_ring_ent *desc;
+ bool seen_current = false;
+ u16 active;
+ int i;
+
+ dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
+ __func__, ioat->head, ioat->tail, ioat->issued);
+
+ active = ioat2_ring_active(ioat);
+ for (i = 0; i < active && !seen_current; i++) {
+ struct dma_async_tx_descriptor *tx;
+
+ prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
+ desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ dump_desc_dbg(ioat, desc);
+ tx = &desc->txd;
+ if (tx->cookie) {
+ chan->completed_cookie = tx->cookie;
+ ioat3_dma_unmap(ioat, desc, ioat->tail + i);
+ tx->cookie = 0;
+ if (tx->callback) {
+ tx->callback(tx->callback_param);
+ tx->callback = NULL;
+ }
+ }
+
+ if (tx->phys == phys_complete)
+ seen_current = true;
+
+ /* skip extended descriptors */
+ if (desc_has_ext(desc)) {
+ BUG_ON(i + 1 >= active);
+ i++;
+ }
+ }
+ ioat->tail += i;
+ BUG_ON(!seen_current); /* no active descs have written a completion? */
+ chan->last_completion = phys_complete;
+ if (ioat->head == ioat->tail) {
+ dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
+ __func__);
+ clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ }
+}
+
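+/* Cleanup can be triggered from the tasklet, from ioat3_is_complete()
+ * and from the channel timer; use trylocks and simply bail out when
+ * another context already holds the cleanup or ring lock.
+ */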
+static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ prefetch(chan->completion);
+
+ if (!spin_trylock_bh(&chan->cleanup_lock))
+ return;
+
+ if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+
+ if (!spin_trylock_bh(&ioat->ring_lock)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+
+ __cleanup(ioat, phys_complete);
+
+ spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_tasklet(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = (void *) data;
+
+ ioat3_cleanup(ioat);
+ writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
+ ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
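+/* Suspend the channel, poll until it has quiesced, reap whatever work
+ * did complete, and then hand off to __ioat2_restart_chan().
+ */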
+static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+ u32 status;
+
+ status = ioat_chansts(chan);
+ if (is_ioat_active(status) || is_ioat_idle(status))
+ ioat_suspend(chan);
+ while (is_ioat_active(status) || is_ioat_idle(status)) {
+ status = ioat_chansts(chan);
+ cpu_relax();
+ }
+
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+
+ __ioat2_restart_chan(ioat);
+}
+
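+/* Periodic channel maintenance: while completions are pending, check
+ * for forward progress (and for programming errors once the channel
+ * has halted) and force a restart if the channel appears stuck;
+ * otherwise try to shrink an idle, oversized ring back toward the
+ * default allocation order.
+ */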
+static void ioat3_timer_event(unsigned long data)
+{
+ struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat_chan_common *chan = &ioat->base;
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+ unsigned long phys_complete;
+ u64 status;
+
+ spin_lock_bh(&ioat->ring_lock);
+ status = ioat_chansts(chan);
+
+ /* when halted due to errors, check for channel
+ * programming errors before advancing the completion state
+ */
+ if (is_ioat_halted(status)) {
+ u32 chanerr;
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ BUG_ON(is_ioat_bug(chanerr));
+ }
+
+ /* if we haven't made progress and we have already
+ * acknowledged a pending completion once, then be more
+ * forceful with a restart
+ */
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ ioat3_restart_channel(ioat);
+ else {
+ set_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ }
+ spin_unlock_bh(&ioat->ring_lock);
+ } else {
+ u16 active;
+
+ /* if the ring is idle, empty, and oversized, try to step
+ * down the size
+ */
+ spin_lock_bh(&ioat->ring_lock);
+ active = ioat2_ring_active(ioat);
+ if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
+ reshape_ring(ioat, ioat->alloc_order-1);
+ spin_unlock_bh(&ioat->ring_lock);
+
+ /* keep shrinking until we get back to our minimum
+ * default size
+ */
+ if (ioat->alloc_order > ioat_get_alloc_order())
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ }
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static enum dma_status
+ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+
+ if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ return DMA_SUCCESS;
+
+ ioat3_cleanup(ioat);
+
+ return ioat_is_complete(c, cookie, done, used);
+}
+
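+/* A memset is expressed as a chain of fill descriptors: the fill byte
+ * is replicated across the 64-bit src_data pattern and each descriptor
+ * covers at most 1 << xfercap_log bytes.  Only the last descriptor
+ * carries the fence/interrupt/completion-write control bits.
+ */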
+static struct dma_async_tx_descriptor *
+ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_ring_ent *desc;
+ size_t total_len = len;
+ struct ioat_fill_descriptor *fill;
+ int num_descs;
+ u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
+ u16 idx;
+ int i;
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+ if (likely(num_descs) &&
+ ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
+ /* pass */;
+ else
+ return NULL;
+ i = 0;
+ do {
+ size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ fill = desc->fill;
+
+ fill->size = xfer_size;
+ fill->src_data = src_data;
+ fill->dst_addr = dest;
+ fill->ctl = 0;
+ fill->ctl_f.op = IOAT_OP_FILL;
+
+ len -= xfer_size;
+ dest += xfer_size;
+ dump_desc_dbg(ioat, desc);
+ } while (++i < num_descs);
+
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ fill->ctl_f.compl_write = 1;
+ dump_desc_dbg(ioat, desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
+ dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_ring_ent *compl_desc;
+ struct ioat_ring_ent *desc;
+ struct ioat_ring_ent *ext;
+ size_t total_len = len;
+ struct ioat_xor_descriptor *xor;
+ struct ioat_xor_ext_descriptor *xor_ex = NULL;
+ struct ioat_dma_descriptor *hw;
+ u32 offset = 0;
+ int num_descs;
+ int with_ext;
+ int i;
+ u16 idx;
+ u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
+
+ BUG_ON(src_cnt < 2);
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+ /* we need 2x the number of descriptors to cover greater than 5
+ * sources
+ */
+ if (src_cnt > 5) {
+ with_ext = 1;
+ num_descs *= 2;
+ } else
+ with_ext = 0;
+
+ /* completion writes from the raid engine may pass completion
+ * writes from the legacy engine, so we need one extra null
+ * (legacy) descriptor to ensure all completion writes arrive in
+ * order.
+ */
+ if (likely(num_descs) &&
+ ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
+ /* pass */;
+ else
+ return NULL;
+ i = 0;
+ do {
+ struct ioat_raw_descriptor *descs[2];
+ size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+ int s;
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ xor = desc->xor;
+
+ /* save a branch by unconditionally retrieving the
+ * extended descriptor; xor_set_src() knows not to write
+ * to it in the single descriptor case
+ */
+ ext = ioat2_get_ring_ent(ioat, idx + i + 1);
+ xor_ex = ext->xor_ex;
+
+ descs[0] = (struct ioat_raw_descriptor *) xor;
+ descs[1] = (struct ioat_raw_descriptor *) xor_ex;
+ for (s = 0; s < src_cnt; s++)
+ xor_set_src(descs, src[s], offset, s);
+ xor->size = xfer_size;
+ xor->dst_addr = dest + offset;
+ xor->ctl = 0;
+ xor->ctl_f.op = op;
+ xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ dump_desc_dbg(ioat, desc);
+ } while ((i += 1 + with_ext) < num_descs);
+
+ /* last xor descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat, compl_desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags)
+{
+ /* the cleanup routine only sets bits on validate failure; it
+ * does not clear bits on validate success... so clear it here
+ */
+ *result = 0;
+
+ return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
+ src_cnt - 1, len, flags);
+}
+
+static void
+dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
+{
+ struct device *dev = to_dev(&ioat->base);
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
+ struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
+ int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
+ int i;
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+ " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
+ desc_id(desc), (unsigned long long) desc->txd.phys,
+ (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
+ desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
+ pq->ctl_f.compl_write,
+ pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+ pq->ctl_f.src_cnt);
+ for (i = 0; i < src_cnt; i++)
+ dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+ (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
+ dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+ dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+}
+
+static struct dma_async_tx_descriptor *
+__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
+ const dma_addr_t *dst, const dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_ring_ent *compl_desc;
+ struct ioat_ring_ent *desc;
+ struct ioat_ring_ent *ext;
+ size_t total_len = len;
+ struct ioat_pq_descriptor *pq;
+ struct ioat_pq_ext_descriptor *pq_ex = NULL;
+ struct ioat_dma_descriptor *hw;
+ u32 offset = 0;
+ int num_descs;
+ int with_ext;
+ int i, s;
+ u16 idx;
+ u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+
+ dev_dbg(to_dev(chan), "%s\n", __func__);
+ /* the engine requires at least two sources (we provide
+ * at least 1 implied source in the DMA_PREP_CONTINUE case)
+ */
+ BUG_ON(src_cnt + dmaf_continue(flags) < 2);
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+ /* we need 2x the number of descriptors to cover greater than 3
+ * sources
+ */
+ if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+ with_ext = 1;
+ num_descs *= 2;
+ } else
+ with_ext = 0;
+
+ /* completion writes from the raid engine may pass completion
+ * writes from the legacy engine, so we need one extra null
+ * (legacy) descriptor to ensure all completion writes arrive in
+ * order.
+ */
+ if (likely(num_descs) &&
+ ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
+ /* pass */;
+ else
+ return NULL;
+ i = 0;
+ do {
+ struct ioat_raw_descriptor *descs[2];
+ size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ pq = desc->pq;
+
+ /* save a branch by unconditionally retrieving the
+ * extended descriptor; pq_set_src() knows not to write
+ * to it in the single descriptor case
+ */
+ ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
+ pq_ex = ext->pq_ex;
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+ descs[1] = (struct ioat_raw_descriptor *) pq_ex;
+
+ for (s = 0; s < src_cnt; s++)
+ pq_set_src(descs, src[s], offset, scf[s], s);
+
+ /* see the comment for dma_maxpq in include/linux/dmaengine.h */
+ if (dmaf_p_disabled_continue(flags))
+ pq_set_src(descs, dst[1], offset, 1, s++);
+ else if (dmaf_continue(flags)) {
+ pq_set_src(descs, dst[0], offset, 0, s++);
+ pq_set_src(descs, dst[1], offset, 1, s++);
+ pq_set_src(descs, dst[1], offset, 0, s++);
+ }
+ pq->size = xfer_size;
+ pq->p_addr = dst[0] + offset;
+ pq->q_addr = dst[1] + offset;
+ pq->ctl = 0;
+ pq->ctl_f.op = op;
+ pq->ctl_f.src_cnt = src_cnt_to_hw(s);
+ pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ } while ((i += 1 + with_ext) < num_descs);
+
+ /* last pq descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ dump_pq_desc_dbg(ioat, desc, ext);
+
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat, compl_desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ /* handle the single source multiply case from the raid6
+ * recovery path
+ */
+ if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+ dma_addr_t single_source[2];
+ unsigned char single_source_coef[2];
+
+ BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
+ single_source[0] = src[0];
+ single_source[1] = src[0];
+ single_source_coef[0] = scf[0];
+ single_source_coef[1] = 0;
+
+ return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
+ single_source_coef, len, flags);
+ } else
+ return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
+ len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags)
+{
+ /* the cleanup routine only sets bits on validate failure; it
+ * does not clear bits on validate success... so clear it here
+ */
+ *pqres = 0;
+
+ return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags);
+}
+
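+/* Devices that advertise pq but not xor capability emulate xor with a
+ * pq operation: Q is disabled via DMA_PREP_PQ_DISABLE_Q, every
+ * coefficient is zero, and the P destination ends up holding the
+ * plain xor of the sources.
+ */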
+static struct dma_async_tx_descriptor *
+ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ unsigned char scf[src_cnt];
+ dma_addr_t pq[2];
+
+ memset(scf, 0, src_cnt);
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[0] = dst;
+ pq[1] = ~0;
+
+ return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+ unsigned int src_cnt, size_t len,
+ enum sum_check_flags *result, unsigned long flags)
+{
+ unsigned char scf[src_cnt];
+ dma_addr_t pq[2];
+
+ /* the cleanup routine only sets bits on validate failure; it
+ * does not clear bits on validate success... so clear it here
+ */
+ *result = 0;
+
+ memset(scf, 0, src_cnt);
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[0] = src[0];
+ pq[1] = ~0;
+
+ return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
+ len, flags);
+}
+
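+/* Emit a single null descriptor whose only job is to generate a
+ * completion write and raise an interrupt.
+ */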
+static struct dma_async_tx_descriptor *
+ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_ring_ent *desc;
+ struct ioat_dma_descriptor *hw;
+ u16 idx;
+
+ if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
+ desc = ioat2_get_ring_ent(ioat, idx);
+ else
+ return NULL;
+
+ hw = desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = 1;
+ hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ hw->src_addr = 0;
+ hw->dst_addr = 0;
+
+ desc->txd.flags = flags;
+ desc->len = 1;
+
+ dump_desc_dbg(ioat, desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
+static void __devinit ioat3_dma_test_callback(void *dma_async_param)
+{
+ struct completion *cmp = dma_async_param;
+
+ complete(cmp);
+}
+
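+/* Raid self-test: xor IOAT_NUM_SRC_TEST single-bit-pattern pages into
+ * a zeroed destination and verify the result, then exercise
+ * xor-validate (expecting a zero parity sum), memset, and a second
+ * xor-validate that must report a non-zero parity sum.
+ */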
+#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
+static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
+{
+ int i, src_idx;
+ struct page *dest;
+ struct page *xor_srcs[IOAT_NUM_SRC_TEST];
+ struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
+ dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
+ dma_addr_t dma_addr, dest_dma;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ u8 cmp_byte = 0;
+ u32 cmp_word;
+ u32 xor_val_result;
+ int err = 0;
+ struct completion cmp;
+ unsigned long tmo;
+ struct device *dev = &device->pdev->dev;
+ struct dma_device *dma = &device->common;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!dma_has_cap(DMA_XOR, dma->cap_mask))
+ return 0;
+
+ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+ xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+ if (!xor_srcs[src_idx]) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+ }
+
+ dest = alloc_page(GFP_KERNEL);
+ if (!dest) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffers */
+ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+ u8 *ptr = page_address(xor_srcs[src_idx]);
+ for (i = 0; i < PAGE_SIZE; i++)
+ ptr[i] = (1 << src_idx);
+ }
+
+ for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
+ cmp_byte ^= (u8) (1 << src_idx);
+
+ cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+ (cmp_byte << 8) | cmp_byte;
+
+ memset(page_address(dest), 0, PAGE_SIZE);
+
+ dma_chan = container_of(dma->channels.next, struct dma_chan,
+ device_node);
+ if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* test xor */
+ dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+ IOAT_NUM_SRC_TEST, PAGE_SIZE,
+ DMA_PREP_INTERRUPT);
+
+ if (!tx) {
+ dev_err(dev, "Self-test xor prep failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat3_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test xor setup failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_err(dev, "Self-test xor timed out\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i] != cmp_word) {
+ dev_err(dev, "Self-test xor failed compare\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
+
+ /* skip validate if the capability is not present */
+ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+ goto free_resources;
+
+ /* validate the sources with the destination page */
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ xor_val_srcs[i] = xor_srcs[i];
+ xor_val_srcs[i] = dest;
+
+ xor_val_result = 1;
+
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+ IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &xor_val_result, DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(dev, "Self-test zero prep failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat3_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test zero setup failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_err(dev, "Self-test validate timed out\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (xor_val_result != 0) {
+ dev_err(dev, "Self-test validate failed compare\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ /* skip memset if the capability is not present */
+ if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
+ goto free_resources;
+
+ /* test memset */
+ dma_addr = dma_map_page(dev, dest, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(dev, "Self-test memset prep failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat3_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test memset setup failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_err(dev, "Self-test memset timed out\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i]) {
+ dev_err(dev, "Self-test memset failed compare\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+
+ /* test for non-zero parity sum */
+ xor_val_result = 0;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+ IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &xor_val_result, DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(dev, "Self-test 2nd zero prep failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ async_tx_ack(tx);
+ init_completion(&cmp);
+ tx->callback = ioat3_dma_test_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+ if (cookie < 0) {
+ dev_err(dev, "Self-test 2nd zero setup failed\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ dma->device_issue_pending(dma_chan);
+
+ tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+ if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_err(dev, "Self-test 2nd validate timed out\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (xor_val_result != SUM_CHECK_P_RESULT) {
+ dev_err(dev, "Self-test validate failed compare\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ dma->device_free_chan_resources(dma_chan);
+out:
+ src_idx = IOAT_NUM_SRC_TEST;
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ __free_page(dest);
+ return err;
+}
+
+static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
+{
+ int rc = ioat_dma_self_test(device);
+
+ if (rc)
+ return rc;
+
+ rc = ioat_xor_val_self_test(device);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
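+/* Version 3 probe: advertise operations according to the DMACAP
+ * register (xor, pq, and block fill, with xor emulated through pq
+ * when only pq is present), select the raid-aware or plain ioat2
+ * cleanup and timer paths accordingly, apply the known v3 errata
+ * workarounds, and register the dma device and optional dca provider.
+ */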
+int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
+{
+ struct pci_dev *pdev = device->pdev;
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioat_chan_common *chan;
+ bool is_raid_device = false;
+ int err;
+ u16 dev_id;
+ u32 cap;
+
+ device->enumerate_channels = ioat2_enumerate_channels;
+ device->self_test = ioat3_dma_self_test;
+ dma = &device->common;
+ dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
+ dma->device_issue_pending = ioat2_issue_pending;
+ dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
+ dma->device_free_chan_resources = ioat2_free_chan_resources;
+
+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+ dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
+
+ cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+ if (cap & IOAT_CAP_XOR) {
+ is_raid_device = true;
+ dma->max_xor = 8;
+ dma->xor_align = 2;
+
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma->device_prep_dma_xor = ioat3_prep_xor;
+
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
+ }
+ if (cap & IOAT_CAP_PQ) {
+ is_raid_device = true;
+ dma_set_maxpq(dma, 8, 0);
+ dma->pq_align = 2;
+
+ dma_cap_set(DMA_PQ, dma->cap_mask);
+ dma->device_prep_dma_pq = ioat3_prep_pq;
+
+ dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+ dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+
+ if (!(cap & IOAT_CAP_XOR)) {
+ dma->max_xor = 8;
+ dma->xor_align = 2;
+
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma->device_prep_dma_xor = ioat3_prep_pqxor;
+
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
+ }
+ }
+ if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+ dma_cap_set(DMA_MEMSET, dma->cap_mask);
+ dma->device_prep_dma_memset = ioat3_prep_memset_lock;
+ }
+
+
+ if (is_raid_device) {
+ dma->device_is_tx_complete = ioat3_is_complete;
+ device->cleanup_tasklet = ioat3_cleanup_tasklet;
+ device->timer_fn = ioat3_timer_event;
+ } else {
+ dma->device_is_tx_complete = ioat2_is_complete;
+ device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ device->timer_fn = ioat2_timer_event;
+ }
+
+ /* -= IOAT ver.3 workarounds =- */
+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+ * that can cause stability issues for IOAT ver.3
+ */
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+ err = ioat_probe(device);
+ if (err)
+ return err;
+ ioat_set_tcp_copy_break(262144);
+
+ list_for_each_entry(c, &dma->channels, device_node) {
+ chan = to_chan_common(c);
+ writel(IOAT_DMA_DCA_ANY_CPU,
+ chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
+
+ err = ioat_register(device);
+ if (err)
+ return err;
+
+ ioat_kobject_add(device, &ioat2_ktype);
+
+ if (dca)
+ device->dca = ioat3_dca_init(pdev, device->reg_base);
+
+ return 0;
+}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
new file mode 100644
index 00000000000..99afb12bd40
--- /dev/null
+++ b/drivers/dma/ioat/hw.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef _IOAT_HW_H_
+#define _IOAT_HW_H_
+
+/* PCI Configuration Space Values */
+#define IOAT_PCI_VID 0x8086
+#define IOAT_MMIO_BAR 0
+
+/* CB device ID's */
+#define IOAT_PCI_DID_5000 0x1A38
+#define IOAT_PCI_DID_CNB 0x360B
+#define IOAT_PCI_DID_SCNB 0x65FF
+#define IOAT_PCI_DID_SNB 0x402F
+
+#define IOAT_PCI_RID 0x00
+#define IOAT_PCI_SVID 0x8086
+#define IOAT_PCI_SID 0x8086
+#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_VER_2_0 0x20 /* Version 2.0 */
+#define IOAT_VER_3_0 0x30 /* Version 3.0 */
+#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+
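+/* Hardware descriptor formats.  Every descriptor occupies the same
+ * 64-byte slot in the ring; the structures below are per-operation
+ * views of that slot, and struct ioat_raw_descriptor at the end of
+ * this file exposes it as eight untyped 64-bit fields.
+ */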
+struct ioat_dma_descriptor {
+ uint32_t size;
+ union {
+ uint32_t ctl;
+ struct {
+ unsigned int int_en:1;
+ unsigned int src_snoop_dis:1;
+ unsigned int dest_snoop_dis:1;
+ unsigned int compl_write:1;
+ unsigned int fence:1;
+ unsigned int null:1;
+ unsigned int src_brk:1;
+ unsigned int dest_brk:1;
+ unsigned int bundle:1;
+ unsigned int dest_dca:1;
+ unsigned int hint:1;
+ unsigned int rsvd2:13;
+ #define IOAT_OP_COPY 0x00
+ unsigned int op:8;
+ } ctl_f;
+ };
+ uint64_t src_addr;
+ uint64_t dst_addr;
+ uint64_t next;
+ uint64_t rsv1;
+ uint64_t rsv2;
+ /* store some driver data in an unused portion of the descriptor */
+ union {
+ uint64_t user1;
+ uint64_t tx_cnt;
+ };
+ uint64_t user2;
+};
+
+struct ioat_fill_descriptor {
+ uint32_t size;
+ union {
+ uint32_t ctl;
+ struct {
+ unsigned int int_en:1;
+ unsigned int rsvd:1;
+ unsigned int dest_snoop_dis:1;
+ unsigned int compl_write:1;
+ unsigned int fence:1;
+ unsigned int rsvd2:2;
+ unsigned int dest_brk:1;
+ unsigned int bundle:1;
+ unsigned int rsvd4:15;
+ #define IOAT_OP_FILL 0x01
+ unsigned int op:8;
+ } ctl_f;
+ };
+ uint64_t src_data;
+ uint64_t dst_addr;
+ uint64_t next;
+ uint64_t rsv1;
+ uint64_t next_dst_addr;
+ uint64_t user1;
+ uint64_t user2;
+};
+
+struct ioat_xor_descriptor {
+ uint32_t size;
+ union {
+ uint32_t ctl;
+ struct {
+ unsigned int int_en:1;
+ unsigned int src_snoop_dis:1;
+ unsigned int dest_snoop_dis:1;
+ unsigned int compl_write:1;
+ unsigned int fence:1;
+ unsigned int src_cnt:3;
+ unsigned int bundle:1;
+ unsigned int dest_dca:1;
+ unsigned int hint:1;
+ unsigned int rsvd:13;
+ #define IOAT_OP_XOR 0x87
+ #define IOAT_OP_XOR_VAL 0x88
+ unsigned int op:8;
+ } ctl_f;
+ };
+ uint64_t src_addr;
+ uint64_t dst_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+};
+
+struct ioat_xor_ext_descriptor {
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t next;
+ uint64_t rsvd[4];
+};
+
+struct ioat_pq_descriptor {
+ uint32_t size;
+ union {
+ uint32_t ctl;
+ struct {
+ unsigned int int_en:1;
+ unsigned int src_snoop_dis:1;
+ unsigned int dest_snoop_dis:1;
+ unsigned int compl_write:1;
+ unsigned int fence:1;
+ unsigned int src_cnt:3;
+ unsigned int bundle:1;
+ unsigned int dest_dca:1;
+ unsigned int hint:1;
+ unsigned int p_disable:1;
+ unsigned int q_disable:1;
+ unsigned int rsvd:11;
+ #define IOAT_OP_PQ 0x89
+ #define IOAT_OP_PQ_VAL 0x8a
+ unsigned int op:8;
+ } ctl_f;
+ };
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t src_addr3;
+ uint8_t coef[8];
+ uint64_t q_addr;
+};
+
+struct ioat_pq_ext_descriptor {
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t next;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t rsvd[2];
+};
+
+struct ioat_pq_update_descriptor {
+ uint32_t size;
+ union {
+ uint32_t ctl;
+ struct {
+ unsigned int int_en:1;
+ unsigned int src_snoop_dis:1;
+ unsigned int dest_snoop_dis:1;
+ unsigned int compl_write:1;
+ unsigned int fence:1;
+ unsigned int src_cnt:3;
+ unsigned int bundle:1;
+ unsigned int dest_dca:1;
+ unsigned int hint:1;
+ unsigned int p_disable:1;
+ unsigned int q_disable:1;
+ unsigned int rsvd:3;
+ unsigned int coef:8;
+ #define IOAT_OP_PQ_UP 0x8b
+ unsigned int op:8;
+ } ctl_f;
+ };
+ uint64_t src_addr;
+ uint64_t p_addr;
+ uint64_t next;
+ uint64_t src_addr2;
+ uint64_t p_src;
+ uint64_t q_src;
+ uint64_t q_addr;
+};
+
+struct ioat_raw_descriptor {
+ uint64_t field[8];
+};
+#endif
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
new file mode 100644
index 00000000000..d545fae30f3
--- /dev/null
+++ b/drivers/dma/ioat/pci.c
@@ -0,0 +1,210 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2007 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous
+ * copy operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dca.h>
+#include "dma.h"
+#include "dma_v2.h"
+#include "registers.h"
+#include "hw.h"
+
+MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+ /* I/OAT v1 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
+ { PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+
+ /* I/OAT v2 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
+
+ /* I/OAT v3 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+
+ /* I/OAT v3.2 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
+
+static int __devinit ioat_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+static void __devexit ioat_remove(struct pci_dev *pdev);
+
+static int ioat_dca_enabled = 1;
+module_param(ioat_dca_enabled, int, 0644);
+MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+
+struct kmem_cache *ioat2_cache;
+
+#define DRV_NAME "ioatdma"
+
+static struct pci_driver ioat_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ioat_pci_tbl,
+ .probe = ioat_pci_probe,
+ .remove = __devexit_p(ioat_remove),
+};
+
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+ if (!d)
+ return NULL;
+ d->pdev = pdev;
+ d->reg_base = iobase;
+ return d;
+}
+
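+/* Generic PCI probe: enable and map the device with the managed
+ * (pcim_/devm_) helpers so error paths need no explicit unwinding,
+ * prefer a 64-bit DMA mask with a 32-bit fallback, and dispatch to
+ * the version-specific probe selected by the IOAT_VER register.
+ */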
+static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *device;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
+ if (err)
+ return err;
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err)
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+ if (!device)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, device);
+
+ device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ if (device->version == IOAT_VER_1_2)
+ err = ioat1_dma_probe(device, ioat_dca_enabled);
+ else if (device->version == IOAT_VER_2_0)
+ err = ioat2_dma_probe(device, ioat_dca_enabled);
+ else if (device->version >= IOAT_VER_3_0)
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
+ else
+ return -ENODEV;
+
+ if (err) {
+ dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __devexit ioat_remove(struct pci_dev *pdev)
+{
+ struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+ if (!device)
+ return;
+
+ dev_err(&pdev->dev, "Removing dma and dca services\n");
+ if (device->dca) {
+ unregister_dca_provider(device->dca, &pdev->dev);
+ free_dca_provider(device->dca);
+ device->dca = NULL;
+ }
+ ioat_dma_remove(device);
+}
+
+static int __init ioat_init_module(void)
+{
+ int err;
+
+ pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
+ DRV_NAME, IOAT_DMA_VERSION);
+
+ ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!ioat2_cache)
+ return -ENOMEM;
+
+ err = pci_register_driver(&ioat_pci_driver);
+ if (err)
+ kmem_cache_destroy(ioat2_cache);
+
+ return err;
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+ pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat2_cache);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioat/registers.h
index 49bc277424f..63038e18ab0 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -64,18 +64,37 @@
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001
+#define IOAT_DEVICE_MMIO_RESTRICTED 0x0002
+#define IOAT_DEVICE_MEMORY_BYPASS 0x0004
+#define IOAT_DEVICE_ADDRESS_REMAPPING 0x0008
+
+#define IOAT_DMA_CAP_OFFSET 0x10 /* 32-bit */
+#define IOAT_CAP_PAGE_BREAK 0x00000001
+#define IOAT_CAP_CRC 0x00000002
+#define IOAT_CAP_SKIP_MARKER 0x00000004
+#define IOAT_CAP_DCA 0x00000010
+#define IOAT_CAP_CRC_MOVE 0x00000020
+#define IOAT_CAP_FILL_BLOCK 0x00000040
+#define IOAT_CAP_APIC 0x00000080
+#define IOAT_CAP_XOR 0x00000100
+#define IOAT_CAP_PQ 0x00000200
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
/* DMA Channel Registers */
#define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */
#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
+#define IOAT3_CHANCTRL_COMPL_DCA_EN 0x0200
#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
-#define IOAT_CHANCTRL_INT_DISABLE 0x0001
+#define IOAT_CHANCTRL_INT_REARM 0x0001
+#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+ IOAT_CHANCTRL_ERR_COMPLETION_EN |\
+ IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
+ IOAT_CHANCTRL_ERR_INT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
@@ -94,14 +113,14 @@
#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
-#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
-#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
+#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
+#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL
+#define IOAT_CHANSTS_STATUS 0x7ULL
+#define IOAT_CHANSTS_ACTIVE 0x0
+#define IOAT_CHANSTS_DONE 0x1
+#define IOAT_CHANSTS_SUSPENDED 0x2
+#define IOAT_CHANSTS_HALTED 0x3
@@ -204,22 +223,27 @@
#define IOAT_CDAR_OFFSET_HIGH 0x24
#define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */
-#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001
-#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008
+#define IOAT_CHANERR_SRC_ADDR_ERR 0x0001
+#define IOAT_CHANERR_DEST_ADDR_ERR 0x0002
+#define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004
+#define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008
#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010
#define IOAT_CHANERR_CHANCMD_ERR 0x0020
#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040
#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080
#define IOAT_CHANERR_READ_DATA_ERR 0x0100
#define IOAT_CHANERR_WRITE_DATA_ERR 0x0200
-#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400
-#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800
+#define IOAT_CHANERR_CONTROL_ERR 0x0400
+#define IOAT_CHANERR_LENGTH_ERR 0x0800
#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
#define IOAT_CHANERR_SOFT_ERR 0x4000
#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000
+#define IOAT_CHANERR_XOR_P_OR_CRC_ERR 0x10000
+#define IOAT_CHANERR_XOR_Q_ERR 0x20000
+#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
+
+#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
deleted file mode 100644
index a600fc0f796..00000000000
--- a/drivers/dma/ioat_dma.c
+++ /dev/null
@@ -1,1741 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/i7300_idle.h>
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-#include "ioatdma_hw.h"
-
-#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
-
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
-static int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
- "high-water mark for pushing ioat descriptors (default: 4)");
-
-#define RESET_DELAY msecs_to_jiffies(100)
-#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
-static void ioat_dma_chan_reset_part2(struct work_struct *work);
-static void ioat_dma_chan_watchdog(struct work_struct *work);
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-/* internal functions */
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-
-static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
- struct ioatdma_device *device,
- int index)
-{
- return device->idx[index];
-}
-
-/**
- * ioat_dma_do_interrupt - handler used for single vector interrupt mode
- * @irq: interrupt id
- * @data: interrupt data
- */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
-{
- struct ioatdma_device *instance = data;
- struct ioat_dma_chan *ioat_chan;
- unsigned long attnstatus;
- int bit;
- u8 intrctrl;
-
- intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
-
- if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
- return IRQ_NONE;
-
- if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
- writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
- return IRQ_NONE;
- }
-
- attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
- for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
- ioat_chan = ioat_lookup_chan_by_index(instance, bit);
- tasklet_schedule(&ioat_chan->cleanup_task);
- }
-
- writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
- return IRQ_HANDLED;
-}
-
-/**
- * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
- * @irq: interrupt id
- * @data: interrupt data
- */
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
-{
- struct ioat_dma_chan *ioat_chan = data;
-
- tasklet_schedule(&ioat_chan->cleanup_task);
-
- return IRQ_HANDLED;
-}
-
-static void ioat_dma_cleanup_tasklet(unsigned long data);
-
-/**
- * ioat_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
-{
- u8 xfercap_scale;
- u32 xfercap;
- int i;
- struct ioat_dma_chan *ioat_chan;
-
- /*
- * IOAT ver.3 workarounds
- */
- if (device->version == IOAT_VER_3_0) {
- u32 chan_err_mask;
- u16 dev_id;
- u32 dmauncerrsts;
-
- /*
- * Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3
- */
- chan_err_mask = 0x3E07;
- pci_write_config_dword(device->pdev,
- IOAT_PCI_CHANERRMASK_INT_OFFSET,
- chan_err_mask);
-
- /*
- * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(device->pdev,
- IOAT_PCI_DEVICE_ID_OFFSET,
- &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
- dmauncerrsts = 0x10;
- pci_write_config_dword(device->pdev,
- IOAT_PCI_DMAUNCERRSTS_OFFSET,
- dmauncerrsts);
- }
- }
-
- device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
- xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
- xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
- if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
- device->common.chancnt--;
- }
-#endif
- for (i = 0; i < device->common.chancnt; i++) {
- ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
- if (!ioat_chan) {
- device->common.chancnt = i;
- break;
- }
-
- ioat_chan->device = device;
- ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
- ioat_chan->xfercap = xfercap;
- ioat_chan->desccount = 0;
- INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
- if (ioat_chan->device->version == IOAT_VER_2_0)
- writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
- IOAT_DMA_DCA_ANY_CPU,
- ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
- else if (ioat_chan->device->version == IOAT_VER_3_0)
- writel(IOAT_DMA_DCA_ANY_CPU,
- ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
- spin_lock_init(&ioat_chan->cleanup_lock);
- spin_lock_init(&ioat_chan->desc_lock);
- INIT_LIST_HEAD(&ioat_chan->free_desc);
- INIT_LIST_HEAD(&ioat_chan->used_desc);
- /* This should be made common somewhere in dmaengine.c */
- ioat_chan->common.device = &device->common;
- list_add_tail(&ioat_chan->common.device_node,
- &device->common.channels);
- device->idx[i] = ioat_chan;
- tasklet_init(&ioat_chan->cleanup_task,
- ioat_dma_cleanup_tasklet,
- (unsigned long) ioat_chan);
- tasklet_disable(&ioat_chan->cleanup_task);
- }
- return device->common.chancnt;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- * descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
- struct ioat_dma_chan *ioat_chan)
-{
- ioat_chan->pending = 0;
- writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
- if (ioat_chan->pending > 0) {
- spin_lock_bh(&ioat_chan->desc_lock);
- __ioat1_dma_memcpy_issue_pending(ioat_chan);
- spin_unlock_bh(&ioat_chan->desc_lock);
- }
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
- struct ioat_dma_chan *ioat_chan)
-{
- ioat_chan->pending = 0;
- writew(ioat_chan->dmacount,
- ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
- if (ioat_chan->pending > 0) {
- spin_lock_bh(&ioat_chan->desc_lock);
- __ioat2_dma_memcpy_issue_pending(ioat_chan);
- spin_unlock_bh(&ioat_chan->desc_lock);
- }
-}
-
-
-/**
- * ioat_dma_chan_reset_part2 - reinit the channel after a reset
- */
-static void ioat_dma_chan_reset_part2(struct work_struct *work)
-{
- struct ioat_dma_chan *ioat_chan =
- container_of(work, struct ioat_dma_chan, work.work);
- struct ioat_desc_sw *desc;
-
- spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->desc_lock);
-
- ioat_chan->completion_virt->low = 0;
- ioat_chan->completion_virt->high = 0;
- ioat_chan->pending = 0;
-
- /*
- * count the descriptors waiting, and be sure to do it
- * right for both the CB1 line and the CB2 ring
- */
- ioat_chan->dmacount = 0;
- if (ioat_chan->used_desc.prev) {
- desc = to_ioat_desc(ioat_chan->used_desc.prev);
- do {
- ioat_chan->dmacount++;
- desc = to_ioat_desc(desc->node.next);
- } while (&desc->node != ioat_chan->used_desc.next);
- }
-
- /*
- * write the new starting descriptor address
- * this puts channel engine into ARMED state
- */
- desc = to_ioat_desc(ioat_chan->used_desc.prev);
- switch (ioat_chan->device->version) {
- case IOAT_VER_1_2:
- writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
- writel(((u64) desc->async_tx.phys) >> 32,
- ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
- writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
- + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
- break;
- case IOAT_VER_2_0:
- writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
- writel(((u64) desc->async_tx.phys) >> 32,
- ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
- /* tell the engine to go with what's left to be done */
- writew(ioat_chan->dmacount,
- ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-
- break;
- }
- dev_err(&ioat_chan->device->pdev->dev,
- "chan%d reset - %d descs waiting, %d total desc\n",
- chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-
- spin_unlock_bh(&ioat_chan->desc_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
-
-/**
- * ioat_dma_reset_channel - restart a channel
- * @ioat_chan: IOAT DMA channel handle
- */
-static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
-{
- u32 chansts, chanerr;
-
- if (!ioat_chan->used_desc.prev)
- return;
-
- chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- chansts = (ioat_chan->completion_virt->low
- & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
- if (chanerr) {
- dev_err(&ioat_chan->device->pdev->dev,
- "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
- chan_num(ioat_chan), chansts, chanerr);
- writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- }
-
- /*
- * whack it upside the head with a reset
- * and wait for things to settle out.
- * force the pending count to a really big negative
- * to make sure no one forces an issue_pending
- * while we're waiting.
- */
-
- spin_lock_bh(&ioat_chan->desc_lock);
- ioat_chan->pending = INT_MIN;
- writeb(IOAT_CHANCMD_RESET,
- ioat_chan->reg_base
- + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- /* schedule the 2nd half instead of sleeping a long time */
- schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
-}
-
-/**
- * ioat_dma_chan_watchdog - watch for stuck channels
- */
-static void ioat_dma_chan_watchdog(struct work_struct *work)
-{
- struct ioatdma_device *device =
- container_of(work, struct ioatdma_device, work.work);
- struct ioat_dma_chan *ioat_chan;
- int i;
-
- union {
- u64 full;
- struct {
- u32 low;
- u32 high;
- };
- } completion_hw;
- unsigned long compl_desc_addr_hw;
-
- for (i = 0; i < device->common.chancnt; i++) {
- ioat_chan = ioat_lookup_chan_by_index(device, i);
-
- if (ioat_chan->device->version == IOAT_VER_1_2
- /* have we started processing anything yet */
- && ioat_chan->last_completion
- /* have we completed any since last watchdog cycle? */
- && (ioat_chan->last_completion ==
- ioat_chan->watchdog_completion)
- /* has TCP stuck on one cookie since last watchdog? */
- && (ioat_chan->watchdog_tcp_cookie ==
- ioat_chan->watchdog_last_tcp_cookie)
- && (ioat_chan->watchdog_tcp_cookie !=
- ioat_chan->completed_cookie)
- /* is there something in the chain to be processed? */
- /* CB1 chain always has at least the last one processed */
- && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
- && ioat_chan->pending == 0) {
-
-			/*
-			 * Check the CHANSTS register for the completed
-			 * descriptor address.  If it differs from the
-			 * completion writeback, is non-zero, and has
-			 * changed since the last watchdog pass, we can
-			 * assume the channel is still working correctly
-			 * and the problem is in the completion writeback;
-			 * update the completion writeback with the actual
-			 * CHANSTS value.  Otherwise try resetting the
-			 * channel.
-			 */
-
- completion_hw.low = readl(ioat_chan->reg_base +
- IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
- completion_hw.high = readl(ioat_chan->reg_base +
- IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
-#if (BITS_PER_LONG == 64)
- compl_desc_addr_hw =
- completion_hw.full
- & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
- compl_desc_addr_hw =
- completion_hw.low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
- if ((compl_desc_addr_hw != 0)
- && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
- && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
- ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
- ioat_chan->completion_virt->low = completion_hw.low;
- ioat_chan->completion_virt->high = completion_hw.high;
- } else {
- ioat_dma_reset_channel(ioat_chan);
- ioat_chan->watchdog_completion = 0;
- ioat_chan->last_compl_desc_addr_hw = 0;
- }
-
-		/*
-		 * For version 2.0: if there are descriptors yet to be
-		 * processed and the last completed address has not changed
-		 * since the last watchdog pass, then either issue the
-		 * pending descriptors to push them through (if they have
-		 * not yet hit the pending level) or try resetting the
-		 * channel.
-		 */
- } else if (ioat_chan->device->version == IOAT_VER_2_0
- && ioat_chan->used_desc.prev
- && ioat_chan->last_completion
- && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
-
- if (ioat_chan->pending < ioat_pending_level)
- ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
- else {
- ioat_dma_reset_channel(ioat_chan);
- ioat_chan->watchdog_completion = 0;
- }
- } else {
- ioat_chan->last_compl_desc_addr_hw = 0;
- ioat_chan->watchdog_completion
- = ioat_chan->last_completion;
- }
-
- ioat_chan->watchdog_last_tcp_cookie =
- ioat_chan->watchdog_tcp_cookie;
- }
-
- schedule_delayed_work(&device->work, WATCHDOG_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
- struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
- struct ioat_desc_sw *prev, *new;
- struct ioat_dma_descriptor *hw;
- dma_cookie_t cookie;
- LIST_HEAD(new_chain);
- u32 copy;
- size_t len;
- dma_addr_t src, dst;
- unsigned long orig_flags;
- unsigned int desc_count = 0;
-
- /* src and dest and len are stored in the initial descriptor */
- len = first->len;
- src = first->src;
- dst = first->dst;
- orig_flags = first->async_tx.flags;
- new = first;
-
- spin_lock_bh(&ioat_chan->desc_lock);
- prev = to_ioat_desc(ioat_chan->used_desc.prev);
- prefetch(prev->hw);
- do {
- copy = min_t(size_t, len, ioat_chan->xfercap);
-
- async_tx_ack(&new->async_tx);
-
- hw = new->hw;
- hw->size = copy;
- hw->ctl = 0;
- hw->src_addr = src;
- hw->dst_addr = dst;
- hw->next = 0;
-
- /* chain together the physical address list for the HW */
- wmb();
- prev->hw->next = (u64) new->async_tx.phys;
-
- len -= copy;
- dst += copy;
- src += copy;
-
- list_add_tail(&new->node, &new_chain);
- desc_count++;
- prev = new;
- } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
-
- if (!new) {
- dev_err(&ioat_chan->device->pdev->dev,
- "tx submit failed\n");
- spin_unlock_bh(&ioat_chan->desc_lock);
- return -ENOMEM;
- }
-
- hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- if (first->async_tx.callback) {
- hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
- if (first != new) {
-			/* move callback into the last desc */
- new->async_tx.callback = first->async_tx.callback;
- new->async_tx.callback_param
- = first->async_tx.callback_param;
- first->async_tx.callback = NULL;
- first->async_tx.callback_param = NULL;
- }
- }
-
- new->tx_cnt = desc_count;
- new->async_tx.flags = orig_flags; /* client is in control of this ack */
-
- /* store the original values for use in later cleanup */
- if (new != first) {
- new->src = first->src;
- new->dst = first->dst;
- new->len = first->len;
- }
-
- /* cookie incr and addition to used_list must be atomic */
- cookie = ioat_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- ioat_chan->common.cookie = new->async_tx.cookie = cookie;
-
- /* write address into NextDescriptor field of last desc in chain */
- to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
- first->async_tx.phys;
- list_splice_tail(&new_chain, &ioat_chan->used_desc);
-
- ioat_chan->dmacount += desc_count;
- ioat_chan->pending += desc_count;
- if (ioat_chan->pending >= ioat_pending_level)
- __ioat1_dma_memcpy_issue_pending(ioat_chan);
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- return cookie;
-}
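
A minimal standalone sketch of the cookie-assignment pattern used just above (increment, wrap back to 1 on signed overflow, same value stored in both the channel and the last descriptor); the demo_* names are hypothetical, not the driver's:

static int demo_assign_cookie(int *chan_cookie)
{
	int cookie = *chan_cookie;

	cookie++;
	if (cookie < 0)		/* wrapped past INT_MAX into negatives */
		cookie = 1;	/* 0 means "unset", negatives are error codes */

	*chan_cookie = cookie;	/* channel and last descriptor share the value */
	return cookie;
}
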
-
-static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
- struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
- struct ioat_desc_sw *new;
- struct ioat_dma_descriptor *hw;
- dma_cookie_t cookie;
- u32 copy;
- size_t len;
- dma_addr_t src, dst;
- unsigned long orig_flags;
- unsigned int desc_count = 0;
-
- /* src and dest and len are stored in the initial descriptor */
- len = first->len;
- src = first->src;
- dst = first->dst;
- orig_flags = first->async_tx.flags;
- new = first;
-
- /*
- * ioat_chan->desc_lock is still in force in version 2 path
- * it gets unlocked at end of this function
- */
- do {
- copy = min_t(size_t, len, ioat_chan->xfercap);
-
- async_tx_ack(&new->async_tx);
-
- hw = new->hw;
- hw->size = copy;
- hw->ctl = 0;
- hw->src_addr = src;
- hw->dst_addr = dst;
-
- len -= copy;
- dst += copy;
- src += copy;
- desc_count++;
- } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
-
- if (!new) {
- dev_err(&ioat_chan->device->pdev->dev,
- "tx submit failed\n");
- spin_unlock_bh(&ioat_chan->desc_lock);
- return -ENOMEM;
- }
-
- hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- if (first->async_tx.callback) {
- hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
- if (first != new) {
-			/* move callback into the last desc */
- new->async_tx.callback = first->async_tx.callback;
- new->async_tx.callback_param
- = first->async_tx.callback_param;
- first->async_tx.callback = NULL;
- first->async_tx.callback_param = NULL;
- }
- }
-
- new->tx_cnt = desc_count;
- new->async_tx.flags = orig_flags; /* client is in control of this ack */
-
- /* store the original values for use in later cleanup */
- if (new != first) {
- new->src = first->src;
- new->dst = first->dst;
- new->len = first->len;
- }
-
- /* cookie incr and addition to used_list must be atomic */
- cookie = ioat_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- ioat_chan->common.cookie = new->async_tx.cookie = cookie;
-
- ioat_chan->dmacount += desc_count;
- ioat_chan->pending += desc_count;
- if (ioat_chan->pending >= ioat_pending_level)
- __ioat2_dma_memcpy_issue_pending(ioat_chan);
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- return cookie;
-}
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat_chan: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
- struct ioat_dma_chan *ioat_chan,
- gfp_t flags)
-{
- struct ioat_dma_descriptor *desc;
- struct ioat_desc_sw *desc_sw;
- struct ioatdma_device *ioatdma_device;
- dma_addr_t phys;
-
- ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
- desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
- if (unlikely(!desc))
- return NULL;
-
- desc_sw = kzalloc(sizeof(*desc_sw), flags);
- if (unlikely(!desc_sw)) {
- pci_pool_free(ioatdma_device->dma_pool, desc, phys);
- return NULL;
- }
-
- memset(desc, 0, sizeof(*desc));
- dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
- switch (ioat_chan->device->version) {
- case IOAT_VER_1_2:
- desc_sw->async_tx.tx_submit = ioat1_tx_submit;
- break;
- case IOAT_VER_2_0:
- case IOAT_VER_3_0:
- desc_sw->async_tx.tx_submit = ioat2_tx_submit;
- break;
- }
-
- desc_sw->hw = desc;
- desc_sw->async_tx.phys = phys;
-
- return desc_sw;
-}
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
- "initial descriptors per channel (default: 256)");
-
-/**
- * ioat2_dma_massage_chan_desc - link the descriptors into a circle
- * @ioat_chan: the channel to be massaged
- */
-static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
-{
- struct ioat_desc_sw *desc, *_desc;
-
- /* setup used_desc */
- ioat_chan->used_desc.next = ioat_chan->free_desc.next;
- ioat_chan->used_desc.prev = NULL;
-
- /* pull free_desc out of the circle so that every node is a hw
- * descriptor, but leave it pointing to the list
- */
- ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
- ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
-
- /* circle link the hw descriptors */
- desc = to_ioat_desc(ioat_chan->free_desc.next);
- desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
- list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
- desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
- }
-}
-
-/**
- * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- struct ioat_desc_sw *desc;
- u16 chanctrl;
- u32 chanerr;
- int i;
- LIST_HEAD(tmp_list);
-
- /* have we already been set up? */
- if (!list_empty(&ioat_chan->free_desc))
- return ioat_chan->desccount;
-
- /* Setup register to interrupt and write completion status on error */
- chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
- IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
- IOAT_CHANCTRL_ERR_COMPLETION_EN;
- writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
- chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr) {
- dev_err(&ioat_chan->device->pdev->dev,
- "CHANERR = %x, clearing\n", chanerr);
- writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
- }
-
- /* Allocate descriptors */
- for (i = 0; i < ioat_initial_desc_count; i++) {
- desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
- if (!desc) {
- dev_err(&ioat_chan->device->pdev->dev,
- "Only %d initial descriptors\n", i);
- break;
- }
- list_add_tail(&desc->node, &tmp_list);
- }
- spin_lock_bh(&ioat_chan->desc_lock);
- ioat_chan->desccount = i;
- list_splice(&tmp_list, &ioat_chan->free_desc);
- if (ioat_chan->device->version != IOAT_VER_1_2)
- ioat2_dma_massage_chan_desc(ioat_chan);
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- /* allocate a completion writeback area */
- /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
- ioat_chan->completion_virt =
- pci_pool_alloc(ioat_chan->device->completion_pool,
- GFP_KERNEL,
- &ioat_chan->completion_addr);
- memset(ioat_chan->completion_virt, 0,
- sizeof(*ioat_chan->completion_virt));
- writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
- writel(((u64) ioat_chan->completion_addr) >> 32,
- ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
- tasklet_enable(&ioat_chan->cleanup_task);
- ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
- return ioat_chan->desccount;
-}
-
-/**
- * ioat_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
- struct ioat_desc_sw *desc, *_desc;
- int in_use_descs = 0;
-
- /* Before freeing channel resources first check
- * if they have been previously allocated for this channel.
- */
- if (ioat_chan->desccount == 0)
- return;
-
- tasklet_disable(&ioat_chan->cleanup_task);
- ioat_dma_memcpy_cleanup(ioat_chan);
-
- /* Delay 100ms after reset to allow internal DMA logic to quiesce
- * before removing DMA descriptor resources.
- */
- writeb(IOAT_CHANCMD_RESET,
- ioat_chan->reg_base
- + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
- mdelay(100);
-
- spin_lock_bh(&ioat_chan->desc_lock);
- switch (ioat_chan->device->version) {
- case IOAT_VER_1_2:
- list_for_each_entry_safe(desc, _desc,
- &ioat_chan->used_desc, node) {
- in_use_descs++;
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->async_tx.phys);
- kfree(desc);
- }
- list_for_each_entry_safe(desc, _desc,
- &ioat_chan->free_desc, node) {
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->async_tx.phys);
- kfree(desc);
- }
- break;
- case IOAT_VER_2_0:
- case IOAT_VER_3_0:
- list_for_each_entry_safe(desc, _desc,
- ioat_chan->free_desc.next, node) {
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->async_tx.phys);
- kfree(desc);
- }
- desc = to_ioat_desc(ioat_chan->free_desc.next);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->async_tx.phys);
- kfree(desc);
- INIT_LIST_HEAD(&ioat_chan->free_desc);
- INIT_LIST_HEAD(&ioat_chan->used_desc);
- break;
- }
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- pci_pool_free(ioatdma_device->completion_pool,
- ioat_chan->completion_virt,
- ioat_chan->completion_addr);
-
- /* one is ok since we left it on there on purpose */
- if (in_use_descs > 1)
- dev_err(&ioat_chan->device->pdev->dev,
- "Freeing %d in use descriptors!\n",
- in_use_descs - 1);
-
- ioat_chan->last_completion = ioat_chan->completion_addr = 0;
- ioat_chan->pending = 0;
- ioat_chan->dmacount = 0;
- ioat_chan->desccount = 0;
- ioat_chan->watchdog_completion = 0;
- ioat_chan->last_compl_desc_addr_hw = 0;
- ioat_chan->watchdog_tcp_cookie =
- ioat_chan->watchdog_last_tcp_cookie = 0;
-}
-
-/**
- * ioat_dma_get_next_descriptor - return the next available descriptor
- * @ioat_chan: IOAT DMA channel handle
- *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held. Allocates more descriptors if the channel
- * has run out.
- */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
- struct ioat_desc_sw *new;
-
- if (!list_empty(&ioat_chan->free_desc)) {
- new = to_ioat_desc(ioat_chan->free_desc.next);
- list_del(&new->node);
- } else {
- /* try to get another desc */
- new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
- if (!new) {
- dev_err(&ioat_chan->device->pdev->dev,
- "alloc failed\n");
- return NULL;
- }
- }
-
- prefetch(new->hw);
- return new;
-}
-
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
- struct ioat_desc_sw *new;
-
- /*
- * used.prev points to where to start processing
- * used.next points to next free descriptor
- * if used.prev == NULL, there are none waiting to be processed
- * if used.next == used.prev.prev, there is only one free descriptor,
-	 * and we need to use it as a noop descriptor before
- * linking in a new set of descriptors, since the device
- * has probably already read the pointer to it
- */
- if (ioat_chan->used_desc.prev &&
- ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
-
- struct ioat_desc_sw *desc;
- struct ioat_desc_sw *noop_desc;
- int i;
-
- /* set up the noop descriptor */
- noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
- /* set size to non-zero value (channel returns error when size is 0) */
- noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
- noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
- noop_desc->hw->src_addr = 0;
- noop_desc->hw->dst_addr = 0;
-
- ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
- ioat_chan->pending++;
- ioat_chan->dmacount++;
-
- /* try to get a few more descriptors */
- for (i = 16; i; i--) {
- desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
- if (!desc) {
- dev_err(&ioat_chan->device->pdev->dev,
- "alloc failed\n");
- break;
- }
- list_add_tail(&desc->node, ioat_chan->used_desc.next);
-
- desc->hw->next
- = to_ioat_desc(desc->node.next)->async_tx.phys;
- to_ioat_desc(desc->node.prev)->hw->next
- = desc->async_tx.phys;
- ioat_chan->desccount++;
- }
-
- ioat_chan->used_desc.next = noop_desc->node.next;
- }
- new = to_ioat_desc(ioat_chan->used_desc.next);
- prefetch(new);
- ioat_chan->used_desc.next = new->node.next;
-
- if (ioat_chan->used_desc.prev == NULL)
- ioat_chan->used_desc.prev = &new->node;
-
- prefetch(new->hw);
- return new;
-}
-
-static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
- struct ioat_dma_chan *ioat_chan)
-{
- if (!ioat_chan)
- return NULL;
-
- switch (ioat_chan->device->version) {
- case IOAT_VER_1_2:
- return ioat1_dma_get_next_descriptor(ioat_chan);
- case IOAT_VER_2_0:
- case IOAT_VER_3_0:
- return ioat2_dma_get_next_descriptor(ioat_chan);
- }
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
- struct dma_chan *chan,
- dma_addr_t dma_dest,
- dma_addr_t dma_src,
- size_t len,
- unsigned long flags)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- struct ioat_desc_sw *new;
-
- spin_lock_bh(&ioat_chan->desc_lock);
- new = ioat_dma_get_next_descriptor(ioat_chan);
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- if (new) {
- new->len = len;
- new->dst = dma_dest;
- new->src = dma_src;
- new->async_tx.flags = flags;
- return &new->async_tx;
- } else {
- dev_err(&ioat_chan->device->pdev->dev,
- "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
- chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
- return NULL;
- }
-}
-
-static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
- struct dma_chan *chan,
- dma_addr_t dma_dest,
- dma_addr_t dma_src,
- size_t len,
- unsigned long flags)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- struct ioat_desc_sw *new;
-
- spin_lock_bh(&ioat_chan->desc_lock);
- new = ioat2_dma_get_next_descriptor(ioat_chan);
-
- /*
- * leave ioat_chan->desc_lock set in ioat 2 path
- * it will get unlocked at end of tx_submit
- */
-
- if (new) {
- new->len = len;
- new->dst = dma_dest;
- new->src = dma_src;
- new->async_tx.flags = flags;
- return &new->async_tx;
- } else {
- spin_unlock_bh(&ioat_chan->desc_lock);
- dev_err(&ioat_chan->device->pdev->dev,
- "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
- chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
- return NULL;
- }
-}
-
-static void ioat_dma_cleanup_tasklet(unsigned long data)
-{
- struct ioat_dma_chan *chan = (void *)data;
- ioat_dma_memcpy_cleanup(chan);
- writew(IOAT_CHANCTRL_INT_DISABLE,
- chan->reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void
-ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
-{
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- pci_unmap_single(ioat_chan->device->pdev,
- pci_unmap_addr(desc, dst),
- pci_unmap_len(desc, len),
- PCI_DMA_FROMDEVICE);
- else
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, dst),
- pci_unmap_len(desc, len),
- PCI_DMA_FROMDEVICE);
- }
-
- if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- pci_unmap_single(ioat_chan->device->pdev,
- pci_unmap_addr(desc, src),
- pci_unmap_len(desc, len),
- PCI_DMA_TODEVICE);
- else
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, src),
- pci_unmap_len(desc, len),
- PCI_DMA_TODEVICE);
- }
-}
-
-/**
- * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- */
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
-{
- unsigned long phys_complete;
- struct ioat_desc_sw *desc, *_desc;
- dma_cookie_t cookie = 0;
- unsigned long desc_phys;
- struct ioat_desc_sw *latest_desc;
-
- prefetch(ioat_chan->completion_virt);
-
- if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
- return;
-
-	/* The completion writeback can happen at any time,
-	   so reads by the driver need to be atomic operations.
-	   The descriptor physical addresses are limited to 32 bits
-	   when the CPU can only do a 32-bit mov. */
-
-#if (BITS_PER_LONG == 64)
- phys_complete =
- ioat_chan->completion_virt->full
- & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
- phys_complete =
- ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
- if ((ioat_chan->completion_virt->full
- & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
- IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
- dev_err(&ioat_chan->device->pdev->dev,
- "Channel halted, chanerr = %x\n",
- readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
-
- /* TODO do something to salvage the situation */
- }
-
- if (phys_complete == ioat_chan->last_completion) {
- spin_unlock_bh(&ioat_chan->cleanup_lock);
- /*
- * perhaps we're stuck so hard that the watchdog can't go off?
- * try to catch it after 2 seconds
- */
- if (ioat_chan->device->version != IOAT_VER_3_0) {
- if (time_after(jiffies,
- ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
- ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
- ioat_chan->last_completion_time = jiffies;
- }
- }
- return;
- }
- ioat_chan->last_completion_time = jiffies;
-
- cookie = 0;
- if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
- spin_unlock_bh(&ioat_chan->cleanup_lock);
- return;
- }
-
- switch (ioat_chan->device->version) {
- case IOAT_VER_1_2:
- list_for_each_entry_safe(desc, _desc,
- &ioat_chan->used_desc, node) {
-
- /*
- * Incoming DMA requests may use multiple descriptors,
- * due to exceeding xfercap, perhaps. If so, only the
- * last one will have a cookie, and require unmapping.
- */
- if (desc->async_tx.cookie) {
- cookie = desc->async_tx.cookie;
- ioat_dma_unmap(ioat_chan, desc);
- if (desc->async_tx.callback) {
- desc->async_tx.callback(desc->async_tx.callback_param);
- desc->async_tx.callback = NULL;
- }
- }
-
- if (desc->async_tx.phys != phys_complete) {
- /*
- * a completed entry, but not the last, so clean
- * up if the client is done with the descriptor
- */
- if (async_tx_test_ack(&desc->async_tx)) {
- list_move_tail(&desc->node,
- &ioat_chan->free_desc);
- } else
- desc->async_tx.cookie = 0;
- } else {
- /*
- * last used desc. Do not remove, so we can
- * append from it, but don't look at it next
- * time, either
- */
- desc->async_tx.cookie = 0;
-
- /* TODO check status bits? */
- break;
- }
- }
- break;
- case IOAT_VER_2_0:
- case IOAT_VER_3_0:
-		/* has some other thread already cleaned up? */
- if (ioat_chan->used_desc.prev == NULL)
- break;
-
- /* work backwards to find latest finished desc */
- desc = to_ioat_desc(ioat_chan->used_desc.next);
- latest_desc = NULL;
- do {
- desc = to_ioat_desc(desc->node.prev);
- desc_phys = (unsigned long)desc->async_tx.phys
- & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
- if (desc_phys == phys_complete) {
- latest_desc = desc;
- break;
- }
- } while (&desc->node != ioat_chan->used_desc.prev);
-
- if (latest_desc != NULL) {
-
- /* work forwards to clear finished descriptors */
- for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
- &desc->node != latest_desc->node.next &&
- &desc->node != ioat_chan->used_desc.next;
- desc = to_ioat_desc(desc->node.next)) {
- if (desc->async_tx.cookie) {
- cookie = desc->async_tx.cookie;
- desc->async_tx.cookie = 0;
- ioat_dma_unmap(ioat_chan, desc);
- if (desc->async_tx.callback) {
- desc->async_tx.callback(desc->async_tx.callback_param);
- desc->async_tx.callback = NULL;
- }
- }
- }
-
- /* move used.prev up beyond those that are finished */
- if (&desc->node == ioat_chan->used_desc.next)
- ioat_chan->used_desc.prev = NULL;
- else
- ioat_chan->used_desc.prev = &desc->node;
- }
- break;
- }
-
- spin_unlock_bh(&ioat_chan->desc_lock);
-
- ioat_chan->last_completion = phys_complete;
- if (cookie != 0)
- ioat_chan->completed_cookie = cookie;
-
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
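
The BITS_PER_LONG split in the cleanup path above exists because the hardware updates the 64-bit completion writeback asynchronously: a 64-bit CPU can read the whole word in one atomic load, while a 32-bit CPU can only read the low half atomically and therefore masks and uses just the low 32 bits of the completed-descriptor address. A hedged standalone sketch of the same readback (the demo_* names and the 64-bit mask value are illustrative):

#include <stdint.h>

#define DEMO_LOW_COMPLETION_MASK   0xffffffc0u	/* low-word address mask */
#define DEMO_COMPLETED_ADDR_MASK   (~0x3fULL)	/* illustrative 64-bit mask */

union demo_completion {
	volatile uint64_t full;		/* written back by the DMA engine */
	struct {
		volatile uint32_t low;
		volatile uint32_t high;
	};
};

static unsigned long demo_phys_complete(const union demo_completion *c)
{
#if UINTPTR_MAX > 0xffffffffu		/* 64-bit: one aligned load is atomic */
	return (unsigned long)(c->full & DEMO_COMPLETED_ADDR_MASK);
#else					/* 32-bit: only the low word is safe */
	return (unsigned long)(c->low & DEMO_LOW_COMPLETION_MASK);
#endif
}
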
-
-/**
- * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
- * @chan: IOAT DMA channel handle
- * @cookie: DMA transaction identifier
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
- */
-static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
-{
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- enum dma_status ret;
-
- last_used = chan->cookie;
- last_complete = ioat_chan->completed_cookie;
- ioat_chan->watchdog_tcp_cookie = cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret == DMA_SUCCESS)
- return ret;
-
- ioat_dma_memcpy_cleanup(ioat_chan);
-
- last_used = chan->cookie;
- last_complete = ioat_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
- return dma_async_is_complete(cookie, last_complete, last_used);
-}
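
ioat_dma_is_complete() above follows the usual poll shape: test the cookie against the cached completion state, and only on a miss run the more expensive cleanup and test again. A generic sketch of that check/cleanup/re-check pattern (demo_* names are hypothetical, and the comparison ignores cookie wrap-around for brevity):

struct demo_chan {
	int cookie;		/* last cookie handed out */
	int completed_cookie;	/* last cookie known to be done */
};

enum demo_status { DEMO_IN_PROGRESS, DEMO_SUCCESS };

static enum demo_status demo_check(int cookie, const struct demo_chan *chan)
{
	return cookie <= chan->completed_cookie ? DEMO_SUCCESS
						: DEMO_IN_PROGRESS;
}

static void demo_cleanup(struct demo_chan *chan)
{
	/* stand-in for reaping finished descriptors and advancing
	 * completed_cookie from the hardware's completion writeback
	 */
	chan->completed_cookie = chan->cookie;
}

static enum demo_status demo_poll(struct demo_chan *chan, int cookie)
{
	/* fast path: cached state may already show completion */
	if (demo_check(cookie, chan) == DEMO_SUCCESS)
		return DEMO_SUCCESS;

	/* slow path: reap finished work, then look again */
	demo_cleanup(chan);
	return demo_check(cookie, chan);
}
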
-
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
-{
- struct ioat_desc_sw *desc;
-
- spin_lock_bh(&ioat_chan->desc_lock);
-
- desc = ioat_dma_get_next_descriptor(ioat_chan);
-
- if (!desc) {
- dev_err(&ioat_chan->device->pdev->dev,
- "Unable to start null desc - get next desc failed\n");
- spin_unlock_bh(&ioat_chan->desc_lock);
- return;
- }
-
- desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
- | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
- | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- /* set size to non-zero value (channel returns error when size is 0) */
- desc->hw->size = NULL_DESC_BUFFER_SIZE;
- desc->hw->src_addr = 0;
- desc->hw->dst_addr = 0;
- async_tx_ack(&desc->async_tx);
- switch (ioat_chan->device->version) {
- case IOAT_VER_1_2:
- desc->hw->next = 0;
- list_add_tail(&desc->node, &ioat_chan->used_desc);
-
- writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
- writel(((u64) desc->async_tx.phys) >> 32,
- ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
- writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
- + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
- break;
- case IOAT_VER_2_0:
- case IOAT_VER_3_0:
- writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
- writel(((u64) desc->async_tx.phys) >> 32,
- ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
- ioat_chan->dmacount++;
- __ioat2_dma_memcpy_issue_pending(ioat_chan);
- break;
- }
- spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
-/*
- * Perform an IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
-{
- struct completion *cmp = dma_async_param;
-
- complete(cmp);
-}
-
-/**
- * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-static int ioat_dma_self_test(struct ioatdma_device *device)
-{
- int i;
- u8 *src;
- u8 *dest;
- struct dma_chan *dma_chan;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- int err = 0;
- struct completion cmp;
- unsigned long tmo;
- unsigned long flags;
-
- src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
- if (!src)
- return -ENOMEM;
- dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
- if (!dest) {
- kfree(src);
- return -ENOMEM;
- }
-
- /* Fill in src buffer */
- for (i = 0; i < IOAT_TEST_SIZE; i++)
- src[i] = (u8)i;
-
- /* Start copy, using first DMA channel */
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
- if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
- dev_err(&device->pdev->dev,
- "selftest cannot allocate chan resource\n");
- err = -ENODEV;
- goto out;
- }
-
- dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
- DMA_TO_DEVICE);
- dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
- DMA_FROM_DEVICE);
- flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
- tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
- IOAT_TEST_SIZE, flags);
- if (!tx) {
- dev_err(&device->pdev->dev,
- "Self-test prep failed, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
-
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(&device->pdev->dev,
- "Self-test setup failed, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
- device->common.device_issue_pending(dma_chan);
-
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
- if (tmo == 0 ||
- device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
- != DMA_SUCCESS) {
- dev_err(&device->pdev->dev,
- "Self-test copy timed out, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
- if (memcmp(src, dest, IOAT_TEST_SIZE)) {
- dev_err(&device->pdev->dev,
- "Self-test copy failed compare, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
-
-free_resources:
- device->common.device_free_chan_resources(dma_chan);
-out:
- kfree(src);
- kfree(dest);
- return err;
-}
-
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
- sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
- "set ioat interrupt style: msix (default), "
-		 "msix-single-vector, msi, intx");
-
-/**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
- */
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
-{
- struct ioat_dma_chan *ioat_chan;
- int err, i, j, msixcnt;
- u8 intrctrl = 0;
-
- if (!strcmp(ioat_interrupt_style, "msix"))
- goto msix;
- if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
- goto msix_single_vector;
- if (!strcmp(ioat_interrupt_style, "msi"))
- goto msi;
- if (!strcmp(ioat_interrupt_style, "intx"))
- goto intx;
- dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
- ioat_interrupt_style);
- goto err_no_irq;
-
-msix:
- /* The number of MSI-X vectors should equal the number of channels */
- msixcnt = device->common.chancnt;
- for (i = 0; i < msixcnt; i++)
- device->msix_entries[i].entry = i;
-
- err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
- if (err < 0)
- goto msi;
- if (err > 0)
- goto msix_single_vector;
-
- for (i = 0; i < msixcnt; i++) {
- ioat_chan = ioat_lookup_chan_by_index(device, i);
- err = request_irq(device->msix_entries[i].vector,
- ioat_dma_do_interrupt_msix,
- 0, "ioat-msix", ioat_chan);
- if (err) {
- for (j = 0; j < i; j++) {
- ioat_chan =
- ioat_lookup_chan_by_index(device, j);
- free_irq(device->msix_entries[j].vector,
- ioat_chan);
- }
- goto msix_single_vector;
- }
- }
- intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
- device->irq_mode = msix_multi_vector;
- goto done;
-
-msix_single_vector:
- device->msix_entries[0].entry = 0;
- err = pci_enable_msix(device->pdev, device->msix_entries, 1);
- if (err)
- goto msi;
-
- err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
- 0, "ioat-msix", device);
- if (err) {
- pci_disable_msix(device->pdev);
- goto msi;
- }
- device->irq_mode = msix_single_vector;
- goto done;
-
-msi:
- err = pci_enable_msi(device->pdev);
- if (err)
- goto intx;
-
- err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
- 0, "ioat-msi", device);
- if (err) {
- pci_disable_msi(device->pdev);
- goto intx;
- }
- /*
- * CB 1.2 devices need a bit set in configuration space to enable MSI
- */
- if (device->version == IOAT_VER_1_2) {
- u32 dmactrl;
- pci_read_config_dword(device->pdev,
- IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
- dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
- pci_write_config_dword(device->pdev,
- IOAT_PCI_DMACTRL_OFFSET, dmactrl);
- }
- device->irq_mode = msi;
- goto done;
-
-intx:
- err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
- IRQF_SHARED, "ioat-intx", device);
- if (err)
- goto err_no_irq;
- device->irq_mode = intx;
-
-done:
- intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
- writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
- return 0;
-
-err_no_irq:
- /* Disable all interrupt generation */
- writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
- dev_err(&device->pdev->dev, "no usable interrupts\n");
- device->irq_mode = none;
- return -1;
-}
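
The goto-based setup above degrades gracefully: full MSI-X, then single-vector MSI-X, then MSI, then legacy INTx, with each failed attempt cleaning up after itself and falling through to the next. A compact sketch of the same cascade with hypothetical demo_try_*() helpers (0 means success):

enum demo_irq_mode {
	DEMO_IRQ_NONE,
	DEMO_IRQ_MSIX,
	DEMO_IRQ_MSIX_SINGLE,
	DEMO_IRQ_MSI,
	DEMO_IRQ_INTX,
};

static int demo_try_msix(void)        { return -1; }	/* pretend these */
static int demo_try_msix_single(void) { return -1; }	/* setups fail... */
static int demo_try_msi(void)         { return -1; }
static int demo_try_intx(void)        { return 0; }	/* ...until INTx */

static enum demo_irq_mode demo_setup_interrupts(void)
{
	if (demo_try_msix() == 0)
		return DEMO_IRQ_MSIX;
	if (demo_try_msix_single() == 0)
		return DEMO_IRQ_MSIX_SINGLE;
	if (demo_try_msi() == 0)
		return DEMO_IRQ_MSI;
	if (demo_try_intx() == 0)
		return DEMO_IRQ_INTX;
	return DEMO_IRQ_NONE;	/* no usable interrupts */
}
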
-
-/**
- * ioat_dma_remove_interrupts - remove whatever interrupts were set
- * @device: ioat device
- */
-static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
-{
- struct ioat_dma_chan *ioat_chan;
- int i;
-
- /* Disable all interrupt generation */
- writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-
- switch (device->irq_mode) {
- case msix_multi_vector:
- for (i = 0; i < device->common.chancnt; i++) {
- ioat_chan = ioat_lookup_chan_by_index(device, i);
- free_irq(device->msix_entries[i].vector, ioat_chan);
- }
- pci_disable_msix(device->pdev);
- break;
- case msix_single_vector:
- free_irq(device->msix_entries[0].vector, device);
- pci_disable_msix(device->pdev);
- break;
- case msi:
- free_irq(device->pdev->irq, device);
- pci_disable_msi(device->pdev);
- break;
- case intx:
- free_irq(device->pdev->irq, device);
- break;
- case none:
- dev_warn(&device->pdev->dev,
- "call to %s without interrupts setup\n", __func__);
- }
- device->irq_mode = none;
-}
-
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
- void __iomem *iobase)
-{
- int err;
- struct ioatdma_device *device;
-
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- err = -ENOMEM;
- goto err_kzalloc;
- }
- device->pdev = pdev;
- device->reg_base = iobase;
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-
- /* DMA coherent memory pool for DMA descriptor allocations */
- device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
- sizeof(struct ioat_dma_descriptor),
- 64, 0);
- if (!device->dma_pool) {
- err = -ENOMEM;
- goto err_dma_pool;
- }
-
- device->completion_pool = pci_pool_create("completion_pool", pdev,
- sizeof(u64), SMP_CACHE_BYTES,
- SMP_CACHE_BYTES);
- if (!device->completion_pool) {
- err = -ENOMEM;
- goto err_completion_pool;
- }
-
- INIT_LIST_HEAD(&device->common.channels);
- ioat_dma_enumerate_channels(device);
-
- device->common.device_alloc_chan_resources =
- ioat_dma_alloc_chan_resources;
- device->common.device_free_chan_resources =
- ioat_dma_free_chan_resources;
- device->common.dev = &pdev->dev;
-
- dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
- device->common.device_is_tx_complete = ioat_dma_is_complete;
- switch (device->version) {
- case IOAT_VER_1_2:
- device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
- device->common.device_issue_pending =
- ioat1_dma_memcpy_issue_pending;
- break;
- case IOAT_VER_2_0:
- case IOAT_VER_3_0:
- device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
- device->common.device_issue_pending =
- ioat2_dma_memcpy_issue_pending;
- break;
- }
-
- dev_err(&device->pdev->dev,
- "Intel(R) I/OAT DMA Engine found,"
- " %d channels, device version 0x%02x, driver version %s\n",
- device->common.chancnt, device->version, IOAT_DMA_VERSION);
-
- if (!device->common.chancnt) {
- dev_err(&device->pdev->dev,
- "Intel(R) I/OAT DMA Engine problem found: "
- "zero channels detected\n");
- goto err_setup_interrupts;
- }
-
- err = ioat_dma_setup_interrupts(device);
- if (err)
- goto err_setup_interrupts;
-
- err = ioat_dma_self_test(device);
- if (err)
- goto err_self_test;
-
- ioat_set_tcp_copy_break(device);
-
- dma_async_device_register(&device->common);
-
- if (device->version != IOAT_VER_3_0) {
- INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
- schedule_delayed_work(&device->work,
- WATCHDOG_DELAY);
- }
-
- return device;
-
-err_self_test:
- ioat_dma_remove_interrupts(device);
-err_setup_interrupts:
- pci_pool_destroy(device->completion_pool);
-err_completion_pool:
- pci_pool_destroy(device->dma_pool);
-err_dma_pool:
- kfree(device);
-err_kzalloc:
- dev_err(&pdev->dev,
- "Intel(R) I/OAT DMA Engine initialization failed\n");
- return NULL;
-}
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
- struct dma_chan *chan, *_chan;
- struct ioat_dma_chan *ioat_chan;
-
- if (device->version != IOAT_VER_3_0)
- cancel_delayed_work(&device->work);
-
- ioat_dma_remove_interrupts(device);
-
- dma_async_device_unregister(&device->common);
-
- pci_pool_destroy(device->dma_pool);
- pci_pool_destroy(device->completion_pool);
-
- iounmap(device->reg_base);
- pci_release_regions(device->pdev);
- pci_disable_device(device->pdev);
-
- list_for_each_entry_safe(chan, _chan,
- &device->common.channels, device_node) {
- ioat_chan = to_ioat_chan(chan);
- list_del(&chan->device_node);
- kfree(ioat_chan);
- }
- kfree(device);
-}
-
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
deleted file mode 100644
index a52ff4bd460..00000000000
--- a/drivers/dma/ioatdma.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_H
-#define IOATDMA_H
-
-#include <linux/dmaengine.h>
-#include "ioatdma_hw.h"
-#include <linux/init.h>
-#include <linux/dmapool.h>
-#include <linux/cache.h>
-#include <linux/pci_ids.h>
-#include <net/tcp.h>
-
-#define IOAT_DMA_VERSION "3.64"
-
-enum ioat_interrupt {
- none = 0,
- msix_multi_vector = 1,
- msix_single_vector = 2,
- msi = 3,
- intx = 4,
-};
-
-#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
-#define IOAT_DMA_DCA_ANY_CPU ~0
-#define IOAT_WATCHDOG_PERIOD (2 * HZ)
-
-
-/**
- * struct ioatdma_device - internal representation of an IOAT device
- * @pdev: PCI-Express device
- * @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
- * @version: version of ioatdma device
- * @irq_mode: which style irq to use
- * @msix_entries: irq handlers
- * @idx: per channel data
- */
-
-struct ioatdma_device {
- struct pci_dev *pdev;
- void __iomem *reg_base;
- struct pci_pool *dma_pool;
- struct pci_pool *completion_pool;
- struct dma_device common;
- u8 version;
- enum ioat_interrupt irq_mode;
- struct delayed_work work;
- struct msix_entry msix_entries[4];
- struct ioat_dma_chan *idx[4];
-};
-
-/**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-
- void __iomem *reg_base;
-
- dma_cookie_t completed_cookie;
- unsigned long last_completion;
- unsigned long last_completion_time;
-
- size_t xfercap; /* XFERCAP register value expanded out */
-
- spinlock_t cleanup_lock;
- spinlock_t desc_lock;
- struct list_head free_desc;
- struct list_head used_desc;
- unsigned long watchdog_completion;
- int watchdog_tcp_cookie;
- u32 watchdog_last_tcp_cookie;
- struct delayed_work work;
-
- int pending;
- int dmacount;
- int desccount;
-
- struct ioatdma_device *device;
- struct dma_chan common;
-
- dma_addr_t completion_addr;
- union {
- u64 full; /* HW completion writeback */
- struct {
- u32 low;
- u32 high;
- };
- } *completion_virt;
- unsigned long last_compl_desc_addr_hw;
- struct tasklet_struct cleanup_task;
-};
-
-/* wrapper around hardware descriptor format + additional software fields */
-
-/**
- * struct ioat_desc_sw - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor
- * @node: this descriptor will either be on the free list,
- * or attached to a transaction list (async_tx.tx_list)
- * @tx_cnt: number of descriptors required to complete the transaction
- * @async_tx: the generic software descriptor for all engines
- */
-struct ioat_desc_sw {
- struct ioat_dma_descriptor *hw;
- struct list_head node;
- int tx_cnt;
- size_t len;
- dma_addr_t src;
- dma_addr_t dst;
- struct dma_async_tx_descriptor async_tx;
-};
-
-static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
-{
- #ifdef CONFIG_NET_DMA
- switch (dev->version) {
- case IOAT_VER_1_2:
- sysctl_tcp_dma_copybreak = 4096;
- break;
- case IOAT_VER_2_0:
- sysctl_tcp_dma_copybreak = 2048;
- break;
- case IOAT_VER_3_0:
- sysctl_tcp_dma_copybreak = 262144;
- break;
- }
- #endif
-}
-
-#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
- void __iomem *iobase);
-void ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-#else
-#define ioat_dma_probe(pdev, iobase) NULL
-#define ioat_dma_remove(device) do { } while (0)
-#define ioat_dca_init(pdev, iobase) NULL
-#define ioat2_dca_init(pdev, iobase) NULL
-#define ioat3_dca_init(pdev, iobase) NULL
-#endif
-
-#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
deleted file mode 100644
index afa57eef86c..00000000000
--- a/drivers/dma/ioatdma_hw.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef _IOAT_HW_H_
-#define _IOAT_HW_H_
-
-/* PCI Configuration Space Values */
-#define IOAT_PCI_VID 0x8086
-
-/* CB device ID's */
-#define IOAT_PCI_DID_5000 0x1A38
-#define IOAT_PCI_DID_CNB 0x360B
-#define IOAT_PCI_DID_SCNB 0x65FF
-#define IOAT_PCI_DID_SNB 0x402F
-
-#define IOAT_PCI_RID 0x00
-#define IOAT_PCI_SVID 0x8086
-#define IOAT_PCI_SID 0x8086
-#define IOAT_VER_1_2 0x12 /* Version 1.2 */
-#define IOAT_VER_2_0 0x20 /* Version 2.0 */
-#define IOAT_VER_3_0 0x30 /* Version 3.0 */
-
-struct ioat_dma_descriptor {
- uint32_t size;
- uint32_t ctl;
- uint64_t src_addr;
- uint64_t dst_addr;
- uint64_t next;
- uint64_t rsv1;
- uint64_t rsv2;
- uint64_t user1;
- uint64_t user2;
-};
-
-#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002
-#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004
-#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
-#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010
-#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020
-#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
-#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
-#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100
-#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200
-#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400
-
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000
-
-#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000
-
-#endif
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 2f052265122..645ca8d54ec 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>
+#include <linux/raid/pq.h>
#include <mach/adma.h>
@@ -57,65 +58,110 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
}
}
+static void
+iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
+{
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+ struct iop_adma_desc_slot *unmap = desc->group_head;
+ struct device *dev = &iop_chan->device->pdev->dev;
+ u32 len = unmap->unmap_len;
+ enum dma_ctrl_flags flags = tx->flags;
+ u32 src_cnt;
+ dma_addr_t addr;
+ dma_addr_t dest;
+
+ src_cnt = unmap->unmap_src_cnt;
+ dest = iop_desc_get_dest_addr(unmap, iop_chan);
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ dma_unmap_page(dev, dest, len, dir);
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ while (src_cnt--) {
+ addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
+ if (addr == dest)
+ continue;
+ dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
+ }
+ }
+ desc->group_head = NULL;
+}
+
+static void
+iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
+{
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+ struct iop_adma_desc_slot *unmap = desc->group_head;
+ struct device *dev = &iop_chan->device->pdev->dev;
+ u32 len = unmap->unmap_len;
+ enum dma_ctrl_flags flags = tx->flags;
+ u32 src_cnt = unmap->unmap_src_cnt;
+ dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
+ dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
+ int i;
+
+ if (tx->flags & DMA_PREP_CONTINUE)
+ src_cnt -= 3;
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
+ dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ dma_addr_t addr;
+
+ for (i = 0; i < src_cnt; i++) {
+ addr = iop_desc_get_src_addr(unmap, iop_chan, i);
+ dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
+ }
+ if (desc->pq_check_result) {
+ dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
+ }
+ }
+
+ desc->group_head = NULL;
+}
+
+
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
- BUG_ON(desc->async_tx.cookie < 0);
- if (desc->async_tx.cookie > 0) {
- cookie = desc->async_tx.cookie;
- desc->async_tx.cookie = 0;
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+ BUG_ON(tx->cookie < 0);
+ if (tx->cookie > 0) {
+ cookie = tx->cookie;
+ tx->cookie = 0;
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
- if (desc->async_tx.callback)
- desc->async_tx.callback(
- desc->async_tx.callback_param);
+ if (tx->callback)
+ tx->callback(tx->callback_param);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*/
if (desc->group_head && desc->unmap_len) {
- struct iop_adma_desc_slot *unmap = desc->group_head;
- struct device *dev =
- &iop_chan->device->pdev->dev;
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = desc->async_tx.flags;
- u32 src_cnt;
- dma_addr_t addr;
- dma_addr_t dest;
-
- src_cnt = unmap->unmap_src_cnt;
- dest = iop_desc_get_dest_addr(unmap, iop_chan);
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- enum dma_data_direction dir;
-
- if (src_cnt > 1) /* is xor? */
- dir = DMA_BIDIRECTIONAL;
- else
- dir = DMA_FROM_DEVICE;
-
- dma_unmap_page(dev, dest, len, dir);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = iop_desc_get_src_addr(unmap,
- iop_chan,
- src_cnt);
- if (addr == dest)
- continue;
- dma_unmap_page(dev, addr, len,
- DMA_TO_DEVICE);
- }
- }
- desc->group_head = NULL;
+ if (iop_desc_is_pq(desc))
+ iop_desc_unmap_pq(iop_chan, desc);
+ else
+ iop_desc_unmap(iop_chan, desc);
}
}
/* run dependent operations */
- dma_run_dependencies(&desc->async_tx);
+ dma_run_dependencies(tx);
return cookie;
}
@@ -287,7 +333,12 @@ static void iop_adma_tasklet(unsigned long data)
{
struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
- spin_lock(&iop_chan->lock);
+	/* lockdep will flag dependency submissions as potentially
+ * recursive locking, this is not the case as a dependency
+ * submission will never recurse a channels submit routine.
+ * There are checks in async_tx.c to prevent this.
+ */
+ spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
__iop_adma_slot_cleanup(iop_chan);
spin_unlock(&iop_chan->lock);
}
@@ -370,7 +421,7 @@ retry:
}
alloc_tail->group_head = alloc_start;
alloc_tail->async_tx.cookie = -EBUSY;
- list_splice(&chain, &alloc_tail->async_tx.tx_list);
+ list_splice(&chain, &alloc_tail->tx_list);
iop_chan->last_used = last_used;
iop_desc_clear_next_desc(alloc_start);
iop_desc_clear_next_desc(alloc_tail);
@@ -429,7 +480,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
old_chain_tail = list_entry(iop_chan->chain.prev,
struct iop_adma_desc_slot, chain_node);
- list_splice_init(&sw_desc->async_tx.tx_list,
+ list_splice_init(&sw_desc->tx_list,
&old_chain_tail->chain_node);
/* fix up the hardware chain */
@@ -496,6 +547,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = iop_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->tx_list);
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
hw_desc = (char *) iop_chan->device->dma_desc_pool;
@@ -660,9 +712,9 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
}
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
- unsigned int src_cnt, size_t len, u32 *result,
- unsigned long flags)
+iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
+ unsigned int src_cnt, size_t len, u32 *result,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -696,6 +748,118 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
return sw_desc ? &sw_desc->async_tx : NULL;
}
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *g;
+ int slot_cnt, slots_per_op;
+ int continue_srcs;
+
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+ dev_dbg(iop_chan->device->common.dev,
+ "%s src_cnt: %d len: %u flags: %lx\n",
+ __func__, src_cnt, len, flags);
+
+ if (dmaf_p_disabled_continue(flags))
+ continue_srcs = 1+src_cnt;
+ else if (dmaf_continue(flags))
+ continue_srcs = 3+src_cnt;
+ else
+ continue_srcs = 0+src_cnt;
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ int i;
+
+ g = sw_desc->group_head;
+ iop_desc_set_byte_count(g, iop_chan, len);
+
+ /* even if P is disabled its destination address (bits
+ * [3:0]) must match Q. It is ok if P points to an
+ * invalid address, it won't be written.
+ */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ dst[0] = dst[1] & 0x7;
+
+ iop_desc_set_pq_addr(g, dst);
+ sw_desc->unmap_src_cnt = src_cnt;
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ for (i = 0; i < src_cnt; i++)
+ iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
+
+ /* if we are continuing a previous operation factor in
+ * the old p and q values, see the comment for dma_maxpq
+ * in include/linux/dmaengine.h
+ */
+ if (dmaf_p_disabled_continue(flags))
+ iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
+ else if (dmaf_continue(flags)) {
+ iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
+ iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
+ iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
+ }
+ iop_desc_init_pq(g, i, flags);
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
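+
+/* continue_srcs above accounts for the dmaengine continuation convention the
+ * in-code comment points at (dma_maxpq in include/linux/dmaengine.h): a
+ * continued P+Q operation re-injects the previous results as extra sources,
+ * three of them when both P and Q are carried forward and one when P is
+ * disabled.  A small hedged sketch of just that bookkeeping; the DEMO_* flag
+ * values are illustrative stand-ins, not the real dmaengine flags.
+ *
+ * #define DEMO_PREP_CONTINUE      (1u << 0)
+ * #define DEMO_PREP_PQ_DISABLE_P  (1u << 1)
+ *
+ * static unsigned int demo_pq_src_count(unsigned int src_cnt, unsigned int flags)
+ * {
+ *	int cont = flags & DEMO_PREP_CONTINUE;
+ *	int no_p = flags & DEMO_PREP_PQ_DISABLE_P;
+ *
+ *	if (cont && no_p)
+ *		return src_cnt + 1;	// only the old Q is fed back in
+ *	if (cont)
+ *		return src_cnt + 3;	// old P, old Q, and Q once more
+ *	return src_cnt;			// fresh operation: no extra sources
+ * }
+ */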
+
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, enum sum_check_flags *pqres,
+ unsigned long flags)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *g;
+ int slot_cnt, slots_per_op;
+
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ __func__, src_cnt, len);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ /* for validate operations p and q are tagged onto the
+ * end of the source list
+ */
+ int pq_idx = src_cnt;
+
+ g = sw_desc->group_head;
+ iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
+ iop_desc_set_pq_zero_sum_byte_count(g, len);
+ g->pq_check_result = pqres;
+ pr_debug("\t%s: g->pq_check_result: %p\n",
+ __func__, g->pq_check_result);
+ sw_desc->unmap_src_cnt = src_cnt+2;
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ while (src_cnt--)
+ iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
+ src[src_cnt],
+ scf[src_cnt]);
+ iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
@@ -906,7 +1070,7 @@ out:
#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
-iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
+iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
int i, src_idx;
struct page *dest;
@@ -1002,7 +1166,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
PAGE_SIZE, DMA_TO_DEVICE);
/* skip zero sum if the capability is not present */
- if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
+ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
 	/* zero sum the sources with the destination page */
@@ -1016,10 +1180,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
dma_srcs[i] = dma_map_page(dma_chan->device->dev,
zero_sum_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
- IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
- &zero_sum_result,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1072,10 +1236,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
dma_srcs[i] = dma_map_page(dma_chan->device->dev,
zero_sum_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
- tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
- IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
- &zero_sum_result,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1105,6 +1269,170 @@ out:
return err;
}
+#ifdef CONFIG_MD_RAID6_PQ
+static int __devinit
+iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
+{
+ /* combined sources, software pq results, and extra hw pq results */
+ struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
+ /* ptr to the extra hw pq buffers defined above */
+ struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
+ /* address conversion buffers (dma_map / page_address) */
+ void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
+ dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
+ dma_addr_t pq_dest[2];
+
+ int i;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ u32 zero_sum_result;
+ int err = 0;
+ struct device *dev;
+
+ dev_dbg(device->common.dev, "%s\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(pq); i++) {
+ pq[i] = alloc_page(GFP_KERNEL);
+ if (!pq[i]) {
+ while (i--)
+ __free_page(pq[i]);
+ return -ENOMEM;
+ }
+ }
+
+ /* Fill in src buffers */
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
+ pq_sw[i] = page_address(pq[i]);
+ memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
+ }
+ pq_sw[i] = page_address(pq[i]);
+ pq_sw[i+1] = page_address(pq[i+1]);
+
+ dma_chan = container_of(device->common.channels.next,
+ struct dma_chan,
+ device_node);
+ if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ dev = dma_chan->device->dev;
+
+ /* initialize the dests */
+ memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
+ memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
+
+ /* test pq */
+ pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+ pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
+ IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
+ PAGE_SIZE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_err(dev, "Self-test pq timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
+
+ if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
+ page_address(pq_hw[0]), PAGE_SIZE) != 0) {
+ dev_err(dev, "Self-test p failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
+ page_address(pq_hw[1]), PAGE_SIZE) != 0) {
+ dev_err(dev, "Self-test q failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ /* test correct zero sum using the software generated pq values */
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
+ pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ zero_sum_result = ~0;
+ tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
+ pq_src, IOP_ADMA_NUM_SRC_TEST,
+ raid6_gfexp, PAGE_SIZE, &zero_sum_result,
+ DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (zero_sum_result != 0) {
+ dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
+ zero_sum_result);
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ /* test incorrect zero sum */
+ i = IOP_ADMA_NUM_SRC_TEST;
+ memset(pq_sw[i] + 100, 0, 100);
+ memset(pq_sw[i+1] + 200, 0, 200);
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
+ pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ zero_sum_result = 0;
+ tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
+ pq_src, IOP_ADMA_NUM_SRC_TEST,
+ raid6_gfexp, PAGE_SIZE, &zero_sum_result,
+ DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
+ dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
+ zero_sum_result);
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ iop_adma_free_chan_resources(dma_chan);
+out:
+ i = ARRAY_SIZE(pq);
+ while (i--)
+ __free_page(pq[i]);
+ return err;
+}
+#endif
+
static int __devexit iop_adma_remove(struct platform_device *dev)
{
struct iop_adma_device *device = platform_get_drvdata(dev);
@@ -1192,9 +1520,16 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
dma_dev->max_xor = iop_adma_get_max_xor();
dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
}
- if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
- dma_dev->device_prep_dma_zero_sum =
- iop_adma_prep_dma_zero_sum;
+ if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_xor_val =
+ iop_adma_prep_dma_xor_val;
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
+ dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
+ }
+ if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_pq_val =
+ iop_adma_prep_dma_pq_val;
if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
dma_dev->device_prep_dma_interrupt =
iop_adma_prep_dma_interrupt;
@@ -1248,23 +1583,35 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
- ret = iop_adma_xor_zero_sum_self_test(adev);
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+ ret = iop_adma_xor_val_self_test(adev);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
goto err_free_iop_chan;
}
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
+ dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
+ #ifdef CONFIG_MD_RAID6_PQ
+ ret = iop_adma_pq_zero_sum_self_test(adev);
+ dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
+ #else
+ /* can not test raid6, so do not publish capability */
+ dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
+ dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
+ ret = 0;
+ #endif
+ if (ret)
+ goto err_free_iop_chan;
+ }
+
dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
- "( %s%s%s%s%s%s%s%s%s%s)\n",
- dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
- dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
- dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
+ "( %s%s%s%s%s%s%s)\n",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
+ dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
- dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
+ dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
@@ -1296,7 +1643,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
if (sw_desc) {
grp_start = sw_desc->group_head;
- list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
+ list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
async_tx_ack(&sw_desc->async_tx);
iop_desc_init_memcpy(grp_start, 0);
iop_desc_set_byte_count(grp_start, iop_chan, 0);
@@ -1352,7 +1699,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
+ list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
async_tx_ack(&sw_desc->async_tx);
iop_desc_init_null_xor(grp_start, 2, 0);
iop_desc_set_byte_count(grp_start, iop_chan, 0);
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index 9f6fe46a9b8..c0a272c7368 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
iov_byte_offset,
kdata,
copy);
+ /* poll for a descriptor slot */
+ if (unlikely(dma_cookie < 0)) {
+ dma_async_issue_pending(chan);
+ continue;
+ }
len -= copy;
iov[iovec_idx].iov_len -= copy;
@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
page,
offset,
copy);
+ /* poll for a descriptor slot */
+ if (unlikely(dma_cookie < 0)) {
+ dma_async_issue_pending(chan);
+ continue;
+ }
len -= copy;
iov[iovec_idx].iov_len -= copy;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 3f23eabe09f..466ab10c1ff 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -517,7 +517,7 @@ retry:
}
alloc_tail->group_head = alloc_start;
alloc_tail->async_tx.cookie = -EBUSY;
- list_splice(&chain, &alloc_tail->async_tx.tx_list);
+ list_splice(&chain, &alloc_tail->tx_list);
mv_chan->last_used = last_used;
mv_desc_clear_next_desc(alloc_start);
mv_desc_clear_next_desc(alloc_tail);
@@ -565,14 +565,14 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
if (list_empty(&mv_chan->chain))
- list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
+ list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
else {
new_hw_chain = 0;
old_chain_tail = list_entry(mv_chan->chain.prev,
struct mv_xor_desc_slot,
chain_node);
- list_splice_init(&grp_start->async_tx.tx_list,
+ list_splice_init(&grp_start->tx_list,
&old_chain_tail->chain_node);
if (!mv_can_chain(grp_start))
@@ -632,6 +632,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->tx_list);
hw_desc = (char *) mv_chan->device->dma_desc_pool;
slot->async_tx.phys =
(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 06cafe1ef52..977b592e976 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -126,9 +126,8 @@ struct mv_xor_chan {
* @idx: pool index
* @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount
+ * @tx_list: list of slots that make up a multi-descriptor transaction
* @async_tx: support for the async_tx api
- * @group_list: list of slots that make up a multi-descriptor transaction
- * for example transfer lengths larger than the supported hw max
* @xor_check_result: result of zero sum
* @crc32_result: result crc calculation
*/
@@ -145,6 +144,7 @@ struct mv_xor_desc_slot {
u16 unmap_src_cnt;
u32 value;
size_t unmap_len;
+ struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {
u32 *xor_check_result;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
new file mode 100644
index 00000000000..b3b065c4e5c
--- /dev/null
+++ b/drivers/dma/shdma.c
@@ -0,0 +1,786 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * base is drivers/dma/fsldma.c
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - DMA of SuperH does not have Hardware DMA chain mode.
+ * - MAX DMA size is 16MB.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <cpu/dma.h>
+#include <asm/dma-sh.h>
+#include "shdma.h"
+
+/* DMA descriptor control */
+#define DESC_LAST (-1)
+#define DESC_COMP (1)
+#define DESC_NCOMP (0)
+
+#define NR_DESCS_PER_CHANNEL 32
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ *
+ * This driver sets 4-byte burst mode.
+ * If you want to change the mode, change the value of RS_DEFAULT
+ * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32)).
+ */
+#define RS_DEFAULT (RS_DUAL)
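A minimal sketch of the override hinted at in the comment above; TS_32 is assumed to be the 32-bit transfer-size bit provided by asm/dma-sh.h, and dmae_init() below writes whatever RS_DEFAULT expands to into CHCR:

	/* hypothetical 1-byte burst alternative to the 4-byte default */
	/* #define RS_DEFAULT (RS_DUAL & ~TS_32) */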
+
+#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+ ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+ return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+ sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR registers
+ */
+static void sh_dmae_ctl_stop(int id)
+{
+ unsigned short dmaor = dmaor_read_reg(id);
+
+ dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
+ dmaor_write_reg(id, dmaor);
+}
+
+static int sh_dmae_rst(int id)
+{
+ unsigned short dmaor;
+
+ sh_dmae_ctl_stop(id);
+ dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+
+ dmaor_write_reg(id, dmaor);
+ if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+ pr_warning("dma-sh: Can't initialize DMAOR.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ if (chcr & CHCR_DE) {
+ if (!(chcr & CHCR_TE))
+ return -EBUSY; /* working */
+ }
+ return 0; /* waiting */
+}
+
+static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+}
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+{
+ sh_dmae_writel(sh_chan, hw.sar, SAR);
+ sh_dmae_writel(sh_chan, hw.dar, DAR);
+ sh_dmae_writel(sh_chan,
+ (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+
+ chcr |= (CHCR_DE|CHCR_IE);
+ sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+
+ chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
+ sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+ int ret = dmae_is_idle(sh_chan);
+ /* CHCR cannot be written while the DMA channel is busy */
+ if (ret)
+ return ret;
+
+ sh_dmae_writel(sh_chan, val, CHCR);
+ return 0;
+}
+
+#define DMARS1_ADDR 0x04
+#define DMARS2_ADDR 0x08
+#define DMARS_SHIFT 8
+#define DMARS_CHAN_MSK 0x01
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+ u32 addr;
+ int shift = 0;
+ int ret = dmae_is_idle(sh_chan);
+ if (ret)
+ return ret;
+
+ if (sh_chan->id & DMARS_CHAN_MSK)
+ shift = DMARS_SHIFT;
+
+ switch (sh_chan->id) {
+ /* DMARS0 */
+ case 0:
+ case 1:
+ addr = SH_DMARS_BASE;
+ break;
+ /* DMARS1 */
+ case 2:
+ case 3:
+ addr = (SH_DMARS_BASE + DMARS1_ADDR);
+ break;
+ /* DMARS2 */
+ case 4:
+ case 5:
+ addr = (SH_DMARS_BASE + DMARS2_ADDR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctrl_outw((val << shift) |
+ (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
+ addr);
+
+ return 0;
+}
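For orientation, a worked example of the routing that dmae_set_dmars() implements; the concrete register address depends on SH_DMARS_BASE for the given SoC:

	/* Illustrative only: channel 3 selects DMARS1 (SH_DMARS_BASE + DMARS1_ADDR),
	 * and since 3 & DMARS_CHAN_MSK is set the resource ID lands in the upper
	 * byte (shift = DMARS_SHIFT = 8), while the read-modify-write keeps the
	 * lower byte that belongs to channel 2 intact. */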
+
+static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct sh_desc *desc = tx_to_sh_desc(tx);
+ struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&sh_chan->desc_lock);
+
+ cookie = sh_chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+
+ /* desc->cookie is still -EBUSY only in the single-descriptor case */
+ if (desc->async_tx.cookie != -EBUSY)
+ desc->async_tx.cookie = cookie;
+ sh_chan->common.cookie = desc->async_tx.cookie;
+
+ list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ return cookie;
+}
+
+static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_desc *desc, *_desc, *ret = NULL;
+
+ spin_lock_bh(&sh_chan->desc_lock);
+ list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
+ if (async_tx_test_ack(&desc->async_tx)) {
+ list_del(&desc->node);
+ ret = desc;
+ break;
+ }
+ }
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ return ret;
+}
+
+static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
+{
+ if (desc) {
+ spin_lock_bh(&sh_chan->desc_lock);
+
+ list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+ list_add(&desc->node, &sh_chan->ld_free);
+
+ spin_unlock_bh(&sh_chan->desc_lock);
+ }
+}
+
+static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ struct sh_desc *desc;
+
+ spin_lock_bh(&sh_chan->desc_lock);
+ while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
+ spin_unlock_bh(&sh_chan->desc_lock);
+ desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
+ if (!desc) {
+ spin_lock_bh(&sh_chan->desc_lock);
+ break;
+ }
+ dma_async_tx_descriptor_init(&desc->async_tx,
+ &sh_chan->common);
+ desc->async_tx.tx_submit = sh_dmae_tx_submit;
+ desc->async_tx.flags = DMA_CTRL_ACK;
+ INIT_LIST_HEAD(&desc->tx_list);
+ sh_dmae_put_desc(sh_chan, desc);
+
+ spin_lock_bh(&sh_chan->desc_lock);
+ sh_chan->descs_allocated++;
+ }
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ return sh_chan->descs_allocated;
+}
+
+/*
+ * sh_dmae_free_chan_resources - Free all resources of the channel.
+ */
+static void sh_dmae_free_chan_resources(struct dma_chan *chan)
+{
+ struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ struct sh_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ BUG_ON(!list_empty(&sh_chan->ld_queue));
+ spin_lock_bh(&sh_chan->desc_lock);
+
+ list_splice_init(&sh_chan->ld_free, &list);
+ sh_chan->descs_allocated = 0;
+
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ list_for_each_entry_safe(desc, _desc, &list, node)
+ kfree(desc);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct sh_dmae_chan *sh_chan;
+ struct sh_desc *first = NULL, *prev = NULL, *new;
+ size_t copy_size;
+
+ if (!chan)
+ return NULL;
+
+ if (!len)
+ return NULL;
+
+ sh_chan = to_sh_chan(chan);
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = sh_dmae_get_desc(sh_chan);
+ if (!new) {
+ dev_err(sh_chan->dev,
+ "No free memory for link descriptor\n");
+ goto err_get_desc;
+ }
+
+ copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+
+ new->hw.sar = dma_src;
+ new->hw.dar = dma_dest;
+ new->hw.tcr = copy_size;
+ if (!first)
+ first = new;
+
+ new->mark = DESC_NCOMP;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= copy_size;
+ dma_src += copy_size;
+ dma_dest += copy_size;
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ new->async_tx.flags = flags; /* client is in control of this ack */
+ new->async_tx.cookie = -EBUSY; /* Last desc */
+
+ return &first->async_tx;
+
+err_get_desc:
+ sh_dmae_put_desc(sh_chan, first);
+ return NULL;
+
+}
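A sketch of how a generic dmaengine client could exercise this memcpy path; the dst/src DMA addresses and their mapping are assumed to exist elsewhere, and the polling loop is illustrative rather than how a production client would wait:

	/* Sketch only, assumptions noted above */
	static void example_sh_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_chan *chan;
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		dmaengine_get();		/* take a dmaengine client reference */
		chan = dma_find_channel(DMA_MEMCPY);
		if (chan) {
			/* requests above SH_DMA_TCR_MAX are split internally above */
			tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
								  len, DMA_CTRL_ACK);
			if (tx) {
				cookie = tx->tx_submit(tx);
				chan->device->device_issue_pending(chan);
				while (dma_async_is_tx_complete(chan, cookie,
						NULL, NULL) != DMA_SUCCESS)
					cpu_relax();
			}
		}
		dmaengine_put();
	}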
+
+/*
+ * sh_dmae_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of the DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_desc *desc, *_desc;
+
+ spin_lock_bh(&sh_chan->desc_lock);
+ list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ /* descriptor whose data has not been sent yet */
+ if (desc->mark == DESC_NCOMP)
+ break;
+
+ /* descriptor whose data has been sent */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+
+ /* Remove from ld_queue list */
+ list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+
+ dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n",
+ desc);
+
+ list_move(&desc->node, &sh_chan->ld_free);
+ /* Run the link descriptor callback function */
+ if (callback) {
+ spin_unlock_bh(&sh_chan->desc_lock);
+ dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
+ desc);
+ callback(callback_param);
+ spin_lock_bh(&sh_chan->desc_lock);
+ }
+ }
+ spin_unlock_bh(&sh_chan->desc_lock);
+}
+
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
+{
+ struct list_head *ld_node;
+ struct sh_dmae_regs hw;
+
+ /* DMA work check */
+ if (dmae_is_idle(sh_chan))
+ return;
+
+ /* Find the first untransferred descriptor */
+ for (ld_node = sh_chan->ld_queue.next;
+ (ld_node != &sh_chan->ld_queue)
+ && (to_sh_desc(ld_node)->mark == DESC_COMP);
+ ld_node = ld_node->next)
+ cpu_relax();
+
+ if (ld_node != &sh_chan->ld_queue) {
+ /* Get the ld start address from ld_queue */
+ hw = to_sh_desc(ld_node)->hw;
+ dmae_set_reg(sh_chan, hw);
+ dmae_start(sh_chan);
+ }
+}
+
+static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
+{
+ struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ sh_chan_xfer_ld_queue(sh_chan);
+}
+
+static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ sh_dmae_chan_ld_cleanup(sh_chan);
+
+ last_used = chan->cookie;
+ last_complete = sh_chan->completed_cookie;
+ if (last_complete == -EBUSY)
+ last_complete = last_used;
+
+ if (done)
+ *done = last_complete;
+
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static irqreturn_t sh_dmae_interrupt(int irq, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
+ u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+
+ if (chcr & CHCR_TE) {
+ /* DMA stop */
+ dmae_halt(sh_chan);
+
+ ret = IRQ_HANDLED;
+ tasklet_schedule(&sh_chan->tasklet);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_CPU_SH4)
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+ int err = 0;
+ struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+
+ /* IRQ Multi */
+ if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+ int cnt = 0;
+ switch (irq) {
+#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
+ case DMTE6_IRQ:
+ cnt++;
+#endif
+ case DMTE0_IRQ:
+ if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
+ disable_irq(irq);
+ return IRQ_HANDLED;
+ }
+ default:
+ return IRQ_NONE;
+ }
+ } else {
+ /* reset dma controller */
+ err = sh_dmae_rst(0);
+ if (err)
+ return err;
+ if (shdev->pdata.mode & SHDMA_DMAOR1) {
+ err = sh_dmae_rst(1);
+ if (err)
+ return err;
+ }
+ disable_irq(irq);
+ return IRQ_HANDLED;
+ }
+}
+#endif
+
+static void dmae_do_tasklet(unsigned long data)
+{
+ struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
+ struct sh_desc *desc, *_desc, *cur_desc = NULL;
+ u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+ list_for_each_entry_safe(desc, _desc,
+ &sh_chan->ld_queue, node) {
+ if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
+ cur_desc = desc;
+ break;
+ }
+ }
+
+ if (cur_desc) {
+ switch (cur_desc->async_tx.cookie) {
+ case 0: /* intermediate descriptor of a chain */
+ break;
+ case -EBUSY: /* last desc */
+ sh_chan->completed_cookie =
+ cur_desc->async_tx.cookie;
+ break;
+ default: /* first descriptor (cookie > 0) */
+ sh_chan->completed_cookie =
+ cur_desc->async_tx.cookie - 1;
+ break;
+ }
+ cur_desc->mark = DESC_COMP;
+ }
+ /* Next desc */
+ sh_chan_xfer_ld_queue(sh_chan);
+ sh_dmae_chan_ld_cleanup(sh_chan);
+}
+
+static unsigned int get_dmae_irq(unsigned int id)
+{
+ unsigned int irq = 0;
+ if (id < ARRAY_SIZE(dmte_irq_map))
+ irq = dmte_irq_map[id];
+ return irq;
+}
+
+static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
+{
+ int err;
+ unsigned int irq = get_dmae_irq(id);
+ unsigned long irqflags = IRQF_DISABLED;
+ struct sh_dmae_chan *new_sh_chan;
+
+ /* alloc channel */
+ new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ if (!new_sh_chan) {
+ dev_err(shdev->common.dev, "No free memory for allocating "
+ "dma channels!\n");
+ return -ENOMEM;
+ }
+
+ new_sh_chan->dev = shdev->common.dev;
+ new_sh_chan->id = id;
+
+ /* Init DMA tasklet */
+ tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
+ (unsigned long)new_sh_chan);
+
+ /* Init the channel */
+ dmae_init(new_sh_chan);
+
+ spin_lock_init(&new_sh_chan->desc_lock);
+
+ /* Init descriptor management lists */
+ INIT_LIST_HEAD(&new_sh_chan->ld_queue);
+ INIT_LIST_HEAD(&new_sh_chan->ld_free);
+
+ /* copy struct dma_device */
+ new_sh_chan->common.device = &shdev->common;
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&new_sh_chan->common.device_node,
+ &shdev->common.channels);
+ shdev->common.chancnt++;
+
+ if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+ irqflags = IRQF_SHARED;
+#if defined(DMTE6_IRQ)
+ if (irq >= DMTE6_IRQ)
+ irq = DMTE6_IRQ;
+ else
+#endif
+ irq = DMTE0_IRQ;
+ }
+
+ snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
+ "sh-dmae%d", new_sh_chan->id);
+
+ /* set up channel irq */
+ err = request_irq(irq, &sh_dmae_interrupt,
+ irqflags, new_sh_chan->dev_id, new_sh_chan);
+ if (err) {
+ dev_err(shdev->common.dev, "DMA channel %d request_irq error "
+ "with return %d\n", id, err);
+ goto err_no_irq;
+ }
+
+ /* CHCR register control function */
+ new_sh_chan->set_chcr = dmae_set_chcr;
+ /* DMARS register control function */
+ new_sh_chan->set_dmars = dmae_set_dmars;
+
+ shdev->chan[id] = new_sh_chan;
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ list_del(&new_sh_chan->common.device_node);
+ kfree(new_sh_chan);
+ return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+ int i;
+
+ for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
+ if (shdev->chan[i]) {
+ struct sh_dmae_chan *shchan = shdev->chan[i];
+ if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
+ free_irq(dmte_irq_map[i], shchan);
+
+ list_del(&shchan->common.device_node);
+ kfree(shchan);
+ shdev->chan[i] = NULL;
+ }
+ }
+ shdev->common.chancnt = 0;
+}
+
+static int __init sh_dmae_probe(struct platform_device *pdev)
+{
+ int err = 0, cnt, ecnt;
+ unsigned long irqflags = IRQF_DISABLED;
+#if defined(CONFIG_CPU_SH4)
+ int eirq[] = { DMAE0_IRQ,
+#if defined(DMAE1_IRQ)
+ DMAE1_IRQ
+#endif
+ };
+#endif
+ struct sh_dmae_device *shdev;
+
+ shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ if (!shdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ err = -ENOMEM;
+ goto shdev_err;
+ }
+
+ /* get platform data */
+ if (!pdev->dev.platform_data) {
+ err = -ENODEV;
+ goto rst_err;
+ }
+
+ /* platform data */
+ memcpy(&shdev->pdata, pdev->dev.platform_data,
+ sizeof(struct sh_dmae_pdata));
+
+ /* reset dma controller */
+ err = sh_dmae_rst(0);
+ if (err)
+ goto rst_err;
+
+ /* SH7780/85/23 has DMAOR1 */
+ if (shdev->pdata.mode & SHDMA_DMAOR1) {
+ err = sh_dmae_rst(1);
+ if (err)
+ goto rst_err;
+ }
+
+ INIT_LIST_HEAD(&shdev->common.channels);
+
+ dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+ shdev->common.device_alloc_chan_resources
+ = sh_dmae_alloc_chan_resources;
+ shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
+ shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
+ shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+ shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+ shdev->common.dev = &pdev->dev;
+
+#if defined(CONFIG_CPU_SH4)
+ /* Non Mix IRQ mode SH7722/SH7730 etc... */
+ if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+ irqflags = IRQF_SHARED;
+ eirq[0] = DMTE0_IRQ;
+#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
+ eirq[1] = DMTE6_IRQ;
+#endif
+ }
+
+ for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
+ err = request_irq(eirq[ecnt], sh_dmae_err,
+ irqflags, "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev, "DMA device request_irq "
+ "error (irq %d) with return %d\n",
+ eirq[ecnt], err);
+ goto eirq_err;
+ }
+ }
+#endif /* CONFIG_CPU_SH4 */
+
+ /* Create DMA Channel */
+ for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
+ err = sh_dmae_chan_probe(shdev, cnt);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ platform_set_drvdata(pdev, shdev);
+ dma_async_device_register(&shdev->common);
+
+ return err;
+
+chan_probe_err:
+ sh_dmae_chan_remove(shdev);
+
+eirq_err:
+ for (ecnt-- ; ecnt >= 0; ecnt--)
+ free_irq(eirq[ecnt], shdev);
+
+rst_err:
+ kfree(shdev);
+
+shdev_err:
+ return err;
+}
+
+static int __exit sh_dmae_remove(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&shdev->common);
+
+ if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+ free_irq(DMTE0_IRQ, shdev);
+#if defined(DMTE6_IRQ)
+ free_irq(DMTE6_IRQ, shdev);
+#endif
+ }
+
+ /* channel data remove */
+ sh_dmae_chan_remove(shdev);
+
+ if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
+ free_irq(DMAE0_IRQ, shdev);
+#if defined(DMAE1_IRQ)
+ free_irq(DMAE1_IRQ, shdev);
+#endif
+ }
+ kfree(shdev);
+
+ return 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ sh_dmae_ctl_stop(0);
+ if (shdev->pdata.mode & SHDMA_DMAOR1)
+ sh_dmae_ctl_stop(1);
+}
+
+static struct platform_driver sh_dmae_driver = {
+ .remove = __exit_p(sh_dmae_remove),
+ .shutdown = sh_dmae_shutdown,
+ .driver = {
+ .name = "sh-dma-engine",
+ },
+};
+
+static int __init sh_dmae_init(void)
+{
+ return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+ platform_driver_unregister(&sh_dmae_driver);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
new file mode 100644
index 00000000000..2b4bc15a2c0
--- /dev/null
+++ b/drivers/dma/shdma.h
@@ -0,0 +1,64 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DMA_SHDMA_H
+#define __DMA_SHDMA_H
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+
+#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
+
+struct sh_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_desc {
+ struct list_head tx_list;
+ struct sh_dmae_regs hw;
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ int mark;
+};
+
+struct sh_dmae_chan {
+ dma_cookie_t completed_cookie; /* The maximum cookie completed */
+ spinlock_t desc_lock; /* Descriptor operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_free; /* Link descriptors free */
+ struct dma_chan common; /* DMA common channel */
+ struct device *dev; /* Channel device */
+ struct tasklet_struct tasklet; /* Tasklet */
+ int descs_allocated; /* desc count */
+ int id; /* Raw id of this channel */
+ char dev_id[16]; /* unique name per DMAC of channel */
+
+ /* Set chcr */
+ int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
+ /* Set DMA resource */
+ int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
+};
+
+struct sh_dmae_device {
+ struct dma_device common;
+ struct sh_dmae_chan *chan[MAX_DMA_CHANNELS];
+ struct sh_dmae_pdata pdata;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
+#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
+#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+
+#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 7837930146a..fb6bb64e886 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -180,9 +180,8 @@ static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
- if (!list_empty(&desc->txd.tx_list))
- desc = list_entry(desc->txd.tx_list.prev,
- struct txx9dmac_desc, desc_node);
+ if (!list_empty(&desc->tx_list))
+ desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
return desc;
}
@@ -197,6 +196,7 @@ static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
desc = kzalloc(sizeof(*desc), flags);
if (!desc)
return NULL;
+ INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
desc->txd.tx_submit = txx9dmac_tx_submit;
/* txd.flags will be overwritten in prep funcs */
@@ -245,7 +245,7 @@ static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
struct txx9dmac_dev *ddev = dc->ddev;
struct txx9dmac_desc *child;
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
dma_sync_single_for_cpu(chan2parent(&dc->chan),
child->txd.phys, ddev->descsize,
DMA_TO_DEVICE);
@@ -267,11 +267,11 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
txx9dmac_sync_desc_for_cpu(dc, desc);
spin_lock_bh(&dc->lock);
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dc->chan),
"moving child desc %p to freelist\n",
child);
- list_splice_init(&desc->txd.tx_list, &dc->free_list);
+ list_splice_init(&desc->tx_list, &dc->free_list);
dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
desc);
list_add(&desc->desc_node, &dc->free_list);
@@ -429,7 +429,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
param = txd->callback_param;
txx9dmac_sync_desc_for_cpu(dc, desc);
- list_splice_init(&txd->tx_list, &dc->free_list);
+ list_splice_init(&desc->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
if (!ds) {
@@ -571,7 +571,7 @@ static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
"Bad descriptor submitted for DMA! (cookie: %d)\n",
bad_desc->txd.cookie);
txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
- list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &bad_desc->tx_list, desc_node)
txx9dmac_dump_desc(dc, &child->hwdesc);
/* Pretend the descriptor completed successfully */
txx9dmac_descriptor_complete(dc, bad_desc);
@@ -613,7 +613,7 @@ static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
return;
}
- list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ list_for_each_entry(child, &desc->tx_list, desc_node)
if (desc_read_CHAR(dc, child) == chain) {
/* Currently in progress */
if (csr & TXX9_DMA_CSR_ABCHC)
@@ -823,8 +823,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dma_sync_single_for_device(chan2parent(&dc->chan),
prev->txd.phys, ddev->descsize,
DMA_TO_DEVICE);
- list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
}
@@ -919,8 +918,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev->txd.phys,
ddev->descsize,
DMA_TO_DEVICE);
- list_add_tail(&desc->desc_node,
- &first->txd.tx_list);
+ list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
}
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index c907ff01d27..365d42366b9 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -231,6 +231,7 @@ struct txx9dmac_desc {
/* THEN values for driver housekeeping */
struct list_head desc_node ____cacheline_aligned;
+ struct list_head tx_list;
struct dma_async_tx_descriptor txd;
size_t len;
};
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index a3ca18e2d7c..02127e59fe8 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -133,6 +133,13 @@ config EDAC_I3000
Support for error detection and correction on the Intel
3000 and 3010 server chipsets.
+config EDAC_I3200
+ tristate "Intel 3200"
+ depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL
+ help
+ Support for error detection and correction on the Intel
+ 3200 and 3210 server chipsets.
+
config EDAC_X38
tristate "Intel X38"
depends on EDAC_MM_EDAC && PCI && X86
@@ -176,11 +183,11 @@ config EDAC_I5100
San Clemente MCH.
config EDAC_MPC85XX
- tristate "Freescale MPC85xx"
- depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
+ tristate "Freescale MPC83xx / MPC85xx"
+ depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx)
help
Support for error detection and correction on the Freescale
- MPC8560, MPC8540, MPC8548
+ MPC8349, MPC8560, MPC8540, MPC8548
config EDAC_MV64X60
tristate "Marvell MV64x60"
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index cfa033ce53a..7a473bbe8ab 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
+obj-$(CONFIG_EDAC_I3200) += i3200_edac.o
obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
@@ -49,3 +50,4 @@ obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
+
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 8c54196b5ab..3d50274f134 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -885,14 +885,14 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdev->name)) {
cpc925_printk(KERN_ERR, "Unable to request mem region\n");
res = -EBUSY;
goto err1;
}
- vbase = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
+ vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!vbase) {
cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
res = -ENOMEM;
@@ -953,7 +953,7 @@ err3:
cpc925_mc_exit(mci);
edac_mc_free(mci);
err2:
- devm_release_mem_region(&pdev->dev, r->start, r->end-r->start+1);
+ devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
err1:
devres_release_group(&pdev->dev, cpc925_probe);
out:
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 871c13b4c14..12f355cafdb 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -286,7 +286,7 @@ enum scrub_type {
* is irrespective of the memory devices being mounted
* on both sides of the memory stick.
*
- * Socket set: All of the memory sticks that are required for for
+ * Socket set: All of the memory sticks that are required for
* a single memory access or all of the memory sticks
* spanned by a chip-select row. A single socket set
* has two chip-select rows and if double-sided sticks
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index b02a6a69a8f..d5e13c94714 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -356,7 +356,6 @@ static void complete_edac_device_list_del(struct rcu_head *head)
edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
INIT_LIST_HEAD(&edac_dev->link);
- complete(&edac_dev->removal_complete);
}
/*
@@ -369,10 +368,8 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info
*edac_device)
{
list_del_rcu(&edac_device->link);
-
- init_completion(&edac_device->removal_complete);
call_rcu(&edac_device->rcu, complete_edac_device_list_del);
- wait_for_completion(&edac_device->removal_complete);
+ rcu_barrier();
}
/*
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 335b7ebdb11..b629c41756f 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -418,16 +418,14 @@ static void complete_mc_list_del(struct rcu_head *head)
mci = container_of(head, struct mem_ctl_info, rcu);
INIT_LIST_HEAD(&mci->link);
- complete(&mci->complete);
}
static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
atomic_dec(&edac_handlers);
list_del_rcu(&mci->link);
- init_completion(&mci->complete);
call_rcu(&mci->rcu, complete_mc_list_del);
- wait_for_completion(&mci->complete);
+ rcu_barrier();
}
/**
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 30b585b1d60..efb5d565078 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -174,7 +174,6 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
pci = container_of(head, struct edac_pci_ctl_info, rcu);
INIT_LIST_HEAD(&pci->link);
- complete(&pci->complete);
}
/*
@@ -185,9 +184,8 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
{
list_del_rcu(&pci->link);
- init_completion(&pci->complete);
call_rcu(&pci->rcu, complete_edac_pci_list_del);
- wait_for_completion(&pci->complete);
+ rcu_barrier();
}
#if 0
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
new file mode 100644
index 00000000000..fde4db91c4d
--- /dev/null
+++ b/drivers/edac/i3200_edac.c
@@ -0,0 +1,527 @@
+/*
+ * Intel 3200/3210 Memory Controller kernel module
+ * Copyright (C) 2008-2009 Akamai Technologies, Inc.
+ * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include "edac_core.h"
+
+#define I3200_REVISION "1.1"
+
+#define EDAC_MOD_STR "i3200_edac"
+
+#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
+
+#define I3200_RANKS 8
+#define I3200_RANKS_PER_CHANNEL 4
+#define I3200_CHANNELS 2
+
+/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
+#define I3200_MCHBAR_HIGH 0x4c
+#define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
+#define I3200_MMR_WINDOW_SIZE 16384
+
+#define I3200_TOM 0xa0 /* Top of Memory (16b)
+ *
+ * 15:10 reserved
+ * 9:0 total populated physical memory
+ */
+#define I3200_TOM_MASK 0x3ff /* bits 9:0 */
+#define I3200_TOM_SHIFT 26 /* 64MiB grain */
+
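A small sketch of how the Top-of-Memory register decodes, assuming 'tom' was already read with pci_read_config_word(pdev, I3200_TOM, &tom):

	u64 tom_bytes;

	tom &= I3200_TOM_MASK;			/* keep bits 9:0 */
	tom_bytes = (u64)tom << I3200_TOM_SHIFT;	/* 64MiB granularity */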
+#define I3200_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15 reserved
+ * 14 Isochronous TBWRR Run Behind FIFO Full
+ * (ITCV)
+ * 13 Isochronous TBWRR Run Behind FIFO Put
+ * (ITSTV)
+ * 12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR (GTSE)
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 reserved
+ * 7 DRAM Throttle Flag (DTF)
+ * 6:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define I3200_ERRSTS_UE 0x0002
+#define I3200_ERRSTS_CE 0x0001
+#define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE)
+
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
+ *
+ * 15:10 reserved
+ * 9:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define I3200_DRB_MASK 0x3ff /* bits 9:0 */
+#define I3200_DRB_SHIFT 26 /* 64MiB grain */
+
+#define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+ * 15:2 reserved
+ * 1 Multiple Bit Error Status (MERRSTS)
+ * 0 Correctable Error Status (CERRSTS)
+ */
+#define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */
+#define I3200_ECCERRLOG_CE 0x1
+#define I3200_ECCERRLOG_UE 0x2
+#define I3200_ECCERRLOG_RANK_BITS 0x18000000
+#define I3200_ECCERRLOG_RANK_SHIFT 27
+#define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000
+#define I3200_ECCERRLOG_SYNDROME_SHIFT 16
+#define I3200_CAPID0 0xe0 /* P.95 of spec for details */
+
+struct i3200_priv {
+ void __iomem *window;
+};
+
+static int nr_channels;
+
+static int how_many_channels(struct pci_dev *pdev)
+{
+ unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+ pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
+ if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
+ debugf0("In single channel mode.\n");
+ return 1;
+ } else {
+ debugf0("In dual channel mode.\n");
+ return 2;
+ }
+}
+
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+ return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
+ I3200_ECCERRLOG_SYNDROME_SHIFT;
+}
+
+static int eccerrlog_row(int channel, u64 log)
+{
+ u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
+ I3200_ECCERRLOG_RANK_SHIFT);
+ return rank | (channel * I3200_RANKS_PER_CHANNEL);
+}
+
+enum i3200_chips {
+ I3200 = 0,
+};
+
+struct i3200_dev_info {
+ const char *ctl_name;
+};
+
+struct i3200_error_info {
+ u16 errsts;
+ u16 errsts2;
+ u64 eccerrlog[I3200_CHANNELS];
+};
+
+static const struct i3200_dev_info i3200_devs[] = {
+ [I3200] = {
+ .ctl_name = "i3200"
+ },
+};
+
+static struct pci_dev *mci_pdev;
+static int i3200_registered = 1;
+
+
+static void i3200_clear_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
+ I3200_ERRSTS_BITS);
+}
+
+static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
+ struct i3200_error_info *info)
+{
+ struct pci_dev *pdev;
+ struct i3200_priv *priv = mci->pvt_info;
+ void __iomem *window = priv->window;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once, and the registers can change underneath us,
+ * e.g. a CE can be overwritten by a UE.
+ */
+ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
+ if (!(info->errsts & I3200_ERRSTS_BITS))
+ return;
+
+ info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
+
+ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);
+
+ /*
+ * If the error is the same for both reads then the first set
+ * of reads is valid. If there is a change then there is a CE
+ * with no info and the second set of reads is valid and
+ * should be UE info.
+ */
+ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
+ info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
+ }
+
+ i3200_clear_error_info(mci);
+}
+
+static void i3200_process_error_info(struct mem_ctl_info *mci,
+ struct i3200_error_info *info)
+{
+ int channel;
+ u64 log;
+
+ if (!(info->errsts & I3200_ERRSTS_BITS))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ for (channel = 0; channel < nr_channels; channel++) {
+ log = info->eccerrlog[channel];
+ if (log & I3200_ECCERRLOG_UE) {
+ edac_mc_handle_ue(mci, 0, 0,
+ eccerrlog_row(channel, log),
+ "i3200 UE");
+ } else if (log & I3200_ECCERRLOG_CE) {
+ edac_mc_handle_ce(mci, 0, 0,
+ eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log), 0,
+ "i3200 CE");
+ }
+ }
+}
+
+static void i3200_check(struct mem_ctl_info *mci)
+{
+ struct i3200_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i3200_get_and_clear_error_info(mci, &info);
+ i3200_process_error_info(mci, &info);
+}
+
+
+void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
+{
+ union {
+ u64 mchbar;
+ struct {
+ u32 mchbar_low;
+ u32 mchbar_high;
+ };
+ } u;
+ void __iomem *window;
+
+ pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
+ pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
+ u.mchbar &= I3200_MCHBAR_MASK;
+
+ if (u.mchbar != (resource_size_t)u.mchbar) {
+ printk(KERN_ERR
+ "i3200: mmio space beyond accessible range (0x%llx)\n",
+ (unsigned long long)u.mchbar);
+ return NULL;
+ }
+
+ window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+ if (!window)
+ printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
+ (unsigned long long)u.mchbar);
+
+ return window;
+}
+
+
+static void i3200_get_drbs(void __iomem *window,
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
+{
+ int i;
+
+ for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
+ drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
+ drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
+ }
+}
+
+static bool i3200_is_stacked(struct pci_dev *pdev,
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
+{
+ u16 tom;
+
+ pci_read_config_word(pdev, I3200_TOM, &tom);
+ tom &= I3200_TOM_MASK;
+
+ return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
+}
+
+static unsigned long drb_to_nr_pages(
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
+ int channel, int rank)
+{
+ int n;
+
+ n = drbs[channel][rank];
+ if (rank > 0)
+ n -= drbs[channel][rank - 1];
+ if (stacked && (channel == 1) &&
+ drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
+ n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];
+
+ n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
+ return n;
+}
+
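A worked example of the rank-size arithmetic above, assuming 4KiB pages and a non-stacked layout; the DRB values are illustrative only:

	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL] = {
		{ 4, 8, 8, 8 },	/* channel 0: two populated ranks */
		{ 4, 8, 8, 8 },	/* channel 1: two populated ranks */
	};
	unsigned long pages = drb_to_nr_pages(drbs, false, 0, 1);
	/* pages == (8 - 4) << (26 - PAGE_SHIFT) == 65536, i.e. 256MiB;
	 * ranks 2 and 3 add nothing and end up as MEM_EMPTY in i3200_probe1() */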
+static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_page;
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
+ bool stacked;
+ void __iomem *window;
+ struct i3200_priv *priv;
+
+ debugf0("MC: %s()\n", __func__);
+
+ window = i3200_map_mchbar(pdev);
+ if (!window)
+ return -ENODEV;
+
+ i3200_get_drbs(window, drbs);
+ nr_channels = how_many_channels(pdev);
+
+ mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
+ nr_channels, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I3200_REVISION;
+ mci->ctl_name = i3200_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i3200_check;
+ mci->ctl_page_to_phys = NULL;
+ priv = mci->pvt_info;
+ priv->window = window;
+
+ stacked = i3200_is_stacked(pdev, drbs);
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 64MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ */
+ last_page = -1UL;
+ for (i = 0; i < mci->nr_csrows; i++) {
+ unsigned long nr_pages;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ nr_pages = drb_to_nr_pages(drbs, stacked,
+ i / I3200_RANKS_PER_CHANNEL,
+ i % I3200_RANKS_PER_CHANNEL);
+
+ if (nr_pages == 0) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_page + 1;
+ last_page += nr_pages;
+ csrow->last_page = last_page;
+ csrow->nr_pages = nr_pages;
+
+ csrow->grain = nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ i3200_clear_error_info(mci);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+fail:
+ iounmap(window);
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+static int __devinit i3200_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i3200_probe1(pdev, ent->driver_data);
+ if (!mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i3200_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i3200_priv *priv;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ priv = mci->pvt_info;
+ iounmap(priv->window);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3200},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
+
+static struct pci_driver i3200_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i3200_init_one,
+ .remove = __devexit_p(i3200_remove_one),
+ .id_table = i3200_pci_tbl,
+};
+
+static int __init i3200_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i3200_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ i3200_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_3200_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("i3200 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("i3200 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i3200_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i3200_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&i3200_driver);
+ if (!i3200_registered) {
+ i3200_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i3200_init);
+module_exit(i3200_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies, Inc.");
+MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 3f2ccfc6407..157f6504f25 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -41,7 +41,9 @@ static u32 orig_pci_err_en;
#endif
static u32 orig_l2_err_disable;
+#ifdef CONFIG_MPC85xx
static u32 orig_hid1[2];
+#endif
/************************ MC SYSFS parts ***********************************/
@@ -646,6 +648,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
{ .compatible = "fsl,mpc8560-l2-cache-controller", },
{ .compatible = "fsl,mpc8568-l2-cache-controller", },
{ .compatible = "fsl,mpc8572-l2-cache-controller", },
+ { .compatible = "fsl,p2020-l2-cache-controller", },
{},
};
@@ -788,19 +791,20 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
csrow = &mci->csrows[index];
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
- start = (cs_bnds & 0xfff0000) << 4;
- end = ((cs_bnds & 0xfff) << 20);
- if (start)
- start |= 0xfffff;
- if (end)
- end |= 0xfffff;
+
+ start = (cs_bnds & 0xffff0000) >> 16;
+ end = (cs_bnds & 0x0000ffff);
if (start == end)
continue; /* not populated */
+ start <<= (24 - PAGE_SHIFT);
+ end <<= (24 - PAGE_SHIFT);
+ end |= (1 << (24 - PAGE_SHIFT)) - 1;
+
csrow->first_page = start >> PAGE_SHIFT;
csrow->last_page = end >> PAGE_SHIFT;
- csrow->nr_pages = csrow->last_page + 1 - csrow->first_page;
+ csrow->nr_pages = end + 1 - start;
csrow->grain = 8;
csrow->mtype = mtype;
csrow->dtype = DEV_UNKNOWN;
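A worked example of the new bound decoding, assuming 4KiB pages (PAGE_SHIFT == 12); the register value is illustrative only:

	/* cs_bnds = 0x0003000f: the start field 0x0003 and end field 0x000f are
	 * in 16MiB units; shifting by (24 - PAGE_SHIFT) converts them to pages:
	 *   start    = 0x0003 << 12            = 0x3000 pages
	 *   end      = (0x000f << 12) | 0xfff  = 0xffff pages
	 *   nr_pages = 0xffff + 1 - 0x3000     = 0xd000 pages (208 MiB)
	 */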
@@ -984,6 +988,8 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,mpc8560-memory-controller", },
{ .compatible = "fsl,mpc8568-memory-controller", },
{ .compatible = "fsl,mpc8572-memory-controller", },
+ { .compatible = "fsl,mpc8349-memory-controller", },
+ { .compatible = "fsl,p2020-memory-controller", },
{},
};
@@ -999,13 +1005,13 @@ static struct of_platform_driver mpc85xx_mc_err_driver = {
},
};
-
+#ifdef CONFIG_MPC85xx
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000));
}
-
+#endif
static int __init mpc85xx_mc_init(void)
{
@@ -1038,26 +1044,32 @@ static int __init mpc85xx_mc_init(void)
printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif
+#ifdef CONFIG_MPC85xx
/*
* need to clear HID1[RFXE] to disable machine check int
* so we can catch it
*/
if (edac_op_state == EDAC_OPSTATE_INT)
on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
+#endif
return 0;
}
module_init(mpc85xx_mc_init);
+#ifdef CONFIG_MPC85xx
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
+#endif
static void __exit mpc85xx_mc_exit(void)
{
+#ifdef CONFIG_MPC85xx
on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
+#endif
#ifdef CONFIG_PCI
of_unregister_platform_driver(&mpc85xx_pci_err_driver);
#endif
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 5131aaae8e0..a6b9fec13a7 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -90,7 +90,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev)
return -ENOENT;
}
- pci_serr = ioremap(r->start, r->end - r->start + 1);
+ pci_serr = ioremap(r->start, resource_size(r));
if (!pci_serr)
return -ENOMEM;
@@ -140,7 +140,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -150,7 +150,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
pdata->pci_vbase = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->pci_vbase) {
printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
res = -ENOMEM;
@@ -306,7 +306,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while request mem region\n",
__func__);
@@ -316,7 +316,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
pdata->sram_vbase = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->sram_vbase) {
printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
__func__);
@@ -474,7 +474,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -484,7 +484,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->cpu_vbase[0]) {
printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
res = -ENOMEM;
@@ -501,7 +501,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -511,7 +511,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->cpu_vbase[1]) {
printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
res = -ENOMEM;
@@ -726,7 +726,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -736,7 +736,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
pdata->mc_vbase = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->mc_vbase) {
printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
res = -ENOMEM;
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index f74edae5cb4..e4864e894e4 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -444,16 +444,13 @@ int fw_card_add(struct fw_card *card,
card->guid = guid;
mutex_lock(&card_mutex);
- config_rom = generate_config_rom(card, &length);
- list_add_tail(&card->link, &card_list);
- mutex_unlock(&card_mutex);
+ config_rom = generate_config_rom(card, &length);
ret = card->driver->enable(card, config_rom, length);
- if (ret < 0) {
- mutex_lock(&card_mutex);
- list_del(&card->link);
- mutex_unlock(&card_mutex);
- }
+ if (ret == 0)
+ list_add_tail(&card->link, &card_list);
+
+ mutex_unlock(&card_mutex);
return ret;
}
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 479b22f5a1e..da628c72a46 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -834,7 +834,7 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
}
static struct fw_address_handler topology_map = {
- .length = 0x200,
+ .length = 0x400,
.address_callback = handle_topology_map,
};
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 6052816be35..7ff6e758515 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -96,6 +96,20 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(u32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
+static inline struct fw_card *fw_card_get(struct fw_card *card)
+{
+ kref_get(&card->kref);
+
+ return card;
+}
+
+void fw_card_release(struct kref *kref);
+
+static inline void fw_card_put(struct fw_card *card)
+{
+ kref_put(&card->kref, fw_card_release);
+}
+
/* -cdev */
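
The fw_card_get()/fw_card_put() helpers added above are a thin wrapper around kref_get()/kref_put() with fw_card_release() as the release function. A stand-alone, single-threaded userspace analogue of that get/put discipline (made-up names, not the kernel kref API):

#include <stdio.h>
#include <stdlib.h>

struct card {
        int refcount;
};

static void card_release(struct card *c)
{
        printf("last reference dropped, freeing card\n");
        free(c);
}

static struct card *card_get(struct card *c)
{
        c->refcount++;
        return c;
}

static void card_put(struct card *c)
{
        if (--c->refcount == 0)
                card_release(c);
}

int main(void)
{
        struct card *c = calloc(1, sizeof(*c));

        c->refcount = 1;        /* initial reference held by the creator */
        card_get(c);            /* temporary reference, e.g. across a sleep */
        card_put(c);            /* drop the temporary reference */
        card_put(c);            /* drop the initial reference; the card is freed */
        return 0;
}
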
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 76b321bb73f..5d524254499 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1279,8 +1279,8 @@ static void bus_reset_tasklet(unsigned long data)
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
- self_id_count = (reg >> 3) & 0x3ff;
- if (self_id_count == 0) {
+ self_id_count = (reg >> 3) & 0xff;
+ if (self_id_count == 0 || self_id_count > 252) {
fw_notify("inconsistent self IDs\n");
return;
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index e5df822a813..50f0176de61 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -354,8 +354,7 @@ static const struct {
/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
.firmware_revision = 0x002800,
.model = 0x000000,
- .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY |
- SBP2_WORKAROUND_POWER_CONDITION,
+ .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
},
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
@@ -425,19 +424,20 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb;
struct sbp2_status status;
- size_t header_size;
unsigned long flags;
if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
- length == 0 || length > sizeof(status)) {
+ length < 8 || length > sizeof(status)) {
fw_send_response(card, request, RCODE_TYPE_ERROR);
return;
}
- header_size = min(length, 2 * sizeof(u32));
- fw_memcpy_from_be32(&status, payload, header_size);
- if (length > header_size)
- memcpy(status.data, payload + 8, length - header_size);
+ status.status = be32_to_cpup(payload);
+ status.orb_low = be32_to_cpup(payload + 4);
+ memset(status.data, 0, sizeof(status.data));
+ if (length > 8)
+ memcpy(status.data, payload + 8, length - 8);
+
if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
fw_notify("non-orb related status write, not handled\n");
fw_send_response(card, request, RCODE_COMPLETE);
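
The sbp2_status_write() change rejects status writes shorter than the two-quadlet header and reads the status and orb_low fields directly as big-endian words, copying only the remainder into status.data. A stand-alone sketch of that parsing, using ntohl() in place of be32_to_cpup() and a made-up payload:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>  /* ntohl(), standing in for be32_to_cpup() */

int main(void)
{
        unsigned char payload[12] = {
                0x80, 0x11, 0x00, 0x08,         /* status quadlet (big endian) */
                0x00, 0x00, 0x10, 0x00,         /* orb_low quadlet (big endian) */
                0xde, 0xad, 0xbe, 0xef,         /* trailing status data */
        };
        size_t length = sizeof(payload);
        uint32_t status, orb_low;
        unsigned char data[24] = { 0 };

        if (length < 8)
                return 1;       /* too short: the driver now rejects this */

        memcpy(&status, payload, 4);
        memcpy(&orb_low, payload + 4, 4);
        status = ntohl(status);
        orb_low = ntohl(orb_low);
        if (length > 8)
                memcpy(data, payload + 8, length - 8);

        printf("status=0x%08x orb_low=0x%08x data[0]=0x%02x\n",
               (unsigned)status, (unsigned)orb_low, data[0]);
        return 0;
}
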
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index d5ea8a68d33..56f9234781f 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -164,7 +164,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = alloc_bootmem_low(sizeof(struct firmware_map_entry));
+ entry = alloc_bootmem(sizeof(struct firmware_map_entry));
if (WARN_ON(!entry))
return -ENOMEM;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 6b4c484a699..2ad0128c63c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -162,6 +162,16 @@ config GPIO_WM831X
Say yes here to access the GPIO signals of WM831x power management
chips from Wolfson Microelectronics.
+config GPIO_ADP5520
+ tristate "GPIO Support for ADP5520 PMIC"
+ depends on PMIC_ADP5520
+ help
+ This option enables support for on-chip GPIO found
+ on Analog Devices ADP5520 PMICs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp5520-gpio.
+
comment "PCI GPIO expanders:"
config GPIO_BT8XX
@@ -180,6 +190,12 @@ config GPIO_BT8XX
If unsure, say N.
+config GPIO_LANGWELL
+ bool "Intel Moorestown Platform Langwell GPIO support"
+ depends on PCI
+ help
+ Say Y here to support Intel Moorestown platform GPIO.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
@@ -195,4 +211,23 @@ config GPIO_MCP23S08
SPI driver for Microchip MCP23S08 I/O expander. This provides
a GPIO interface supporting inputs and outputs.
+config GPIO_MC33880
+ tristate "Freescale MC33880 high-side/low-side switch"
+ depends on SPI_MASTER
+ help
+ SPI driver for Freescale MC33880 high-side/low-side switch.
+	  This provides a GPIO interface supporting inputs and outputs.
+
+comment "AC97 GPIO expanders:"
+
+config GPIO_UCB1400
+ bool "Philips UCB1400 GPIO"
+ depends on UCB1400_CORE
+ help
+ This enables support for the Philips UCB1400 GPIO pins.
+ The UCB1400 is an AC97 audio codec.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ucb1400_gpio.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ea7c745f26a..00a532c9a1e 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -4,13 +4,17 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
+obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
+obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX7301) += max7301.o
obj-$(CONFIG_GPIO_MAX732X) += max732x.o
+obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
+obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
diff --git a/drivers/gpio/adp5520-gpio.c b/drivers/gpio/adp5520-gpio.c
new file mode 100644
index 00000000000..ad05bbc7ffd
--- /dev/null
+++ b/drivers/gpio/adp5520-gpio.c
@@ -0,0 +1,206 @@
+/*
+ * GPIO driver for Analog Devices ADP5520 MFD PMICs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/adp5520.h>
+
+#include <linux/gpio.h>
+
+struct adp5520_gpio {
+ struct device *master;
+ struct gpio_chip gpio_chip;
+ unsigned char lut[ADP5520_MAXGPIOS];
+ unsigned long output;
+};
+
+static int adp5520_gpio_get_value(struct gpio_chip *chip, unsigned off)
+{
+ struct adp5520_gpio *dev;
+ uint8_t reg_val;
+
+ dev = container_of(chip, struct adp5520_gpio, gpio_chip);
+
+ /*
+ * There are dedicated registers for GPIO IN/OUT.
+ * Make sure we return the right value, even when configured as output
+ */
+
+ if (test_bit(off, &dev->output))
+ adp5520_read(dev->master, GPIO_OUT, &reg_val);
+ else
+ adp5520_read(dev->master, GPIO_IN, &reg_val);
+
+ return !!(reg_val & dev->lut[off]);
+}
+
+static void adp5520_gpio_set_value(struct gpio_chip *chip,
+ unsigned off, int val)
+{
+ struct adp5520_gpio *dev;
+ dev = container_of(chip, struct adp5520_gpio, gpio_chip);
+
+ if (val)
+ adp5520_set_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ else
+ adp5520_clr_bits(dev->master, GPIO_OUT, dev->lut[off]);
+}
+
+static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
+{
+ struct adp5520_gpio *dev;
+ dev = container_of(chip, struct adp5520_gpio, gpio_chip);
+
+ clear_bit(off, &dev->output);
+
+ return adp5520_clr_bits(dev->master, GPIO_CFG_2, dev->lut[off]);
+}
+
+static int adp5520_gpio_direction_output(struct gpio_chip *chip,
+ unsigned off, int val)
+{
+ struct adp5520_gpio *dev;
+ int ret = 0;
+ dev = container_of(chip, struct adp5520_gpio, gpio_chip);
+
+ set_bit(off, &dev->output);
+
+ if (val)
+ ret |= adp5520_set_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ else
+ ret |= adp5520_clr_bits(dev->master, GPIO_OUT, dev->lut[off]);
+
+ ret |= adp5520_set_bits(dev->master, GPIO_CFG_2, dev->lut[off]);
+
+ return ret;
+}
+
+static int __devinit adp5520_gpio_probe(struct platform_device *pdev)
+{
+ struct adp5520_gpio_platfrom_data *pdata = pdev->dev.platform_data;
+ struct adp5520_gpio *dev;
+ struct gpio_chip *gc;
+ int ret, i, gpios;
+ unsigned char ctl_mask = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdev->id != ID_ADP5520) {
+ dev_err(&pdev->dev, "only ADP5520 supports GPIO\n");
+ return -ENODEV;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ dev->master = pdev->dev.parent;
+
+ for (gpios = 0, i = 0; i < ADP5520_MAXGPIOS; i++)
+ if (pdata->gpio_en_mask & (1 << i))
+ dev->lut[gpios++] = 1 << i;
+
+ if (gpios < 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ gc = &dev->gpio_chip;
+ gc->direction_input = adp5520_gpio_direction_input;
+ gc->direction_output = adp5520_gpio_direction_output;
+ gc->get = adp5520_gpio_get_value;
+ gc->set = adp5520_gpio_set_value;
+ gc->can_sleep = 1;
+
+ gc->base = pdata->gpio_start;
+ gc->ngpio = gpios;
+ gc->label = pdev->name;
+ gc->owner = THIS_MODULE;
+
+ ret = adp5520_clr_bits(dev->master, GPIO_CFG_1,
+ pdata->gpio_en_mask);
+
+ if (pdata->gpio_en_mask & GPIO_C3)
+ ctl_mask |= C3_MODE;
+
+ if (pdata->gpio_en_mask & GPIO_R3)
+ ctl_mask |= R3_MODE;
+
+ if (ctl_mask)
+ ret = adp5520_set_bits(dev->master, LED_CONTROL,
+ ctl_mask);
+
+ ret |= adp5520_set_bits(dev->master, GPIO_PULLUP,
+ pdata->gpio_pullup_mask);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ goto err;
+ }
+
+ ret = gpiochip_add(&dev->gpio_chip);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+
+err:
+ kfree(dev);
+ return ret;
+}
+
+static int __devexit adp5520_gpio_remove(struct platform_device *pdev)
+{
+ struct adp5520_gpio *dev;
+ int ret;
+
+ dev = platform_get_drvdata(pdev);
+ ret = gpiochip_remove(&dev->gpio_chip);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed, %d\n",
+ "gpiochip_remove()", ret);
+ return ret;
+ }
+
+ kfree(dev);
+ return 0;
+}
+
+static struct platform_driver adp5520_gpio_driver = {
+ .driver = {
+ .name = "adp5520-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_gpio_probe,
+ .remove = __devexit_p(adp5520_gpio_remove),
+};
+
+static int __init adp5520_gpio_init(void)
+{
+ return platform_driver_register(&adp5520_gpio_driver);
+}
+module_init(adp5520_gpio_init);
+
+static void __exit adp5520_gpio_exit(void)
+{
+ platform_driver_unregister(&adp5520_gpio_driver);
+}
+module_exit(adp5520_gpio_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("GPIO ADP5520 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:adp5520-gpio");
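
adp5520_gpio_probe() above packs the enabled pins into a small lookup table so that consecutive gpiolib offsets map to whichever register bits the board set in gpio_en_mask. A stand-alone illustration of that mapping with a made-up mask (the GPIO count is assumed here):

#include <stdio.h>

#define ADP5520_MAXGPIOS 8      /* assumed width for this illustration */

int main(void)
{
        unsigned char lut[ADP5520_MAXGPIOS];
        unsigned char gpio_en_mask = 0xb1;      /* hypothetical: bits 0, 4, 5, 7 enabled */
        int i, gpios = 0;

        /* same loop shape as the probe: offset N maps to the Nth set bit */
        for (i = 0; i < ADP5520_MAXGPIOS; i++)
                if (gpio_en_mask & (1 << i))
                        lut[gpios++] = 1 << i;

        for (i = 0; i < gpios; i++)
                printf("gpio offset %d -> register bit mask 0x%02x\n", i, lut[i]);
        return 0;
}
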
diff --git a/drivers/gpio/bt8xxgpio.c b/drivers/gpio/bt8xxgpio.c
index 984b587f0f9..2559f228940 100644
--- a/drivers/gpio/bt8xxgpio.c
+++ b/drivers/gpio/bt8xxgpio.c
@@ -46,8 +46,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-
-#include <asm/gpio.h>
+#include <linux/gpio.h>
/* Steal the hardware definitions from the bttv driver. */
#include "../media/video/bt8xx/bt848.h"
@@ -331,13 +330,13 @@ static struct pci_driver bt8xxgpio_pci_driver = {
.resume = bt8xxgpio_resume,
};
-static int bt8xxgpio_init(void)
+static int __init bt8xxgpio_init(void)
{
return pci_register_driver(&bt8xxgpio_pci_driver);
}
module_init(bt8xxgpio_init)
-static void bt8xxgpio_exit(void)
+static void __exit bt8xxgpio_exit(void)
{
pci_unregister_driver(&bt8xxgpio_pci_driver);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 51a8d4103be..bb11a429394 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,5 +1,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/device.h>
@@ -7,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/idr.h>
/* Optional implementation infrastructure for GPIO interfaces.
@@ -49,6 +51,13 @@ struct gpio_desc {
#define FLAG_RESERVED 2
#define FLAG_EXPORT 3 /* protected by sysfs_lock */
#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
+#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
+#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
+
+#define PDESC_ID_SHIFT 16 /* add new flags before this one */
+
+#define GPIO_FLAGS_MASK ((1 << PDESC_ID_SHIFT) - 1)
+#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE))
#ifdef CONFIG_DEBUG_FS
const char *label;
@@ -56,6 +65,15 @@ struct gpio_desc {
};
static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
+#ifdef CONFIG_GPIO_SYSFS
+struct poll_desc {
+ struct work_struct work;
+ struct sysfs_dirent *value_sd;
+};
+
+static struct idr pdesc_idr;
+#endif
+
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
#ifdef CONFIG_DEBUG_FS
@@ -188,10 +206,10 @@ static DEFINE_MUTEX(sysfs_lock);
* /value
* * always readable, subject to hardware behavior
* * may be writable, as zero/nonzero
- *
- * REVISIT there will likely be an attribute for configuring async
- * notifications, e.g. to specify polling interval or IRQ trigger type
- * that would for example trigger a poll() on the "value".
+ * /edge
+ * * configures behavior of poll(2) on /value
+ * * available only if pin can generate IRQs on input
+ * * is read/write as "none", "falling", "rising", or "both"
*/
static ssize_t gpio_direction_show(struct device *dev,
@@ -288,6 +306,175 @@ static ssize_t gpio_value_store(struct device *dev,
static /*const*/ DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
+static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
+{
+ struct work_struct *work = priv;
+
+ schedule_work(work);
+ return IRQ_HANDLED;
+}
+
+static void gpio_notify_sysfs(struct work_struct *work)
+{
+ struct poll_desc *pdesc;
+
+ pdesc = container_of(work, struct poll_desc, work);
+ sysfs_notify_dirent(pdesc->value_sd);
+}
+
+static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
+ unsigned long gpio_flags)
+{
+ struct poll_desc *pdesc;
+ unsigned long irq_flags;
+ int ret, irq, id;
+
+ if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
+ return 0;
+
+ irq = gpio_to_irq(desc - gpio_desc);
+ if (irq < 0)
+ return -EIO;
+
+ id = desc->flags >> PDESC_ID_SHIFT;
+ pdesc = idr_find(&pdesc_idr, id);
+ if (pdesc) {
+ free_irq(irq, &pdesc->work);
+ cancel_work_sync(&pdesc->work);
+ }
+
+ desc->flags &= ~GPIO_TRIGGER_MASK;
+
+ if (!gpio_flags) {
+ ret = 0;
+ goto free_sd;
+ }
+
+ irq_flags = IRQF_SHARED;
+ if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
+ irq_flags |= IRQF_TRIGGER_FALLING;
+ if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
+ irq_flags |= IRQF_TRIGGER_RISING;
+
+ if (!pdesc) {
+ pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ do {
+ ret = -ENOMEM;
+ if (idr_pre_get(&pdesc_idr, GFP_KERNEL))
+ ret = idr_get_new_above(&pdesc_idr,
+ pdesc, 1, &id);
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ goto free_mem;
+
+ desc->flags &= GPIO_FLAGS_MASK;
+ desc->flags |= (unsigned long)id << PDESC_ID_SHIFT;
+
+ if (desc->flags >> PDESC_ID_SHIFT != id) {
+ ret = -ERANGE;
+ goto free_id;
+ }
+
+ pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
+ if (!pdesc->value_sd) {
+ ret = -ENODEV;
+ goto free_id;
+ }
+ INIT_WORK(&pdesc->work, gpio_notify_sysfs);
+ }
+
+ ret = request_irq(irq, gpio_sysfs_irq, irq_flags,
+ "gpiolib", &pdesc->work);
+ if (ret)
+ goto free_sd;
+
+ desc->flags |= gpio_flags;
+ return 0;
+
+free_sd:
+ sysfs_put(pdesc->value_sd);
+free_id:
+ idr_remove(&pdesc_idr, id);
+ desc->flags &= GPIO_FLAGS_MASK;
+free_mem:
+ kfree(pdesc);
+err_out:
+ return ret;
+}
+
+static const struct {
+ const char *name;
+ unsigned long flags;
+} trigger_types[] = {
+ { "none", 0 },
+ { "falling", BIT(FLAG_TRIG_FALL) },
+ { "rising", BIT(FLAG_TRIG_RISE) },
+ { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) },
+};
+
+static ssize_t gpio_edge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else {
+ int i;
+
+ status = 0;
+ for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
+ if ((desc->flags & GPIO_TRIGGER_MASK)
+ == trigger_types[i].flags) {
+ status = sprintf(buf, "%s\n",
+ trigger_types[i].name);
+ break;
+ }
+ }
+
+ mutex_unlock(&sysfs_lock);
+ return status;
+}
+
+static ssize_t gpio_edge_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
+ if (sysfs_streq(trigger_types[i].name, buf))
+ goto found;
+ return -EINVAL;
+
+found:
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else {
+ status = gpio_setup_irq(desc, dev, trigger_types[i].flags);
+ if (!status)
+ status = size;
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ return status;
+}
+
+static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
+
static const struct attribute *gpio_attrs[] = {
&dev_attr_direction.attr,
&dev_attr_value.attr,
@@ -473,7 +660,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
struct device *dev;
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%d", gpio);
+ desc, ioname ? ioname : "gpio%d", gpio);
if (dev) {
if (direction_may_change)
status = sysfs_create_group(&dev->kobj,
@@ -481,6 +668,14 @@ int gpio_export(unsigned gpio, bool direction_may_change)
else
status = device_create_file(dev,
&dev_attr_value);
+
+ if (!status && gpio_to_irq(gpio) >= 0
+ && (direction_may_change
+ || !test_bit(FLAG_IS_OUT,
+ &desc->flags)))
+ status = device_create_file(dev,
+ &dev_attr_edge);
+
if (status != 0)
device_unregister(dev);
} else
@@ -505,6 +700,51 @@ static int match_export(struct device *dev, void *data)
}
/**
+ * gpio_export_link - create a sysfs link to an exported GPIO node
+ * @dev: device under which to create symlink
+ * @name: name of the symlink
+ * @gpio: gpio to create symlink to, already exported
+ *
+ * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
+ * node. Caller is responsible for unlinking.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
+{
+ struct gpio_desc *desc;
+ int status = -EINVAL;
+
+ if (!gpio_is_valid(gpio))
+ goto done;
+
+ mutex_lock(&sysfs_lock);
+
+ desc = &gpio_desc[gpio];
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+ struct device *tdev;
+
+ tdev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (tdev != NULL) {
+ status = sysfs_create_link(&dev->kobj, &tdev->kobj,
+ name);
+ } else {
+ status = -ENODEV;
+ }
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+done:
+ if (status)
+ pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_export_link);
+
+/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
*
@@ -527,6 +767,7 @@ void gpio_unexport(unsigned gpio)
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev) {
+ gpio_setup_irq(desc, dev, 0);
clear_bit(FLAG_EXPORT, &desc->flags);
put_device(dev);
device_unregister(dev);
@@ -611,6 +852,8 @@ static int __init gpiolib_sysfs_init(void)
unsigned long flags;
unsigned gpio;
+ idr_init(&pdesc_idr);
+
status = class_register(&gpio_class);
if (status < 0)
return status;
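
With the new "edge" attribute, userspace can block in poll(2) on a GPIO's value file and be woken by the IRQ-driven sysfs_notify_dirent() path added above. A minimal userspace sketch, assuming gpio42 is already exported, is an input, and can generate interrupts (the GPIO number is illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
        char buf[8];
        struct pollfd pfd;
        int fd;

        /* pick the trigger; "none", "falling" and "rising" are also accepted */
        fd = open("/sys/class/gpio/gpio42/edge", O_WRONLY);
        if (fd < 0 || write(fd, "both", 4) < 0)
                return 1;
        close(fd);

        fd = open("/sys/class/gpio/gpio42/value", O_RDONLY);
        if (fd < 0)
                return 1;
        read(fd, buf, sizeof(buf));     /* consume the current value first */

        pfd.fd = fd;
        pfd.events = POLLPRI | POLLERR; /* sysfs notifications show up here */
        while (poll(&pfd, 1, -1) > 0) {
                lseek(fd, 0, SEEK_SET);
                read(fd, buf, sizeof(buf));
                printf("gpio42 changed, value is now %c\n", buf[0]);
        }
        close(fd);
        return 0;
}
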
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
new file mode 100644
index 00000000000..5711ce5353c
--- /dev/null
+++ b/drivers/gpio/langwell_gpio.c
@@ -0,0 +1,297 @@
+/* langwell_gpio.c Moorestown platform Langwell chip GPIO driver
+ * Copyright (c) 2008 - 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Moorestown platform Langwell chip.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+struct lnw_gpio_register {
+ u32 GPLR[2];
+ u32 GPDR[2];
+ u32 GPSR[2];
+ u32 GPCR[2];
+ u32 GRER[2];
+ u32 GFER[2];
+ u32 GEDR[2];
+};
+
+struct lnw_gpio {
+ struct gpio_chip chip;
+ struct lnw_gpio_register *reg_base;
+ spinlock_t lock;
+ unsigned irq_base;
+};
+
+static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ u8 reg = offset / 32;
+ void __iomem *gplr;
+
+ gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
+ return readl(gplr) & BIT(offset % 32);
+}
+
+static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ u8 reg = offset / 32;
+ void __iomem *gpsr, *gpcr;
+
+ if (value) {
+ gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]);
+ writel(BIT(offset % 32), gpsr);
+ } else {
+ gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]);
+ writel(BIT(offset % 32), gpcr);
+ }
+}
+
+static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ u8 reg = offset / 32;
+ u32 value;
+ unsigned long flags;
+ void __iomem *gpdr;
+
+ gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = readl(gpdr);
+ value &= ~BIT(offset % 32);
+ writel(value, gpdr);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ return 0;
+}
+
+static int lnw_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ u8 reg = offset / 32;
+ unsigned long flags;
+ void __iomem *gpdr;
+
+ lnw_gpio_set(chip, offset, value);
+ gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
+ spin_lock_irqsave(&lnw->lock, flags);
+ value = readl(gpdr);
+	value |= BIT(offset % 32);
+ writel(value, gpdr);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+ return 0;
+}
+
+static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ return lnw->irq_base + offset;
+}
+
+static int lnw_irq_type(unsigned irq, unsigned type)
+{
+ struct lnw_gpio *lnw = get_irq_chip_data(irq);
+ u32 gpio = irq - lnw->irq_base;
+ u8 reg = gpio / 32;
+ unsigned long flags;
+ u32 value;
+ void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
+ void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
+
+	if (gpio >= lnw->chip.ngpio)
+ return -EINVAL;
+ spin_lock_irqsave(&lnw->lock, flags);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value = readl(grer) | BIT(gpio % 32);
+ else
+ value = readl(grer) & (~BIT(gpio % 32));
+ writel(value, grer);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = readl(gfer) | BIT(gpio % 32);
+ else
+ value = readl(gfer) & (~BIT(gpio % 32));
+ writel(value, gfer);
+ spin_unlock_irqrestore(&lnw->lock, flags);
+
+ return 0;
+};
+
+static void lnw_irq_unmask(unsigned irq)
+{
+ struct lnw_gpio *lnw = get_irq_chip_data(irq);
+ u32 gpio = irq - lnw->irq_base;
+ u8 reg = gpio / 32;
+ void __iomem *gedr;
+
+ gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
+ writel(BIT(gpio % 32), gedr);
+};
+
+static void lnw_irq_mask(unsigned irq)
+{
+};
+
+static struct irq_chip lnw_irqchip = {
+ .name = "LNW-GPIO",
+ .mask = lnw_irq_mask,
+ .unmask = lnw_irq_unmask,
+ .set_type = lnw_irq_type,
+};
+
+static struct pci_device_id lnw_gpio_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
+
+static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
+ u32 reg, gpio;
+ void __iomem *gedr;
+ u32 gedr_v;
+
+	/* check the GPIO controller to see which pin triggered the interrupt */
+ for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) {
+ gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
+ gedr_v = readl(gedr);
+ if (!gedr_v)
+ continue;
+ for (gpio = reg*32; gpio < reg*32+32; gpio++) {
+ gedr_v = readl(gedr);
+ if (gedr_v & BIT(gpio % 32)) {
+ pr_debug("pin %d triggered\n", gpio);
+ generic_handle_irq(lnw->irq_base + gpio);
+ }
+ }
+ /* clear the edge detect status bit */
+ writel(gedr_v, gedr);
+ }
+ desc->chip->eoi(irq);
+}
+
+static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void *base;
+ int i;
+ resource_size_t start, len;
+ struct lnw_gpio *lnw;
+ u32 irq_base;
+ u32 gpio_base;
+ int retval = 0;
+
+ retval = pci_enable_device(pdev);
+ if (retval)
+ goto done;
+
+ retval = pci_request_regions(pdev, "langwell_gpio");
+ if (retval) {
+ dev_err(&pdev->dev, "error requesting resources\n");
+ goto err2;
+ }
+ /* get the irq_base from bar1 */
+ start = pci_resource_start(pdev, 1);
+ len = pci_resource_len(pdev, 1);
+ base = ioremap_nocache(start, len);
+ if (!base) {
+		dev_err(&pdev->dev, "error mapping bar1\n");
+		retval = -EFAULT;
+		goto err3;
+ }
+ irq_base = *(u32 *)base;
+ gpio_base = *((u32 *)base + 1);
+ /* release the IO mapping, since we already get the info from bar1 */
+ iounmap(base);
+ /* get the register base from bar0 */
+ start = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+ base = ioremap_nocache(start, len);
+ if (!base) {
+ dev_err(&pdev->dev, "error mapping bar0\n");
+ retval = -EFAULT;
+ goto err3;
+ }
+
+ lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+ if (!lnw) {
+ dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
+ retval = -ENOMEM;
+ goto err4;
+ }
+ lnw->reg_base = base;
+ lnw->irq_base = irq_base;
+ lnw->chip.label = dev_name(&pdev->dev);
+ lnw->chip.direction_input = lnw_gpio_direction_input;
+ lnw->chip.direction_output = lnw_gpio_direction_output;
+ lnw->chip.get = lnw_gpio_get;
+ lnw->chip.set = lnw_gpio_set;
+ lnw->chip.to_irq = lnw_gpio_to_irq;
+ lnw->chip.base = gpio_base;
+ lnw->chip.ngpio = 64;
+ lnw->chip.can_sleep = 0;
+ pci_set_drvdata(pdev, lnw);
+ retval = gpiochip_add(&lnw->chip);
+ if (retval) {
+ dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
+ goto err5;
+ }
+ set_irq_data(pdev->irq, lnw);
+ set_irq_chained_handler(pdev->irq, lnw_irq_handler);
+ for (i = 0; i < lnw->chip.ngpio; i++) {
+ set_irq_chip_and_handler_name(i + lnw->irq_base, &lnw_irqchip,
+ handle_simple_irq, "demux");
+ set_irq_chip_data(i + lnw->irq_base, lnw);
+ }
+
+ spin_lock_init(&lnw->lock);
+ goto done;
+err5:
+ kfree(lnw);
+err4:
+ iounmap(base);
+err3:
+ pci_release_regions(pdev);
+err2:
+ pci_disable_device(pdev);
+done:
+ return retval;
+}
+
+static struct pci_driver lnw_gpio_driver = {
+ .name = "langwell_gpio",
+ .id_table = lnw_gpio_ids,
+ .probe = lnw_gpio_probe,
+};
+
+static int __init lnw_gpio_init(void)
+{
+ return pci_register_driver(&lnw_gpio_driver);
+}
+
+device_initcall(lnw_gpio_init);
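
lnw_irq_handler() above walks each GEDR word, dispatches generic_handle_irq() for every set bit, and then writes the word back to clear the edge-detect status. A stand-alone sketch of that demux loop with made-up values:

#include <stdio.h>
#include <stdint.h>

static void handle_virtual_irq(unsigned int irq)
{
        printf("dispatching virtual irq %u\n", irq);    /* generic_handle_irq() stand-in */
}

int main(void)
{
        uint32_t gedr_v = 0x00010005;   /* hypothetical GEDR: pins 0, 2 and 16 fired */
        unsigned int irq_base = 256;    /* hypothetical; the driver reads it from BAR1 */
        unsigned int gpio;

        for (gpio = 0; gpio < 32; gpio++)
                if (gedr_v & (1u << gpio))
                        handle_virtual_irq(irq_base + gpio);

        /* the driver then writes gedr_v back to GEDR to clear the handled bits */
        return 0;
}
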
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c
index 7b82eaae262..480956f1ca5 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/max7301.c
@@ -339,3 +339,4 @@ module_exit(max7301_exit);
MODULE_AUTHOR("Juergen Beisert");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX7301 SPI based GPIO-Expander");
+MODULE_ALIAS("spi:" DRIVER_NAME);
diff --git a/drivers/gpio/mc33880.c b/drivers/gpio/mc33880.c
new file mode 100644
index 00000000000..e7d01bd8fdb
--- /dev/null
+++ b/drivers/gpio/mc33880.c
@@ -0,0 +1,196 @@
+/*
+ * mc33880.c MC33880 high-side/low-side switch GPIO driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Freescale MC33880 high-side/low-side switch
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/mc33880.h>
+#include <linux/gpio.h>
+
+#define DRIVER_NAME "mc33880"
+
+/*
+ * Pin configurations, see MAX7301 datasheet page 6
+ */
+#define PIN_CONFIG_MASK 0x03
+#define PIN_CONFIG_IN_PULLUP 0x03
+#define PIN_CONFIG_IN_WO_PULLUP 0x02
+#define PIN_CONFIG_OUT 0x01
+
+#define PIN_NUMBER 8
+
+
+/*
+ * Some registers must be read back to modify.
+ * To save time we cache them here in memory
+ */
+struct mc33880 {
+	struct mutex	lock;	/* protect from simultaneous accesses */
+ u8 port_config;
+ struct gpio_chip chip;
+ struct spi_device *spi;
+};
+
+static int mc33880_write_config(struct mc33880 *mc)
+{
+ return spi_write(mc->spi, &mc->port_config, sizeof(mc->port_config));
+}
+
+
+static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
+{
+ if (value)
+ mc->port_config |= 1 << offset;
+ else
+ mc->port_config &= ~(1 << offset);
+
+ return mc33880_write_config(mc);
+}
+
+
+static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mc33880 *mc = container_of(chip, struct mc33880, chip);
+
+ mutex_lock(&mc->lock);
+
+ __mc33880_set(mc, offset, value);
+
+ mutex_unlock(&mc->lock);
+}
+
+static int __devinit mc33880_probe(struct spi_device *spi)
+{
+ struct mc33880 *mc;
+ struct mc33880_platform_data *pdata;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata || !pdata->base) {
+ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+ return -EINVAL;
+ }
+
+ /*
+ * bits_per_word cannot be configured in platform data
+ */
+ spi->bits_per_word = 8;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ mc = kzalloc(sizeof(struct mc33880), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ mutex_init(&mc->lock);
+
+ dev_set_drvdata(&spi->dev, mc);
+
+ mc->spi = spi;
+
+	mc->chip.label = DRIVER_NAME;
+ mc->chip.set = mc33880_set;
+ mc->chip.base = pdata->base;
+ mc->chip.ngpio = PIN_NUMBER;
+ mc->chip.can_sleep = 1;
+ mc->chip.dev = &spi->dev;
+ mc->chip.owner = THIS_MODULE;
+
+ mc->port_config = 0x00;
+ /* write twice, because during initialisation the first setting
+ * is just for testing SPI communication, and the second is the
+ * "real" configuration
+ */
+ ret = mc33880_write_config(mc);
+ mc->port_config = 0x00;
+ if (!ret)
+ ret = mc33880_write_config(mc);
+
+ if (ret) {
+ printk(KERN_ERR "Failed writing to " DRIVER_NAME ": %d\n", ret);
+ goto exit_destroy;
+ }
+
+ ret = gpiochip_add(&mc->chip);
+ if (ret)
+ goto exit_destroy;
+
+ return ret;
+
+exit_destroy:
+ dev_set_drvdata(&spi->dev, NULL);
+ mutex_destroy(&mc->lock);
+ kfree(mc);
+ return ret;
+}
+
+static int mc33880_remove(struct spi_device *spi)
+{
+ struct mc33880 *mc;
+ int ret;
+
+ mc = dev_get_drvdata(&spi->dev);
+ if (mc == NULL)
+ return -ENODEV;
+
+ dev_set_drvdata(&spi->dev, NULL);
+
+ ret = gpiochip_remove(&mc->chip);
+ if (!ret) {
+ mutex_destroy(&mc->lock);
+ kfree(mc);
+ } else
+ dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
+ ret);
+
+ return ret;
+}
+
+static struct spi_driver mc33880_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = mc33880_probe,
+ .remove = __devexit_p(mc33880_remove),
+};
+
+static int __init mc33880_init(void)
+{
+ return spi_register_driver(&mc33880_driver);
+}
+/* register after spi postcore initcall and before
+ * subsys initcalls that may rely on these GPIOs
+ */
+subsys_initcall(mc33880_init);
+
+static void __exit mc33880_exit(void)
+{
+ spi_unregister_driver(&mc33880_driver);
+}
+module_exit(mc33880_exit);
+
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_LICENSE("GPL v2");
+
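
Rather than reading the switch back on every change, __mc33880_set() above flips the requested bit in the cached port_config byte and rewrites the whole byte over SPI. A stand-alone sketch of that cached-shadow-register approach, with the SPI write simulated by printf:

#include <stdio.h>

static unsigned char port_config;       /* cached copy of the 8 switch outputs */

static void write_config(void)
{
        /* stand-in for spi_write(mc->spi, &mc->port_config, 1) */
        printf("spi_write(0x%02x)\n", port_config);
}

static void set_output(unsigned int offset, int value)
{
        if (value)
                port_config |= 1 << offset;
        else
                port_config &= ~(1 << offset);
        write_config();
}

int main(void)
{
        set_output(3, 1);       /* output 3 on  -> writes 0x08 */
        set_output(0, 1);       /* output 0 on  -> writes 0x09 */
        set_output(3, 0);       /* output 3 off -> writes 0x01 */
        return 0;
}
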
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
index f6fae0e50e6..cd651ec8d03 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/mcp23s08.c
@@ -6,12 +6,10 @@
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
-
+#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>
-#include <asm/gpio.h>
-
/* Registers are all 8 bits wide.
*
@@ -433,3 +431,4 @@ static void __exit mcp23s08_exit(void)
module_exit(mcp23s08_exit);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:mcp23s08");
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index cdb6574d25a..6a2fb3fbb3d 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
#ifdef CONFIG_OF_GPIO
@@ -20,8 +21,6 @@
#include <linux/of_gpio.h>
#endif
-#include <asm/gpio.h>
-
#define PCA953X_INPUT 0
#define PCA953X_OUTPUT 1
#define PCA953X_INVERT 2
@@ -40,6 +39,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca9557", 8, },
{ "max7310", 8, },
+ { "max7315", 8, },
{ "pca6107", 8, },
{ "tca6408", 8, },
{ "tca6416", 16, },
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index 9525724be73..29f19ce3e80 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -20,14 +20,14 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
-#include <asm/gpio.h>
-
static const struct i2c_device_id pcf857x_id[] = {
{ "pcf8574", 8 },
+ { "pcf8574a", 8 },
{ "pca8574", 8 },
{ "pca9670", 8 },
{ "pca9672", 8 },
diff --git a/drivers/gpio/ucb1400_gpio.c b/drivers/gpio/ucb1400_gpio.c
new file mode 100644
index 00000000000..50e6bd1392c
--- /dev/null
+++ b/drivers/gpio/ucb1400_gpio.c
@@ -0,0 +1,125 @@
+/*
+ * Philips UCB1400 GPIO driver
+ *
+ * Author: Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/ucb1400.h>
+
+struct ucb1400_gpio_data *ucbdata;
+
+static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off)
+{
+ struct ucb1400_gpio *gpio;
+ gpio = container_of(gc, struct ucb1400_gpio, gc);
+ ucb1400_gpio_set_direction(gpio->ac97, off, 0);
+ return 0;
+}
+
+static int ucb1400_gpio_dir_out(struct gpio_chip *gc, unsigned off, int val)
+{
+ struct ucb1400_gpio *gpio;
+ gpio = container_of(gc, struct ucb1400_gpio, gc);
+ ucb1400_gpio_set_direction(gpio->ac97, off, 1);
+ ucb1400_gpio_set_value(gpio->ac97, off, val);
+ return 0;
+}
+
+static int ucb1400_gpio_get(struct gpio_chip *gc, unsigned off)
+{
+ struct ucb1400_gpio *gpio;
+ gpio = container_of(gc, struct ucb1400_gpio, gc);
+ return ucb1400_gpio_get_value(gpio->ac97, off);
+}
+
+static void ucb1400_gpio_set(struct gpio_chip *gc, unsigned off, int val)
+{
+ struct ucb1400_gpio *gpio;
+ gpio = container_of(gc, struct ucb1400_gpio, gc);
+ ucb1400_gpio_set_value(gpio->ac97, off, val);
+}
+
+static int ucb1400_gpio_probe(struct platform_device *dev)
+{
+ struct ucb1400_gpio *ucb = dev->dev.platform_data;
+ int err = 0;
+
+ if (!(ucbdata && ucbdata->gpio_offset)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ platform_set_drvdata(dev, ucb);
+
+ ucb->gc.label = "ucb1400_gpio";
+ ucb->gc.base = ucbdata->gpio_offset;
+ ucb->gc.ngpio = 10;
+ ucb->gc.owner = THIS_MODULE;
+
+ ucb->gc.direction_input = ucb1400_gpio_dir_in;
+ ucb->gc.direction_output = ucb1400_gpio_dir_out;
+ ucb->gc.get = ucb1400_gpio_get;
+ ucb->gc.set = ucb1400_gpio_set;
+ ucb->gc.can_sleep = 1;
+
+ err = gpiochip_add(&ucb->gc);
+ if (err)
+ goto err;
+
+ if (ucbdata && ucbdata->gpio_setup)
+ err = ucbdata->gpio_setup(&dev->dev, ucb->gc.ngpio);
+
+err:
+ return err;
+
+}
+
+static int ucb1400_gpio_remove(struct platform_device *dev)
+{
+ int err = 0;
+ struct ucb1400_gpio *ucb = platform_get_drvdata(dev);
+
+ if (ucbdata && ucbdata->gpio_teardown) {
+ err = ucbdata->gpio_teardown(&dev->dev, ucb->gc.ngpio);
+ if (err)
+ return err;
+ }
+
+ err = gpiochip_remove(&ucb->gc);
+ return err;
+}
+
+static struct platform_driver ucb1400_gpio_driver = {
+ .probe = ucb1400_gpio_probe,
+ .remove = ucb1400_gpio_remove,
+ .driver = {
+ .name = "ucb1400_gpio"
+ },
+};
+
+static int __init ucb1400_gpio_init(void)
+{
+ return platform_driver_register(&ucb1400_gpio_driver);
+}
+
+static void __exit ucb1400_gpio_exit(void)
+{
+ platform_driver_unregister(&ucb1400_gpio_driver);
+}
+
+void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data)
+{
+ ucbdata = data;
+}
+
+module_init(ucb1400_gpio_init);
+module_exit(ucb1400_gpio_exit);
+
+MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e4d971c8b9d..f831ea15929 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -102,6 +102,7 @@ config DRM_I915
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
+ select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 230c9ffdd5e..80391995bde 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
if (IS_ERR(obj->filp))
goto free;
+ /* Basically we want to disable the OOM killer and handle ENOMEM
+ * ourselves by sacrificing pages from cached buffers.
+ * XXX shmem_file_[gs]et_gfp_mask()
+ */
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+ GFP_HIGHUSER |
+ __GFP_COLD |
+ __GFP_FS |
+ __GFP_RECLAIMABLE |
+ __GFP_NORETRY |
+ __GFP_NOWARN |
+ __GFP_NOMEMALLOC);
+
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5269dfa5f62..fa7b9be096b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_tiling.o \
+ i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1e3bdcee863..f8ce9a3a420 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_gem_object *obj = obj_priv->obj;
- seq_printf(m, " %p: %s %08x %08x %d",
+ seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
obj,
get_pin_flag(obj_priv),
+ obj->size,
obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
+ obj_priv->last_rendering_seqno,
+ obj_priv->dirty ? "dirty" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 5a49a1867b3..45d507ebd3f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include <linux/vgaarb.h>
/* Really want an OS-independent resettable timer. Would like to have
@@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
+ trace_i915_ring_wait_begin (dev);
+
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
- if (ring->space >= n)
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end (dev);
return 0;
+ }
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -77,6 +82,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
}
+ trace_i915_ring_wait_end (dev);
return -EBUSY;
}
@@ -922,7 +928,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
* how much was set aside so we can use it for our own purposes.
*/
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size)
+ uint32_t *preallocated_size,
+ uint32_t *start)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 tmp = 0;
@@ -1009,10 +1016,159 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
return -1;
}
*preallocated_size = stolen - overhead;
+ *start = overhead;
return 0;
}
+#define PTE_ADDRESS_MASK 0xfffff000
+#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+#define PTE_MAPPING_TYPE_MASK (3 << 1)
+#define PTE_VALID (1 << 0)
+
+/**
+ * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * @dev: drm device
+ * @gtt_addr: address to translate
+ *
+ * Some chip functions require allocations from stolen space but need the
+ * physical address of the memory in question. We use this routine
+ * to get a physical address suitable for register programming from a given
+ * GTT address.
+ */
+static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ unsigned long gtt_addr)
+{
+ unsigned long *gtt;
+ unsigned long entry, phys;
+ int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+ int gtt_offset, gtt_size;
+
+ if (IS_I965G(dev)) {
+ if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ gtt_offset = 2*1024*1024;
+ gtt_size = 2*1024*1024;
+ } else {
+ gtt_offset = 512*1024;
+ gtt_size = 512*1024;
+ }
+ } else {
+ gtt_bar = 3;
+ gtt_offset = 0;
+ gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+ }
+
+ gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
+ gtt_size);
+ if (!gtt) {
+ DRM_ERROR("ioremap of GTT failed\n");
+ return 0;
+ }
+
+ entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
+
+ DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+ /* Mask out these reserved bits on this hardware. */
+ if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+ IS_I945G(dev) || IS_I945GM(dev)) {
+ entry &= ~PTE_ADDRESS_MASK_HIGH;
+ }
+
+ /* If it's not a mapping type we know, then bail. */
+ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+ (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
+ iounmap(gtt);
+ return 0;
+ }
+
+ if (!(entry & PTE_VALID)) {
+ DRM_ERROR("bad GTT entry in stolen space\n");
+ iounmap(gtt);
+ return 0;
+ }
+
+ iounmap(gtt);
+
+	phys = (entry & PTE_ADDRESS_MASK) |
+ ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+
+ DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+
+ return phys;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+ DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
+ DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *compressed_llb;
+ unsigned long cfb_base, ll_base;
+
+ /* Leave 1M for line length buffer & misc. */
+ compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+ if (!compressed_fb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
+	if (!cfb_base) {
+		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+		drm_mm_put_block(compressed_fb);
+		return;
+	}
+
+ if (!IS_GM45(dev)) {
+ compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+ 4096, 0);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
+		if (!ll_base) {
+			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+			drm_mm_put_block(compressed_fb);
+			drm_mm_put_block(compressed_llb);
+			return;
+		}
+ }
+
+ dev_priv->cfb_size = size;
+
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ }
+
+ DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+ ll_base, size >> 20);
+}
+
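
i915_gtt_to_phys() above recovers a physical address from a GTT PTE by combining the low address bits with the extra high bits kept in bits 7:4 of the entry. A stand-alone sketch of that decode with a made-up PTE value:

#include <stdio.h>
#include <stdint.h>

#define PTE_ADDRESS_MASK        0xfffff000
#define PTE_ADDRESS_MASK_HIGH   0x000000f0      /* extra physical address bits (i915+) */
#define PTE_VALID               (1 << 0)

int main(void)
{
        uint32_t entry = 0x1234f0a1;    /* hypothetical GTT PTE */
        uint64_t phys;

        if (!(entry & PTE_VALID)) {
                printf("bad GTT entry\n");
                return 1;
        }

        phys = (entry & PTE_ADDRESS_MASK) |
               ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));

        printf("PTE 0x%08x -> physical address 0x%010llx\n",
               (unsigned)entry, (unsigned long long)phys);
        return 0;
}
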
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1027,6 +1183,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
}
static int i915_load_modeset_init(struct drm_device *dev,
+ unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
@@ -1047,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+ DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
/* Let GEM Manage from end of prealloc space to end of aperture.
*
@@ -1059,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev,
*/
i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
if (ret)
goto out;
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
+ i915_powersave) {
+ int cfb_size;
+
+ /* Try to get an 8M buffer... */
+ if (prealloc_size > (9*1024*1024))
+ cfb_size = 8*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
@@ -1180,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1234,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+ ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
if (ret)
goto out_iomapfree;
@@ -1300,8 +1476,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return ret;
}
+ /* Start out suspended */
+ dev_priv->mm.suspended = 1;
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev, prealloc_start,
+ prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
@@ -1313,6 +1493,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_IGDNG(dev))
intel_opregion_init(dev, 0);
+ setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ (unsigned long) dev);
return 0;
out_workqueue_free:
@@ -1333,6 +1515,7 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
destroy_workqueue(dev_priv->wq);
+ del_timer_sync(&dev_priv->hangcheck_timer);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -1472,6 +1655,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dbe568c9327..b93814c0d3e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
+ dev_priv->suspended = 1;
+
return 0;
}
@@ -97,8 +99,6 @@ static int i915_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
@@ -124,9 +124,135 @@ static int i915_resume(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
+ dev_priv->suspended = 0;
+
return ret;
}
+/**
+ * i965_reset - reset chip after a hang
+ * @dev: drm device to reset
+ * @flags: reset domains
+ *
+ * Reset the chip. Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+int i965_reset(struct drm_device *dev, u8 flags)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long timeout;
+ u8 gdrst;
+ /*
+ * We really should only reset the display subsystem if we actually
+ * need to
+ */
+ bool need_display = true;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * Clear request list
+ */
+ i915_gem_retire_requests(dev);
+
+ if (need_display)
+ i915_save_display(dev);
+
+ if (IS_I965G(dev) || IS_G4X(dev)) {
+ /*
+ * Set the domains we want to reset, then the reset bit (bit 0).
+ * Clear the reset bit after a while and wait for hardware status
+ * bit (bit 1) to be set
+ */
+ pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+ udelay(50);
+ pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+
+ /* ...we don't want to loop forever though, 500ms should be plenty */
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ udelay(100);
+ pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+ } while ((gdrst & 0x1) && time_after(timeout, jiffies));
+
+ if (gdrst & 0x1) {
+ WARN(true, "i915: Failed to reset chip\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EIO;
+ }
+ } else {
+ DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -ENODEV;
+ }
+
+ /* Ok, now get things going again... */
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there. Fortunately we don't need to do this unless we reset the
+ * chip at a PCI level.
+ *
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev_priv->mm.suspended) {
+ drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+ struct drm_gem_object *obj = ring->ring_obj;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ dev_priv->mm.suspended = 0;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ mutex_lock(&dev->struct_mutex);
+ }
+
+ /*
+ * Display needs restore too...
+ */
+ if (need_display)
+ i915_restore_display(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -234,6 +360,8 @@ static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
+ i915_gem_shrinker_init();
+
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module pararmeter.
@@ -260,6 +388,7 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
+ i915_gem_shrinker_exit();
drm_exit(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0632f8e76a..b24b2d145b7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -48,6 +48,11 @@ enum pipe {
PIPE_B,
};
+enum plane {
+ PLANE_A = 0,
+ PLANE_B,
+};
+
#define I915_NUM_PIPE 2
/* Interface history:
@@ -148,6 +153,23 @@ struct drm_i915_error_state {
struct timeval time;
};
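+/*
+ * Table of per-chipset display callbacks (FBC control, watermark and
+ * clock queries); reached through dev_priv->display so callers do not
+ * need chipset checks at every call site.
+ */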
+struct drm_i915_display_funcs {
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ bool (*fbc_enabled)(struct drm_crtc *crtc);
+ void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*disable_fbc)(struct drm_device *dev);
+ int (*get_display_clock_speed)(struct drm_device *dev);
+ int (*get_fifo_size)(struct drm_device *dev, int plane);
+ void (*update_wm)(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size);
+ /* clock updates for mode set */
+ /* cursor updates */
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+ /* clock gating init */
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -198,10 +220,21 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd;
+
bool cursor_needs_physical;
struct drm_mm vram;
+ unsigned long cfb_size;
+ unsigned long cfb_pitch;
+ int cfb_fence;
+ int cfb_plane;
+
int irq_enabled;
struct intel_opregion opregion;
@@ -222,6 +255,8 @@ typedef struct drm_i915_private {
unsigned int edp_support:1;
int lvds_ssc_freq;
+ struct notifier_block lid_notifier;
+
int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -234,7 +269,11 @@ typedef struct drm_i915_private {
struct work_struct error_work;
struct workqueue_struct *wq;
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
/* Register state */
+ bool suspended;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
@@ -350,6 +389,15 @@ typedef struct drm_i915_private {
int gtt_mtrr;
/**
+ * Membership on list of all loaded devices, used to evict
+ * inactive buffers under memory pressure.
+ *
+ * Modifications should only be done whilst holding the
+ * shrink_list_lock spinlock.
+ */
+ struct list_head shrink_list;
+
+ /**
* List of objects currently involved in rendering from the
* ringbuffer.
*
@@ -432,7 +480,7 @@ typedef struct drm_i915_private {
* It prevents command submission from occurring and makes
* every pending request fail
*/
- int wedged;
+ atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -491,10 +539,7 @@ struct drm_i915_gem_object {
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
- /**
- * Required alignment for the object
- */
- uint32_t gtt_alignment;
+
/**
* Fake offset for use by mmap(2)
*/
@@ -541,6 +586,11 @@ struct drm_i915_gem_object {
* in an execbuffer object list.
*/
int in_execbuffer;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ int madv;
};
/**
@@ -585,6 +635,8 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern void i915_save_display(struct drm_device *dev);
+extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -604,8 +656,10 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
+extern int i965_reset(struct drm_device *dev, u8 flags);
/* i915_irq.c */
+void i915_hangcheck_elapsed(unsigned long data);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -676,6 +730,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
@@ -695,6 +751,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
+bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -720,6 +777,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_shrinker_init(void);
+void i915_gem_shrinker_exit(void);
+
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -767,6 +827,8 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void i8xx_disable_fbc(struct drm_device *dev);
+extern void g4x_disable_fbc(struct drm_device *dev);
/**
* Lock test for when it's just for synchronization of ring access.
@@ -864,6 +926,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
(dev)->pci_device == 0x0042 || \
(dev)->pci_device == 0x0046)
@@ -876,6 +939,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
@@ -909,12 +973,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c67317112f4..40727d4c291 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
@@ -48,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -316,6 +321,45 @@ fail_unlock:
return ret;
}
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
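+/*
+ * Wrapper around i915_gem_object_get_pages(): on -ENOMEM it evicts
+ * something from the GTT and retries once with __GFP_NORETRY cleared so
+ * the shmem allocation can try harder before we give up.
+ */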
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = i915_gem_object_get_pages(obj);
+
+ /* If there is insufficient memory to map in the pages, attempt
+ * to make some space by throwing out some old buffers.
+ */
+ if (ret == -ENOMEM) {
+ struct drm_device *dev = obj->dev;
+ gfp_t gfp;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ gfp = i915_gem_object_get_page_gfp_mask(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+ ret = i915_gem_object_get_pages(obj);
+ i915_gem_object_set_page_gfp_mask (obj, gfp);
+ }
+
+ return ret;
+}
+
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -367,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -842,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -1155,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
+ if (ret)
+ goto unlock;
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
}
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ if (ret)
+ goto unlock;
}
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1184,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
case -EAGAIN:
return VM_FAULT_OOM;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
default:
- return VM_FAULT_NOPAGE;
+ return VM_FAULT_SIGBUS;
}
}
@@ -1388,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
if (ret) {
@@ -1399,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
args->offset = obj_priv->mmap_offset;
- obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
- /* Make sure the alignment is correct for fence regs etc */
- if (obj_priv->agp_mem &&
- (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
/*
* Pull it into the GTT so that we have a page list (makes the
* initial fault faster and any subsequent flushing possible).
*/
if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -1437,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
int i;
BUG_ON(obj_priv->pages_refcount == 0);
+ BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
if (--obj_priv->pages_refcount != 0)
return;
@@ -1444,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
if (obj_priv->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- for (i = 0; i < page_count; i++)
- if (obj_priv->pages[i] != NULL) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj_priv->madv == I915_MADV_DONTNEED)
+ obj_priv->dirty = 0;
+
+ for (i = 0; i < page_count; i++) {
+ if (obj_priv->pages[i] == NULL)
+ break;
+
+ if (obj_priv->dirty)
+ set_page_dirty(obj_priv->pages[i]);
+
+ if (obj_priv->madv == I915_MADV_WILLNEED)
mark_page_accessed(obj_priv->pages[i]);
- page_cache_release(obj_priv->pages[i]);
- }
+
+ page_cache_release(obj_priv->pages[i]);
+ }
obj_priv->dirty = 0;
drm_free_large(obj_priv->pages);
@@ -1489,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
obj_priv->last_rendering_seqno = 0;
}
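+/*
+ * madv lifecycle: objects start as I915_MADV_WILLNEED, userspace may mark
+ * them I915_MADV_DONTNEED (purgeable), and once the backing store has been
+ * dropped they stay __I915_MADV_PURGED for good.
+ */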
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct inode *inode;
+
+ inode = obj->filp->f_path.dentry->d_inode;
+ if (inode->i_op->truncate)
+ inode->i_op->truncate (inode);
+
+ obj_priv->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+ return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
@@ -1577,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
}
}
- if (was_empty && !dev_priv->mm.suspended)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
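+ /* Arm the hangcheck timer whenever new work is queued; it re-arms
+ * itself from i915_hangcheck_elapsed() until the request list drains. */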
+ if (!dev_priv->mm.suspended) {
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ if (was_empty)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ }
return seqno;
}
@@ -1623,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ trace_i915_gem_request_retire(dev, request->seqno);
+
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
@@ -1671,7 +1747,7 @@ out:
/**
* Returns true if seq1 is later than seq2.
*/
-static int
+bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
return (int32_t)(seq1 - seq2) >= 0;
@@ -1709,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
retiring_seqno = request->seqno;
if (i915_seqno_passed(seqno, retiring_seqno) ||
- dev_priv->mm.wedged) {
+ atomic_read(&dev_priv->mm.wedged)) {
i915_gem_retire_request(dev, request);
list_del(&request->list);
@@ -1751,6 +1827,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
BUG_ON(seqno == 0);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
if (IS_IGDNG(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1763,16 +1842,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_driver_irq_postinstall(dev);
}
+ trace_i915_gem_request_wait_begin(dev, seqno);
+
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
seqno) ||
- dev_priv->mm.wedged);
+ atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
+
+ trace_i915_gem_request_wait_end(dev, seqno);
}
- if (dev_priv->mm.wedged)
+ if (atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
if (ret && ret != -ERESTARTSYS)
@@ -1803,6 +1886,8 @@ i915_gem_flush(struct drm_device *dev,
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
+ trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+ invalidate_domains, flush_domains);
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
@@ -1915,6 +2000,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
+ /* blow away mappings if mapped through GTT */
+ i915_gem_release_mmap(obj);
+
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -1928,21 +2019,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+ BUG_ON(obj_priv->active);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
obj_priv->agp_mem = NULL;
}
- BUG_ON(obj_priv->active);
-
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
i915_gem_object_put_pages(obj);
+ BUG_ON(obj_priv->pages_refcount);
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
@@ -1956,40 +2042,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ if (i915_gem_object_is_purgeable(obj_priv))
+ i915_gem_object_truncate(obj);
+
+ trace_i915_gem_object_unbind(obj);
+
return 0;
}
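+/*
+ * Pick an eviction candidate from the inactive list: prefer the smallest
+ * clean (or purgeable) buffer of at least min_size bytes, falling back to
+ * the first large-enough buffer even if it is dirty.
+ */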
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *best = NULL;
+ struct drm_gem_object *first = NULL;
+
+ /* Try to find the smallest clean object */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (!best || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ return best;
+ }
+ if (!first)
+ first = obj;
+ }
+ }
+
+ return best ? best : first;
+}
+
static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+}
+
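+/*
+ * Make room for an object of min_size bytes. Each pass retires finished
+ * requests, then tries in turn: unbinding a suitable inactive buffer,
+ * waiting on the oldest outstanding request, flushing a large-enough
+ * buffer off the flushing list, and finally clearing the inactive list
+ * or the whole aperture.
+ */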
+static int
+i915_gem_evict_something(struct drm_device *dev, int min_size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
for (;;) {
+ i915_gem_retire_requests(dev);
+
/* If there's an inactive buffer available now, grab it
* and be done.
*/
- if (!list_empty(&dev_priv->mm.inactive_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
- BUG_ON(obj_priv->pin_count != 0);
+ obj = i915_gem_find_inactive_object(dev, min_size);
+ if (obj) {
+ struct drm_i915_gem_object *obj_priv;
+
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
+ obj_priv = obj->driver_private;
+ BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
/* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj);
- break;
+ return i915_gem_object_unbind(obj);
}
/* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
+ * things, wait for the next to finish and hopefully leave us
+ * a buffer to evict.
*/
if (!list_empty(&dev_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
@@ -2000,16 +2159,9 @@ i915_gem_evict_something(struct drm_device *dev)
ret = i915_wait_request(dev, request->seqno);
if (ret)
- break;
+ return ret;
- /* if waiting caused an object to become inactive,
- * then loop around and wait for it. Otherwise, we
- * assume that waiting freed and unbound something,
- * so there should now be some space in the GTT
- */
- if (!list_empty(&dev_priv->mm.inactive_list))
- continue;
- break;
+ continue;
}
/* If we didn't have anything on the request list but there
@@ -2018,46 +2170,44 @@ i915_gem_evict_something(struct drm_device *dev)
* will get moved to inactive.
*/
if (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ struct drm_i915_gem_object *obj_priv;
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- i915_add_request(dev, NULL, obj->write_domain);
+ /* Find an object that we can immediately reuse */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ obj = obj_priv->obj;
+ if (obj->size >= min_size)
+ break;
- obj = NULL;
- continue;
- }
+ obj = NULL;
+ }
- DRM_ERROR("inactive empty %d request empty %d "
- "flushing empty %d\n",
- list_empty(&dev_priv->mm.inactive_list),
- list_empty(&dev_priv->mm.request_list),
- list_empty(&dev_priv->mm.flushing_list));
- /* If we didn't do any of the above, there's nothing to be done
- * and we just can't fit it in.
- */
- return -ENOSPC;
- }
- return ret;
-}
+ if (obj != NULL) {
+ uint32_t seqno;
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- int ret;
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
+ if (seqno == 0)
+ return -ENOMEM;
- for (;;) {
- ret = i915_gem_evict_something(dev);
- if (ret != 0)
- break;
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+ }
+
+ /* If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty (&dev_priv->mm.inactive_list))
+ return i915_gem_evict_from_inactive_list(dev);
+ else
+ return i915_gem_evict_everything(dev);
}
- if (ret == -ENOSPC)
- return 0;
- return ret;
}
int
@@ -2080,7 +2230,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
BUG_ON(obj_priv->pages != NULL);
obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
if (obj_priv->pages == NULL) {
- DRM_ERROR("Faled to allocate page list\n");
obj_priv->pages_refcount--;
return -ENOMEM;
}
@@ -2091,7 +2240,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
page = read_mapping_page(mapping, i, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- DRM_ERROR("read_mapping_page failed: %d\n", ret);
i915_gem_object_put_pages(obj);
return ret;
}
@@ -2328,6 +2476,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
else
i830_write_fence_reg(reg);
+ trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
return 0;
}
@@ -2410,10 +2560,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- int page_count, ret;
+ bool retry_alloc = false;
+ int ret;
if (dev_priv->mm.suspended)
return -EBUSY;
+
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to bind a purgeable object\n");
+ return -EINVAL;
+ }
+
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -2433,30 +2590,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
}
if (obj_priv->gtt_space == NULL) {
- bool lists_empty;
-
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
- if (lists_empty) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOSPC;
- }
-
- ret = i915_gem_evict_something(dev);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
return ret;
- }
+
goto search_free;
}
@@ -2464,27 +2607,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
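+ /*
+ * On the retry pass, temporarily clear __GFP_NORETRY from the shmem
+ * mapping so the allocator may reclaim from other subsystems before
+ * we give up on binding this object.
+ */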
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask (obj,
+ i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
+ }
ret = i915_gem_object_get_pages(obj);
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask (obj,
+ i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
+ }
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
+
+ if (ret == -ENOMEM) {
+ /* first try to clear up some space from the GTT */
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret) {
+ /* now try to shrink everyone else */
+ if (! retry_alloc) {
+ retry_alloc = true;
+ goto search_free;
+ }
+
+ return ret;
+ }
+
+ goto search_free;
+ }
+
return ret;
}
- page_count = obj->size / PAGE_SIZE;
/* Create an AGP memory structure pointing at our pages, and bind it
* into the GTT.
*/
obj_priv->agp_mem = drm_agp_bind_pages(dev,
obj_priv->pages,
- page_count,
+ obj->size >> PAGE_SHIFT,
obj_priv->gtt_offset,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_put_pages(obj);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- return -ENOMEM;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ goto search_free;
}
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
@@ -2496,6 +2668,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
return 0;
}
@@ -2511,15 +2685,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
if (obj_priv->pages == NULL)
return;
- /* XXX: The 865 in particular appears to be weird in how it handles
- * cache flushing. We haven't figured it out, but the
- * clflush+agp_chipset_flush doesn't appear to successfully get the
- * data visible to the PGU, while wbinvd + agp_chipset_flush does.
- */
- if (IS_I865G(obj->dev)) {
- wbinvd();
- return;
- }
+ trace_i915_gem_object_clflush(obj);
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
@@ -2530,21 +2696,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
+ uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
+ old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
+ uint32_t old_write_domain;
+
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
@@ -2552,7 +2726,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@@ -2560,13 +2739,19 @@ static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/**
@@ -2579,6 +2764,7 @@ int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
@@ -2591,6 +2777,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (ret != 0)
return ret;
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
@@ -2609,6 +2798,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
obj_priv->dirty = 1;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2621,6 +2814,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
+ uint32_t old_write_domain, old_read_domains;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
@@ -2636,6 +2830,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
@@ -2656,6 +2853,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2777,6 +2978,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
+ uint32_t old_read_domains;
BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2823,6 +3025,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
i915_gem_clflush_object(obj);
}
+ old_read_domains = obj->read_domains;
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -2841,6 +3045,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
}
/**
@@ -2893,6 +3101,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_read_domains;
int i, ret;
if (offset == 0 && size == obj->size)
@@ -2939,8 +3148,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ old_read_domains = obj->read_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
+
return 0;
}
@@ -2984,6 +3198,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
target_obj_priv = target_obj->driver_private;
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -2995,25 +3224,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
+ /* Validate that the target is in a valid r/w GPU domain */
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -3027,7 +3238,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EINVAL;
}
-
if (reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@@ -3042,21 +3252,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
@@ -3068,6 +3263,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta, (int) target_obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
@@ -3126,6 +3352,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
@@ -3363,7 +3591,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
@@ -3421,8 +3649,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* error other than GTT full, or we've already tried again */
if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to pin buffers %d\n", ret);
+ if (ret != -ERESTARTSYS) {
+ unsigned long long total_size = 0;
+ for (i = 0; i < args->buffer_count; i++)
+ total_size += object_list[i]->size;
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ pinned+1, args->buffer_count,
+ total_size, ret);
+ DRM_ERROR("%d objects [%d pinned], "
+ "%d object bytes [%d pinned], "
+ "%d/%d gtt bytes\n",
+ atomic_read(&dev->object_count),
+ atomic_read(&dev->pin_count),
+ atomic_read(&dev->object_memory),
+ atomic_read(&dev->pin_memory),
+ atomic_read(&dev->gtt_memory),
+ dev->gtt_total);
+ }
goto err;
}
@@ -3433,7 +3676,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* evict everyone we can from the aperture */
ret = i915_gem_evict_everything(dev);
- if (ret)
+ if (ret && ret != -ENOSPC)
goto err;
}
@@ -3489,8 +3732,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3607,11 +3854,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to bind: %d\n", ret);
+ if (ret)
return ret;
- }
}
/*
* Pre-965 chips need a fence register set up in order to
@@ -3691,6 +3935,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -3803,6 +4054,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
return i915_gem_ring_throttle(dev, file_priv);
}
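+/*
+ * The madvise ioctl: userspace marks a buffer I915_MADV_DONTNEED to let
+ * the kernel drop its backing pages under memory pressure, or
+ * I915_MADV_WILLNEED to withdraw that permission; args->retained reports
+ * whether the contents still exist.
+ */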
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+ args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+ return -EINVAL;
+ }
+
+ if (obj_priv->madv != __I915_MADV_PURGED)
+ obj_priv->madv = args->madv;
+
+ /* if the object is no longer bound, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj_priv) &&
+ obj_priv->gtt_space == NULL)
+ i915_gem_object_truncate(obj);
+
+ args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
int i915_gem_init_object(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv;
@@ -3827,6 +4128,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
INIT_LIST_HEAD(&obj_priv->fence_list);
+ obj_priv->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(obj);
return 0;
}
@@ -3836,6 +4140,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ trace_i915_gem_object_destroy(obj);
+
while (obj_priv->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -3844,43 +4150,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_object_unbind(obj);
- i915_gem_free_mmap_offset(obj);
+ if (obj_priv->mmap_offset)
+ i915_gem_free_mmap_offset(obj);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
kfree(obj->driver_private);
}
-/** Unbinds all objects that are on the given buffer list. */
+/** Unbinds all inactive objects. */
static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- while (!list_empty(head)) {
- obj_priv = list_first_entry(head,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Pinned object in unbind list\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
+ obj = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->obj;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
- DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
}
}
-
return 0;
}
@@ -3902,6 +4200,7 @@ i915_gem_idle(struct drm_device *dev)
* We need to replace this with a semaphore, or something.
*/
dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
/* Cancel the retire work handler, wait for it to finish if running
*/
@@ -3931,7 +4230,7 @@ i915_gem_idle(struct drm_device *dev)
if (last_seqno == cur_seqno) {
if (stuck++ > 100) {
DRM_ERROR("hardware wedged\n");
- dev_priv->mm.wedged = 1;
+ atomic_set(&dev_priv->mm.wedged, 1);
DRM_WAKEUP(&dev_priv->irq_queue);
break;
}
@@ -3944,7 +4243,7 @@ i915_gem_idle(struct drm_device *dev)
i915_gem_retire_requests(dev);
spin_lock(&dev_priv->mm.active_list_lock);
- if (!dev_priv->mm.wedged) {
+ if (!atomic_read(&dev_priv->mm.wedged)) {
/* Active and flushing should now be empty as we've
* waited for a sequence higher than any pending execbuffer
*/
@@ -3962,29 +4261,41 @@ i915_gem_idle(struct drm_device *dev)
* the GPU domains and just stuff them onto inactive.
*/
while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ ret = i915_gem_evict_from_inactive_list(dev);
WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
if (ret) {
mutex_unlock(&dev->struct_mutex);
@@ -4206,9 +4517,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
+ atomic_set(&dev_priv->mm.wedged, 0);
}
mutex_lock(&dev->struct_mutex);
@@ -4274,6 +4585,10 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
+ spin_lock(&shrink_list_lock);
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
@@ -4491,3 +4806,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
list_del_init(i915_file_priv->mm.request_list.next);
mutex_unlock(&dev->struct_mutex);
}
+
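+/*
+ * Memory-pressure hook registered via register_shrinker(). When called
+ * with nr_to_scan == 0 we only report how many inactive objects could be
+ * freed; otherwise we walk every device on shrink_list, unbinding
+ * purgeable inactive buffers first and any inactive buffer second.
+ * Returning -1 signals that we could not take our locks this time.
+ */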
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+ drm_i915_private_t *dev_priv, *next_dev;
+ struct drm_i915_gem_object *obj_priv, *next_obj;
+ int cnt = 0;
+ int would_deadlock = 1;
+
+ /* "fast-path" to count number of available objects */
+ if (nr_to_scan == 0) {
+ spin_lock(&shrink_list_lock);
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (mutex_trylock(&dev->struct_mutex)) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ }
+ spin_unlock(&shrink_list_lock);
+
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ }
+
+ spin_lock(&shrink_list_lock);
+
+ /* first scan for clean buffers */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (! mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ i915_gem_retire_requests(dev);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (i915_gem_object_is_purgeable(obj_priv)) {
+ i915_gem_object_unbind(obj_priv->obj);
+ if (--nr_to_scan <= 0)
+ break;
+ }
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+
+ if (nr_to_scan <= 0)
+ break;
+ }
+
+ /* second pass, evict/count anything still on the inactive list */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (! mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (nr_to_scan > 0) {
+ i915_gem_object_unbind(obj_priv->obj);
+ nr_to_scan--;
+ } else
+ cnt++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+ }
+
+ spin_unlock(&shrink_list_lock);
+
+ if (would_deadlock)
+ return -1;
+ else if (cnt > 0)
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ else
+ return 0;
+}
+
+static struct shrinker shrinker = {
+ .shrink = i915_gem_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+ register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6c89f2ff249..4dfeec7cdd4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
}
@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
error_work);
struct drm_device *dev = dev_priv->dev;
- char *event_string = "ERROR=1";
- char *envp[] = { event_string, NULL };
+ char *error_event[] = { "ERROR=1", NULL };
+ char *reset_event[] = { "RESET=1", NULL };
+ char *reset_done_event[] = { "ERROR=0", NULL };
DRM_DEBUG("generating error event\n");
-
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+
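+ /*
+ * If the GPU is wedged, try a render reset on 965-class hardware and
+ * bracket it with RESET=1 / ERROR=0 uevents so userspace can tell when
+ * recovery starts and whether it succeeded.
+ */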
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ if (IS_I965G(dev)) {
+ DRM_DEBUG("resetting chip\n");
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ if (!i965_reset(dev, GDRST_RENDER)) {
+ atomic_set(&dev_priv->mm.wedged, 0);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ }
+ } else {
+ printk("reboot required\n");
+ }
+ }
}
/**
@@ -372,7 +388,7 @@ out:
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-static void i915_handle_error(struct drm_device *dev)
+static void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev)
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
+ if (wedged) {
+ atomic_set(&dev_priv->mm.wedged, 1);
+
+ /*
+ * Wakeup waiting processes so they don't hang
+ */
+ printk("i915: Waking up sleeping processes\n");
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+
queue_work(dev_priv->wq, &dev_priv->error_work);
}
@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
pipeb_stats = I915_READ(PIPEBSTAT);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev);
+ i915_handle_error(dev, false);
/*
* Clear the PIPE(A|B)STAT regs before the IIR
@@ -599,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
if (pipea_stats & vblank_status) {
@@ -880,6 +910,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
+struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. The first time this is called we simply record
+ * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
+ * again, we assume the chip is wedged and try to fix it.
+ */
+void i915_hangcheck_elapsed(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t acthd;
+
+ if (!IS_I965G(dev))
+ acthd = I915_READ(ACTHD);
+ else
+ acthd = I915_READ(ACTHD_I965);
+
+ /* If all outstanding work has completed, a stalled ACTHD is expected, not a hang. */
+ if (list_empty(&dev_priv->mm.request_list) ||
+ i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+ dev_priv->hangcheck_count = 0;
+ return;
+ }
+
+ if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+ return;
+ }
+
+ /* Re-arm the timer in case the chip hangs without another request being added */
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+
+ if (acthd != dev_priv->last_acthd)
+ dev_priv->hangcheck_count = 0;
+ else
+ dev_priv->hangcheck_count++;
+
+ dev_priv->last_acthd = acthd;
+}
+
/* drm_dma.h hooks
*/
static void igdng_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index e4b4e8898e3..2d5193556d3 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 blc_pwm_ctl, blc_pwm_ctl2;
+ u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAIL;
@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
- if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
+ if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
- else
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
-
+ else {
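+ /*
+ * Legacy path: derive the maximum PWM duty cycle from the
+ * modulation-frequency field (IGD packs it one bit differently)
+ * and scale the 0-255 ASLE request onto it.
+ */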
+ if (IS_IGD(dev)) {
+ blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT;
+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
+ } else {
+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
+ }
+ level = (bclp * max_backlight) / 255;
+ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
+ }
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3f796355346..0466ddbeba3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,6 +86,10 @@
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
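+/* Graphics reset register in PCI config space, driven by i965_reset();
+ * bits 3:2 select which domain to reset. */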
+#define GDRST 0xc0
+#define GDRST_FULL (0<<2)
+#define GDRST_RENDER (1<<2)
+#define GDRST_MEDIA (3<<2)
/* VGA stuff */
@@ -344,9 +348,37 @@
#define FBC_CTL_PLANEA (0<<0)
#define FBC_CTL_PLANEB (1<<0)
#define FBC_FENCE_OFF 0x0321b
+#define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE 0x3200
+#define DPFC_CONTROL 0x3208
+#define DPFC_CTL_EN (1<<31)
+#define DPFC_CTL_PLANEA (0<<30)
+#define DPFC_CTL_PLANEB (1<<30)
+#define DPFC_CTL_FENCE_EN (1<<29)
+#define DPFC_SR_EN (1<<10)
+#define DPFC_CTL_LIMIT_1X (0<<6)
+#define DPFC_CTL_LIMIT_2X (1<<6)
+#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_RECOMP_CTL 0x320c
+#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS 0x3210
+#define DPFC_INVAL_SEG_SHIFT (16)
+#define DPFC_INVAL_SEG_MASK (0x07ff0000)
+#define DPFC_COMP_SEG_SHIFT (0)
+#define DPFC_COMP_SEG_MASK (0x000003ff)
+#define DPFC_STATUS2 0x3214
+#define DPFC_FENCE_YOFF 0x3218
+#define DPFC_CHICKEN 0x3224
+#define DPFC_HT_MODIFY (1<<31)
+
/*
* GPIO regs
*/
@@ -2000,6 +2032,8 @@
#define PF_ENABLE (1<<31)
#define PFA_WIN_SZ 0x68074
#define PFB_WIN_SZ 0x68874
+#define PFA_WIN_POS 0x68070
+#define PFB_WIN_POS 0x68870
/* legacy palette */
#define LGC_PALETTE_A 0x4a000
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 20d4d19f556..bd6d8d91ca9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
return;
}
+
static void i915_restore_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
return;
}
-int i915_save_state(struct drm_device *dev)
+
+void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-
- /* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
-
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
@@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev)
/* This is only meaningful in non-KMS mode */
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
+
/* Cursor state */
dev_priv->saveCURACNTR = I915_READ(CURACNTR);
dev_priv->saveCURAPOS = I915_READ(CURAPOS);
@@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
- /* Interrupt state */
- dev_priv->saveIIR = I915_READ(IIR);
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
-
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
-
- /* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
- /* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
- /* Scratch space */
- for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
- }
- for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-
- /* Fences */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- } else {
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- }
i915_save_vga(dev);
-
- return 0;
}
-int i915_restore_state(struct drm_device *dev)
+void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-
- /* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
- I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
-
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Display arbitration */
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
- /* Fences */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- }
-
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
@@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
+
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
+
/* Cursor state */
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
@@ -586,6 +523,95 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
+ i915_restore_vga(dev);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+ /* Render Standby */
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
+ i915_save_display(dev);
+
+ /* Interrupt state */
+ dev_priv->saveIER = I915_READ(IER);
+ dev_priv->saveIMR = I915_READ(IMR);
+
+ /* Clock gating state */
+ dev_priv->saveD_STATE = I915_READ(D_STATE);
+ dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
+
+ /* Cache mode state */
+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ /* Fences */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ } else {
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ }
+
+ return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+ /* Render Standby */
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+
+ /* Hardware status page */
+ I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+
+ /* Fences */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ } else {
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ }
+
+ i915_restore_display(dev);
+
+ /* Interrupt state */
+ I915_WRITE (IER, dev_priv->saveIER);
+ I915_WRITE (IMR, dev_priv->saveIMR);
+
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
@@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- i915_restore_vga(dev);
-
return 0;
}
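Splitting the display registers out into i915_save_display() and
i915_restore_display() means a caller that only disturbs the display (or,
conversely, only the render engine) no longer has to run the full state save.
A hypothetical caller sketch, not part of this patch, assuming a reset path
that wants to preserve the scan-out configuration only:

    /* Hypothetical: keep only the display block across a render reset. */
    static void reset_preserving_display(struct drm_device *dev)
    {
            i915_save_display(dev);

            /* ... trigger the chipset reset (e.g. via GDRST) and wait ... */

            i915_restore_display(dev);
    }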
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 00000000000..5567a40816f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,315 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, size)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->size = obj->size;
+ ),
+
+ TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_bind,
+
+ TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+
+ TP_ARGS(obj, gtt_offset),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, gtt_offset)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->gtt_offset = gtt_offset;
+ ),
+
+ TP_printk("obj=%p, gtt_offset=%08x",
+ __entry->obj, __entry->gtt_offset)
+);
+
+TRACE_EVENT(i915_gem_object_clflush,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+
+ TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+
+ TP_ARGS(obj, old_read_domains, old_write_domain),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, read_domains)
+ __field(u32, write_domain)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->read_domains = obj->read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ ),
+
+ TP_printk("obj=%p, read=%04x, write=%04x",
+ __entry->obj,
+ __entry->read_domains, __entry->write_domain)
+);
+
+TRACE_EVENT(i915_gem_object_get_fence,
+
+ TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+
+ TP_ARGS(obj, fence, tiling_mode),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(int, fence)
+ __field(int, tiling_mode)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->fence = fence;
+ __entry->tiling_mode = tiling_mode;
+ ),
+
+ TP_printk("obj=%p, fence=%d, tiling=%d",
+ __entry->obj, __entry->fence, __entry->tiling_mode)
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_destroy,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+/* batch tracing */
+
+TRACE_EVENT(i915_gem_request_submit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_flush,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno,
+ u32 flush_domains, u32 invalidate_domains),
+
+ TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ __field(u32, flush_domains)
+ __field(u32, invalidate_domains)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ __entry->flush_domains = flush_domains;
+ __entry->invalidate_domains = invalidate_domains;
+ ),
+
+ TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
+ __entry->dev, __entry->seqno,
+ __entry->flush_domains, __entry->invalidate_domains)
+);
+
+
+TRACE_EVENT(i915_gem_request_complete,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_ring_wait_begin,
+
+ TP_PROTO(struct drm_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ ),
+
+ TP_printk("dev=%p", __entry->dev)
+);
+
+TRACE_EVENT(i915_ring_wait_end,
+
+ TP_PROTO(struct drm_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ ),
+
+ TP_printk("dev=%p", __entry->dev)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 00000000000..ead876eb6ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Authors:
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "i915_drv.h"
+
+#define CREATE_TRACE_POINTS
+#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1e28c1652fd..4337414846b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
+ else if (IS_IGDNG(dev_priv->dev))
+ dev_priv->lvds_ssc_freq =
+ general->ssc_freq ? 100 : 120;
else
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 96;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 88814fa2dfd..212e22740fc 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -179,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa, temp;
+ u32 adpa;
bool ret;
- temp = adpa = I915_READ(PCH_ADPA);
-
- adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(PCH_ADPA, adpa);
+ adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
@@ -212,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
else
ret = false;
- /* restore origin register */
- I915_WRITE(PCH_ADPA, temp);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0227b165290..93ff6c03733 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,8 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/module.h>
+#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include "drmP.h"
@@ -875,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
refclk, best_clock);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
@@ -952,6 +954,241 @@ intel_wait_for_vblank(struct drm_device *dev)
mdelay(20);
}
+/* Parameters have changed, update FBC info */
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+
+ if (fb->pitch < dev_priv->cfb_pitch)
+ dev_priv->cfb_pitch = fb->pitch;
+
+ /* FBC_CTL wants 64B units */
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+ plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ fbc_ctl |= dev_priv->cfb_fence;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+}
+
+void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
+ ; /* nothing */
+
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG("disabled FBC\n");
+}
+
+static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
+ DPFC_CTL_PLANEB);
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG("disabled FBC\n");
+}
+
+static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @crtc: CRTC to point the compressor at
+ * @mode: mode in use
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+static void intel_update_fbc(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane;
+
+ if (!i915_powersave)
+ return;
+
+ if (!dev_priv->display.fbc_enabled ||
+ !dev_priv->display.enable_fbc ||
+ !dev_priv->display.disable_fbc)
+ return;
+
+ if (!crtc->fb)
+ return;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj_priv = intel_fb->obj->driver_private;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ if (intel_fb->obj->size > dev_priv->cfb_size) {
+ DRM_DEBUG("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG("mode incompatible with compression, disabling\n");
+ goto out_disable;
+ }
+ if ((mode->hdisplay > 2048) ||
+ (mode->vdisplay > 1536)) {
+ DRM_DEBUG("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+ DRM_DEBUG("plane not 0, disabling compression\n");
+ goto out_disable;
+ }
+ if (obj_priv->tiling_mode != I915_TILING_X) {
+ DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+ goto out_disable;
+ }
+
+ if (dev_priv->display.fbc_enabled(crtc)) {
+ /* We can re-enable it in this case, but need to update pitch */
+ if (fb->pitch > dev_priv->cfb_pitch)
+ dev_priv->display.disable_fbc(dev);
+ if (obj_priv->fence_reg != dev_priv->cfb_fence)
+ dev_priv->display.disable_fbc(dev);
+ if (plane != dev_priv->cfb_plane)
+ dev_priv->display.disable_fbc(dev);
+ }
+
+ if (!dev_priv->display.fbc_enabled(crtc)) {
+ /* Now try to turn it back on if possible */
+ dev_priv->display.enable_fbc(crtc, 500);
+ }
+
+ return;
+
+out_disable:
+ DRM_DEBUG("unsupported config, disabling FBC\n");
+ /* Multiple disables should be harmless */
+ if (dev_priv->display.fbc_enabled(crtc))
+ dev_priv->display.disable_fbc(dev);
+}
+
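Condensed, the eligibility portion of intel_update_fbc() above is a chain of
cheap checks before the disable/re-enable decision; an illustrative
restatement of just that predicate (parameter names are placeholders, not
driver API):

    /* Mirrors the checks in intel_update_fbc(); returns 1 if FBC may be on. */
    static int fbc_possible(unsigned long fb_size, unsigned long cfb_size,
                            int interlaced_or_dblscan,
                            int hdisplay, int vdisplay,
                            int plane, int pre965_mobile, int x_tiled)
    {
            if (fb_size > cfb_size)
                    return 0;   /* framebuffer won't fit the compressed buffer */
            if (interlaced_or_dblscan)
                    return 0;   /* unsupported scan-out mode */
            if (hdisplay > 2048 || vdisplay > 1536)
                    return 0;   /* mode too large for the compressor */
            if (pre965_mobile && plane != 0)
                    return 0;   /* 915GM/945GM: plane A only */
            if (!x_tiled)
                    return 0;   /* FBC requires an X-tiled framebuffer */
            return 1;
    }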
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
@@ -964,12 +1201,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
unsigned long Start, Offset;
- int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr, alignment;
int ret;
@@ -979,12 +1217,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (pipe) {
+ switch (plane) {
case 0:
case 1:
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
@@ -1086,6 +1324,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_READ(dspbase);
}
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_wait_for_vblank(dev);
if (old_fb) {
@@ -1217,6 +1458,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
+ int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1268,6 +1510,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+ /* Enable panel fitting for LVDS */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(pf_ctl_reg);
+ I915_WRITE(pf_ctl_reg, temp | PF_ENABLE);
+
+ /* currently full aspect */
+ I915_WRITE(pf_win_pos, 0);
+
+ I915_WRITE(pf_win_size,
+ (dev_priv->panel_fixed_mode->hdisplay << 16) |
+ (dev_priv->panel_fixed_mode->vdisplay));
+ }
+
/* Enable CPU pipe */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) == 0) {
@@ -1532,9 +1787,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
@@ -1577,6 +1833,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_load_lut(crtc);
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
/* Give the overlay scaler a chance to enable if it's on this pipe */
//intel_crtc_dpms_video(crtc, true); TODO
intel_update_watermarks(dev);
@@ -1586,6 +1845,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to disable if it's on this pipe */
//intel_crtc_dpms_video(crtc, FALSE); TODO
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
/* Disable the VGA plane that we never use */
i915_disable_vga(dev);
@@ -1634,15 +1897,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool enabled;
- if (IS_IGDNG(dev))
- igdng_crtc_dpms(crtc, mode);
- else
- i9xx_crtc_dpms(crtc, mode);
+ dev_priv->display.dpms(crtc, mode);
intel_crtc->dpms_mode = mode;
@@ -1709,56 +1970,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static int i945_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000;
+}
-/** Returns the core display clock speed for i830 - i945 */
-static int intel_get_core_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_device *dev)
{
+ return 333000;
+}
- /* Core clock values taken from the published datasheets.
- * The 830 may go up to 166 Mhz, which we should check.
- */
- if (IS_I945G(dev))
- return 400000;
- else if (IS_I915G(dev))
- return 333000;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
- return 200000;
- else if (IS_I915GM(dev)) {
- u16 gcfgc = 0;
+static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+ return 200000;
+}
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+static int i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
- if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
- return 133000;
- else {
- switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
- case GC_DISPLAY_CLOCK_333_MHZ:
- return 333000;
- default:
- case GC_DISPLAY_CLOCK_190_200_MHZ:
- return 190000;
- }
- }
- } else if (IS_I865G(dev))
- return 266000;
- else if (IS_I855(dev)) {
- u16 hpllcc = 0;
- /* Assume that the hardware is in the high speed state. This
- * should be the default.
- */
- switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
- case GC_CLOCK_133_200:
- case GC_CLOCK_100_200:
- return 200000;
- case GC_CLOCK_166_250:
- return 250000;
- case GC_CLOCK_100_133:
- return 133000;
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
}
- } else /* 852, 830 */
+ }
+}
+
+static int i865_get_display_clock_speed(struct drm_device *dev)
+{
+ return 266000;
+}
+
+static int i855_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 hpllcc = 0;
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
return 133000;
+ }
+
+ /* Shouldn't happen */
+ return 0;
+}
- return 0; /* Silence gcc warning */
+static int i830_get_display_clock_speed(struct drm_device *dev)
+{
+ return 133000;
}
/**
@@ -1921,7 +2194,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
{
long entries_required, wm_size;
- entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000;
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
entries_required /= wm->cacheline_size;
DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
@@ -1986,14 +2266,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- break;
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
}
- if (i >= ARRAY_SIZE(cxsr_latency_table)) {
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
- return NULL;
- }
- return latency;
+
+ DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
}
static void igd_disable_cxsr(struct drm_device *dev)
@@ -2084,32 +2363,36 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
*/
const static int latency_ns = 5000;
-static int intel_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (IS_I9XX(dev)) {
- if (plane == 0)
- size = dsparb & 0x7f;
- else
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
- (dsparb & 0x7f);
- } else if (IS_I85X(dev)) {
- if (plane == 0)
- size = dsparb & 0x1ff;
- else
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
- (dsparb & 0x1ff);
- size >>= 1; /* Convert to cachelines */
- } else if (IS_845G(dev)) {
+ if (plane == 0)
size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
- } else {
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
- }
+ else
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
+ (dsparb & 0x7f);
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ if (plane == 0)
+ size = dsparb & 0x1ff;
+ else
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
+ (dsparb & 0x1ff);
+ size >>= 1; /* Convert to cachelines */
DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
size);
@@ -2117,7 +2400,38 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static void g4x_update_wm(struct drm_device *dev)
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static void g4x_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fw_blc_self = I915_READ(FW_BLC_SELF);
@@ -2129,7 +2443,8 @@ static void g4x_update_wm(struct drm_device *dev)
I915_WRITE(FW_BLC_SELF, fw_blc_self);
}
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2165,8 +2480,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
cacheline_size = planea_params.cacheline_size;
/* Update per-plane FIFO sizes */
- planea_params.fifo_size = intel_get_fifo_size(dev, 0);
- planeb_params.fifo_size = intel_get_fifo_size(dev, 1);
+ planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
planea_wm = intel_calculate_wm(planea_clock, &planea_params,
pixel_size, latency_ns);
@@ -2213,14 +2528,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(FW_BLC2, fwater_hi);
}
-static void i830_update_wm(struct drm_device *dev, int planea_clock,
- int pixel_size)
+static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
+ int unused2, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
int planea_wm;
- i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0);
+ i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
pixel_size, latency_ns);
@@ -2264,6 +2579,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock,
*/
static void intel_update_watermarks(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
@@ -2302,15 +2618,8 @@ static void intel_update_watermarks(struct drm_device *dev)
else if (IS_IGD(dev))
igd_disable_cxsr(dev);
- if (IS_G4X(dev))
- g4x_update_wm(dev);
- else if (IS_I965G(dev))
- i965_update_wm(dev);
- else if (IS_I9XX(dev) || IS_MOBILE(dev))
- i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
- pixel_size);
- else
- i830_update_wm(dev, planea_clock, pixel_size);
+ dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
+ sr_hdisplay, pixel_size);
}
static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -2323,10 +2632,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int fp_reg = (pipe == 0) ? FPA0 : FPB0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
@@ -2334,8 +2644,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int refclk, num_outputs = 0;
intel_clock_t clock, reduced_clock;
@@ -2568,7 +2878,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
enable color space conversion */
if (!IS_IGDNG(dev)) {
if (pipe == 0)
- dspcntr |= DISPPLANE_SEL_PIPE_A;
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
@@ -2580,7 +2890,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* XXX: No double-wide on 915GM pipe B. Is that the only reason for the
* pipe == 0 check?
*/
- if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
+ if (mode->clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
pipeconf |= PIPEACONF_DOUBLE_WIDE;
else
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
@@ -2652,9 +2963,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
if (IS_I965G(dev) && !IS_IGDNG(dev)) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+ if (is_sdvo) {
+ sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ } else
+ I915_WRITE(dpll_md_reg, 0);
} else {
/* write it again -- the BIOS does, after all */
I915_WRITE(dpll_reg, dpll);
@@ -2734,6 +3048,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
@@ -2778,6 +3095,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_gem_object *bo;
struct drm_i915_gem_object *obj_priv;
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
uint32_t temp = I915_READ(control);
@@ -2863,6 +3181,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(intel_crtc->cursor_bo);
}
+
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
@@ -3544,6 +3866,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->lut_b[i] = i;
}
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+ if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+ DRM_DEBUG("swapping pipes & planes for FBC\n");
+ intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ }
+
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -3826,6 +4156,73 @@ void intel_init_clock_gating(struct drm_device *dev)
}
}
+/* Set up chip specific display functions */
+static void intel_init_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* We always want a DPMS function */
+ if (IS_IGDNG(dev))
+ dev_priv->display.dpms = igdng_crtc_dpms;
+ else
+ dev_priv->display.dpms = i9xx_crtc_dpms;
+
+ /* Only mobile has FBC, leave pointers NULL for other chips */
+ if (IS_MOBILE(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* Returns the core display clock speed */
+ if (IS_I945G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915_get_display_clock_speed;
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i9xx_misc_get_display_clock_speed;
+ else if (IS_I915GM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915gm_get_display_clock_speed;
+ else if (IS_I865G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i865_get_display_clock_speed;
+ else if (IS_I855(dev))
+ dev_priv->display.get_display_clock_speed =
+ i855_get_display_clock_speed;
+ else /* 852, 830 */
+ dev_priv->display.get_display_clock_speed =
+ i830_get_display_clock_speed;
+
+ /* For FIFO watermark updates */
+ if (IS_G4X(dev))
+ dev_priv->display.update_wm = g4x_update_wm;
+ else if (IS_I965G(dev))
+ dev_priv->display.update_wm = i965_update_wm;
+ else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else {
+ if (IS_I85X(dev))
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ else if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ dev_priv->display.update_wm = i830_update_wm;
+ }
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3839,6 +4236,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ intel_init_display(dev);
+
if (IS_I965G(dev)) {
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
@@ -3904,6 +4303,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
drm_mode_config_cleanup(dev);
}
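The intel_init_display() table added above replaces per-call IS_xxx() checks
with one round of chipset detection at load time; hot paths then dispatch
through dev_priv->display.*. A generic, self-contained sketch of the pattern
(the names below are placeholders, not the driver's types):

    /* Stand-ins for two chip-specific implementations. */
    static int clock_400(void *dev) { (void)dev; return 400000; }
    static int clock_333(void *dev) { (void)dev; return 333000; }

    struct display_ops {
            int (*get_display_clock_speed)(void *dev);
    };

    /* Chosen once at init, so callers never repeat chipset checks. */
    static void pick_display_ops(struct display_ops *ops, int is_i945g)
    {
            ops->get_display_clock_speed = is_i945g ? clock_400 : clock_333;
    }

    static int core_clock_khz(struct display_ops *ops, void *dev)
    {
            return ops->get_display_clock_speed(dev);
    }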
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2b914d73207..f4856a51047 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -232,7 +232,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4) {
- uint32_t d = pack_aux(send + i, send_bytes - i);;
+ uint32_t d = pack_aux(send + i, send_bytes - i);
I915_WRITE(ch_data + i, d);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3ebbbabfe59..8aa4b7f30da 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
+#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -111,8 +112,8 @@ struct intel_output {
struct intel_crtc {
struct drm_crtc base;
- int pipe;
- int plane;
+ enum pipe pipe;
+ enum plane plane;
struct drm_gem_object *cursor_bo;
uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index dafc0da1c25..98ae3d73577 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <acpi/button.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include "drmP.h"
@@ -295,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
goto out;
}
+ /* full screen scale for now */
+ if (IS_IGDNG(dev))
+ goto out;
+
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -322,8 +327,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ if (!IS_IGDNG(dev)) {
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+ }
switch (lvds_priv->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
@@ -572,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- /* No panel fitting yet, fixme */
if (IS_IGDNG(dev))
return;
@@ -585,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
+/* Some lid devices report incorrect lid status, assume they're connected */
+static const struct dmi_system_id bad_lid_status[] = {
+ {
+ .ident = "Aspire One",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
+ },
+ },
+ { }
+};
+
/**
* Detect the LVDS connection.
*
- * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have
- * been set up if the LVDS was actually connected anyway.
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
+ * connected and closed means disconnected. We also send hotplug events as
+ * needed, using lid status notification from the input layer.
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
- return connector_status_connected;
+ enum drm_connector_status status = connector_status_connected;
+
+ if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ status = connector_status_disconnected;
+
+ return status;
}
/**
@@ -632,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ void *unused)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, lid_notifier);
+ struct drm_device *dev = dev_priv->dev;
+
+ if (acpi_lid_open() && !dev_priv->suspended) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ drm_sysfs_hotplug_event(dev_priv->dev);
+
+ return NOTIFY_OK;
+}
+
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -641,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -1011,6 +1057,11 @@ out:
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
}
+ dev_priv->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ DRM_DEBUG("lid notifier registration failed\n");
+ dev_priv->lid_notifier.notifier_call = NULL;
+ }
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0bf28efcf2c..083bec2e50f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -135,6 +135,30 @@ struct intel_sdvo_priv {
struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
struct intel_sdvo_dtd save_output_dtd[16];
u32 save_SDVOX;
+ /* add the property for the SDVO-TV */
+ struct drm_property *left_property;
+ struct drm_property *right_property;
+ struct drm_property *top_property;
+ struct drm_property *bottom_property;
+ struct drm_property *hpos_property;
+ struct drm_property *vpos_property;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness_property;
+ struct drm_property *contrast_property;
+ struct drm_property *saturation_property;
+ struct drm_property *hue_property;
+
+	/* Variables recording the current settings for the properties above */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+	/* maximum h/v scan range, used to bound the margin properties */
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
};
static bool
@@ -281,6 +305,31 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -981,7 +1030,7 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
status = intel_sdvo_read_response(output, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
- DRM_DEBUG("%s: Failed to set TV format\n",
+ DRM_DEBUG_KMS("%s: Failed to set TV format\n",
SDVO_NAME(sdvo_priv));
}
@@ -1792,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return 1;
}
+static
+void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_device *dev = connector->dev;
+
+ if (sdvo_priv->is_tv) {
+ if (sdvo_priv->left_property)
+ drm_property_destroy(dev, sdvo_priv->left_property);
+ if (sdvo_priv->right_property)
+ drm_property_destroy(dev, sdvo_priv->right_property);
+ if (sdvo_priv->top_property)
+ drm_property_destroy(dev, sdvo_priv->top_property);
+ if (sdvo_priv->bottom_property)
+ drm_property_destroy(dev, sdvo_priv->bottom_property);
+ if (sdvo_priv->hpos_property)
+ drm_property_destroy(dev, sdvo_priv->hpos_property);
+ if (sdvo_priv->vpos_property)
+ drm_property_destroy(dev, sdvo_priv->vpos_property);
+ }
+ if (sdvo_priv->is_tv) {
+ if (sdvo_priv->saturation_property)
+ drm_property_destroy(dev,
+ sdvo_priv->saturation_property);
+ if (sdvo_priv->contrast_property)
+ drm_property_destroy(dev,
+ sdvo_priv->contrast_property);
+ if (sdvo_priv->hue_property)
+ drm_property_destroy(dev, sdvo_priv->hue_property);
+ }
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (sdvo_priv->brightness_property)
+ drm_property_destroy(dev,
+ sdvo_priv->brightness_property);
+ }
+ return;
+}
+
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
@@ -1812,6 +1900,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
drm_property_destroy(connector->dev,
sdvo_priv->tv_format_property);
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
+ intel_sdvo_destroy_enhance_property(connector);
+
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -1829,6 +1920,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
+ uint8_t cmd, status;
+ uint16_t temp_value;
ret = drm_connector_property_set_value(connector, property, val);
if (ret < 0)
@@ -1845,11 +1938,102 @@ intel_sdvo_set_property(struct drm_connector *connector,
sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
changed = true;
- } else {
- ret = -EINVAL;
- goto out;
}
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ cmd = 0;
+ temp_value = val;
+ if (sdvo_priv->left_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->right_property, val);
+ if (sdvo_priv->left_margin == temp_value)
+ goto out;
+
+ sdvo_priv->left_margin = temp_value;
+ sdvo_priv->right_margin = temp_value;
+ temp_value = sdvo_priv->max_hscan -
+ sdvo_priv->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ } else if (sdvo_priv->right_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->left_property, val);
+ if (sdvo_priv->right_margin == temp_value)
+ goto out;
+
+ sdvo_priv->left_margin = temp_value;
+ sdvo_priv->right_margin = temp_value;
+ temp_value = sdvo_priv->max_hscan -
+ sdvo_priv->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ } else if (sdvo_priv->top_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->bottom_property, val);
+ if (sdvo_priv->top_margin == temp_value)
+ goto out;
+
+ sdvo_priv->top_margin = temp_value;
+ sdvo_priv->bottom_margin = temp_value;
+ temp_value = sdvo_priv->max_vscan -
+ sdvo_priv->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ } else if (sdvo_priv->bottom_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->top_property, val);
+ if (sdvo_priv->bottom_margin == temp_value)
+ goto out;
+ sdvo_priv->top_margin = temp_value;
+ sdvo_priv->bottom_margin = temp_value;
+ temp_value = sdvo_priv->max_vscan -
+ sdvo_priv->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ } else if (sdvo_priv->hpos_property == property) {
+ if (sdvo_priv->cur_hpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_H;
+ sdvo_priv->cur_hpos = temp_value;
+ } else if (sdvo_priv->vpos_property == property) {
+ if (sdvo_priv->cur_vpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_V;
+ sdvo_priv->cur_vpos = temp_value;
+ } else if (sdvo_priv->saturation_property == property) {
+ if (sdvo_priv->cur_saturation == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_SATURATION;
+ sdvo_priv->cur_saturation = temp_value;
+ } else if (sdvo_priv->contrast_property == property) {
+ if (sdvo_priv->cur_contrast == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_CONTRAST;
+ sdvo_priv->cur_contrast = temp_value;
+ } else if (sdvo_priv->hue_property == property) {
+ if (sdvo_priv->cur_hue == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_HUE;
+ sdvo_priv->cur_hue = temp_value;
+ } else if (sdvo_priv->brightness_property == property) {
+ if (sdvo_priv->cur_brightness == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_BRIGHTNESS;
+ sdvo_priv->cur_brightness = temp_value;
+ }
+ if (cmd) {
+ intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
+ status = intel_sdvo_read_response(intel_output,
+ NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO command \n");
+ return -EINVAL;
+ }
+ changed = true;
+ }
+ }
if (changed && crtc)
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
crtc->y, crtc->fb);
@@ -2090,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2176,6 +2362,310 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
}
+static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_enhancements_reply sdvo_data;
+ struct drm_device *dev = connector->dev;
+ uint8_t status;
+ uint16_t response, data_value[2];
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &sdvo_data,
+ sizeof(sdvo_data));
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS(" incorrect response is returned\n");
+ return;
+ }
+ response = *((uint16_t *)&sdvo_data);
+ if (!response) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return;
+ }
+ if (sdvo_priv->is_tv) {
+ /* When horizontal overscan is supported, add the left/right
+ * margin properties.
+ */
+ if (sdvo_data.overscan_h) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "h_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
+ return;
+ }
+ sdvo_priv->max_hscan = data_value[0];
+ sdvo_priv->left_margin = data_value[0] - response;
+ sdvo_priv->right_margin = sdvo_priv->left_margin;
+ sdvo_priv->left_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ sdvo_priv->left_property->values[0] = 0;
+ sdvo_priv->left_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->left_property,
+ sdvo_priv->left_margin);
+ sdvo_priv->right_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ sdvo_priv->right_property->values[0] = 0;
+ sdvo_priv->right_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->right_property,
+ sdvo_priv->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.overscan_v) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "v_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
+ return;
+ }
+ sdvo_priv->max_vscan = data_value[0];
+ sdvo_priv->top_margin = data_value[0] - response;
+ sdvo_priv->bottom_margin = sdvo_priv->top_margin;
+ sdvo_priv->top_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ sdvo_priv->top_property->values[0] = 0;
+ sdvo_priv->top_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->top_property,
+ sdvo_priv->top_margin);
+ sdvo_priv->bottom_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ sdvo_priv->bottom_property->values[0] = 0;
+ sdvo_priv->bottom_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->bottom_property,
+ sdvo_priv->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_h) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
+ return;
+ }
+ sdvo_priv->max_hpos = data_value[0];
+ sdvo_priv->cur_hpos = response;
+ sdvo_priv->hpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hpos", 2);
+ sdvo_priv->hpos_property->values[0] = 0;
+ sdvo_priv->hpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hpos_property,
+ sdvo_priv->cur_hpos);
+ DRM_DEBUG_KMS("h_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_v) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
+ return;
+ }
+ sdvo_priv->max_vpos = data_value[0];
+ sdvo_priv->cur_vpos = response;
+ sdvo_priv->vpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "vpos", 2);
+ sdvo_priv->vpos_property->values[0] = 0;
+ sdvo_priv->vpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->vpos_property,
+ sdvo_priv->cur_vpos);
+ DRM_DEBUG_KMS("v_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ }
+ if (sdvo_priv->is_tv) {
+ if (sdvo_data.saturation) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
+ return;
+ }
+ sdvo_priv->max_saturation = data_value[0];
+ sdvo_priv->cur_saturation = response;
+ sdvo_priv->saturation_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "saturation", 2);
+ sdvo_priv->saturation_property->values[0] = 0;
+ sdvo_priv->saturation_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->saturation_property,
+ sdvo_priv->cur_saturation);
+ DRM_DEBUG_KMS("saturation: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.contrast) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
+ return;
+ }
+ sdvo_priv->max_contrast = data_value[0];
+ sdvo_priv->cur_contrast = response;
+ sdvo_priv->contrast_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "contrast", 2);
+ sdvo_priv->contrast_property->values[0] = 0;
+ sdvo_priv->contrast_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->contrast_property,
+ sdvo_priv->cur_contrast);
+ DRM_DEBUG_KMS("contrast: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.hue) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
+ return;
+ }
+ sdvo_priv->max_hue = data_value[0];
+ sdvo_priv->cur_hue = response;
+ sdvo_priv->hue_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hue", 2);
+ sdvo_priv->hue_property->values[0] = 0;
+ sdvo_priv->hue_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hue_property,
+ sdvo_priv->cur_hue);
+ DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ }
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (sdvo_data.brightness) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
+ return;
+ }
+ sdvo_priv->max_brightness = data_value[0];
+ sdvo_priv->cur_brightness = response;
+ sdvo_priv->brightness_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "brightness", 2);
+ sdvo_priv->brightness_property->values[0] = 0;
+ sdvo_priv->brightness_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->brightness_property,
+ sdvo_priv->cur_brightness);
+ DRM_DEBUG_KMS("brightness: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ }
+ return;
+}
+
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
struct drm_connector *connector;
@@ -2264,6 +2754,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
if (sdvo_priv->is_tv)
intel_sdvo_tv_create_property(connector);
+
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
+ intel_sdvo_create_enhance_property(connector);
+
drm_sysfs_connector_add(connector);
intel_sdvo_select_ddc_bus(sdvo_priv);
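The margin properties added above are a user-visible inversion of the SDVO overscan value: the device reports a maximum and a current overscan, the driver exposes margin = max - current, and a margin write is converted back to an overscan value before being sent with SDVO_CMD_SET_OVERSCAN_H/V. A minimal standalone sketch of that arithmetic follows; the numeric values are invented, only the max/current/margin relationship comes from the patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* Illustrative only: mirrors the margin <-> overscan conversion used by
     * intel_sdvo_create_enhance_property() and intel_sdvo_set_property(). */
    int main(void)
    {
        uint16_t max_hscan = 100;      /* GET_MAX_OVERSCAN_H, data_value[0] (example) */
        uint16_t cur_overscan = 80;    /* GET_OVERSCAN_H response (example) */

        /* Property creation: expose the current setting as a margin. */
        uint16_t left_margin = max_hscan - cur_overscan;   /* 20 */

        /* Property write: the user asks for a new margin, which is converted
         * back to the value that SDVO_CMD_SET_OVERSCAN_H expects. */
        uint16_t new_margin = 30;
        uint16_t new_overscan = max_hscan - new_margin;    /* 70 */

        assert(max_hscan - new_overscan == new_margin);
        printf("margin %u -> overscan word %u (was margin %u)\n",
               new_margin, new_overscan, left_margin);
        return 0;
    }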
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index b710fab21cb..a53b848e0f1 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -239,7 +239,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
MGA_WR34, 0x00000000,
MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);
- /* Padding required to to hardware bug.
+ /* Padding required due to hardware bug.
*/
DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff,
@@ -317,7 +317,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */
MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */
- /* Padding required to to hardware bug */
+ /* Padding required due to hardware bug */
DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff,
MGA_DMAPAD, 0xffffffff,
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index dde2ccbf1d1..d988eece018 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -737,7 +737,7 @@ r600_blit_copy(struct drm_device *dev,
/* dst */
set_render_target(dev_priv, COLOR_8_8_8_8,
- dst_x + cur_size, h,
+ (dst_x + cur_size) / 4, h,
dst_gpu_addr);
/* scissors */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 0a6f4681f46..acae33e2ad5 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -774,7 +774,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
/* dst 23 */
set_render_target(rdev, COLOR_8_8_8_8,
- dst_x + cur_size, h,
+ (dst_x + cur_size) / 4, h,
dst_gpu_addr);
/* scissors 12 */
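Both r600 blit paths now program the render-target width as (dst_x + cur_size) / 4. Presumably the copy goes through a COLOR_8_8_8_8 (32-bit) surface, so a byte extent has to be divided by the 4 bytes per pixel before being handed to set_render_target(); that interpretation is an assumption on my part, the patch itself only shows the division. A tiny sketch of the unit conversion:

    #include <stdio.h>

    /* Sketch only: assuming a 32bpp blit surface (not stated in the patch),
     * widths given in bytes must be divided by the pixel size before being
     * programmed as a render-target width. */
    #define BYTES_PER_PIXEL_8888 4

    static unsigned int bytes_to_pixels(unsigned int dst_x, unsigned int cur_size)
    {
        return (dst_x + cur_size) / BYTES_PER_PIXEL_8888;
    }

    int main(void)
    {
        printf("dst_x=0, cur_size=8192 bytes -> width %u pixels\n",
               bytes_to_pixels(0, 8192));
        return 0;
    }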
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c839b608970..6311b136259 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -49,6 +49,7 @@
#include <linux/list.h>
#include <linux/kref.h>
+#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
@@ -77,64 +78,6 @@ extern int radeon_tv;
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
-enum radeon_family {
- CHIP_R100,
- CHIP_RV100,
- CHIP_RS100,
- CHIP_RV200,
- CHIP_RS200,
- CHIP_R200,
- CHIP_RV250,
- CHIP_RS300,
- CHIP_RV280,
- CHIP_R300,
- CHIP_R350,
- CHIP_RV350,
- CHIP_RV380,
- CHIP_R420,
- CHIP_R423,
- CHIP_RV410,
- CHIP_RS400,
- CHIP_RS480,
- CHIP_RS600,
- CHIP_RS690,
- CHIP_RS740,
- CHIP_RV515,
- CHIP_R520,
- CHIP_RV530,
- CHIP_RV560,
- CHIP_RV570,
- CHIP_R580,
- CHIP_R600,
- CHIP_RV610,
- CHIP_RV630,
- CHIP_RV670,
- CHIP_RV620,
- CHIP_RV635,
- CHIP_RS780,
- CHIP_RS880,
- CHIP_RV770,
- CHIP_RV730,
- CHIP_RV710,
- CHIP_RV740,
- CHIP_LAST,
-};
-
-enum radeon_chip_flags {
- RADEON_FAMILY_MASK = 0x0000ffffUL,
- RADEON_FLAGS_MASK = 0xffff0000UL,
- RADEON_IS_MOBILITY = 0x00010000UL,
- RADEON_IS_IGP = 0x00020000UL,
- RADEON_SINGLE_CRTC = 0x00040000UL,
- RADEON_IS_AGP = 0x00080000UL,
- RADEON_HAS_HIERZ = 0x00100000UL,
- RADEON_IS_PCIE = 0x00200000UL,
- RADEON_NEW_MEMMAP = 0x00400000UL,
- RADEON_IS_PCI = 0x00800000UL,
- RADEON_IS_IGPGART = 0x01000000UL,
-};
-
-
/*
* Errata workarounds.
*/
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index cb0cfe4b308..350962e0f34 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -34,6 +34,8 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include "radeon_family.h"
+
/* General customization:
*/
@@ -109,75 +111,12 @@
#define DRIVER_MINOR 31
#define DRIVER_PATCHLEVEL 0
-/*
- * Radeon chip families
- */
-enum radeon_family {
- CHIP_R100,
- CHIP_RV100,
- CHIP_RS100,
- CHIP_RV200,
- CHIP_RS200,
- CHIP_R200,
- CHIP_RV250,
- CHIP_RS300,
- CHIP_RV280,
- CHIP_R300,
- CHIP_R350,
- CHIP_RV350,
- CHIP_RV380,
- CHIP_R420,
- CHIP_R423,
- CHIP_RV410,
- CHIP_RS400,
- CHIP_RS480,
- CHIP_RS600,
- CHIP_RS690,
- CHIP_RS740,
- CHIP_RV515,
- CHIP_R520,
- CHIP_RV530,
- CHIP_RV560,
- CHIP_RV570,
- CHIP_R580,
- CHIP_R600,
- CHIP_RV610,
- CHIP_RV630,
- CHIP_RV620,
- CHIP_RV635,
- CHIP_RV670,
- CHIP_RS780,
- CHIP_RS880,
- CHIP_RV770,
- CHIP_RV730,
- CHIP_RV710,
- CHIP_RV740,
- CHIP_LAST,
-};
-
enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
UCODE_R300,
};
-/*
- * Chip flags
- */
-enum radeon_chip_flags {
- RADEON_FAMILY_MASK = 0x0000ffffUL,
- RADEON_FLAGS_MASK = 0xffff0000UL,
- RADEON_IS_MOBILITY = 0x00010000UL,
- RADEON_IS_IGP = 0x00020000UL,
- RADEON_SINGLE_CRTC = 0x00040000UL,
- RADEON_IS_AGP = 0x00080000UL,
- RADEON_HAS_HIERZ = 0x00100000UL,
- RADEON_IS_PCIE = 0x00200000UL,
- RADEON_NEW_MEMMAP = 0x00400000UL,
- RADEON_IS_PCI = 0x00800000UL,
- RADEON_IS_IGPGART = 0x01000000UL,
-};
-
typedef struct drm_radeon_freelist {
unsigned int age;
struct drm_buf *buf;
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
new file mode 100644
index 00000000000..797972e344a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+
+/* This file defines the CHIP_ and family flags used in the pciids.
+ * It is shared between the KMS and non-KMS paths so the list only
+ * needs to be updated in one place.
+ */
+#ifndef RADEON_FAMILY_H
+#define RADEON_FAMILY_H
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+ CHIP_R100,
+ CHIP_RV100,
+ CHIP_RS100,
+ CHIP_RV200,
+ CHIP_RS200,
+ CHIP_R200,
+ CHIP_RV250,
+ CHIP_RS300,
+ CHIP_RV280,
+ CHIP_R300,
+ CHIP_R350,
+ CHIP_RV350,
+ CHIP_RV380,
+ CHIP_R420,
+ CHIP_R423,
+ CHIP_RV410,
+ CHIP_RS400,
+ CHIP_RS480,
+ CHIP_RS600,
+ CHIP_RS690,
+ CHIP_RS740,
+ CHIP_RV515,
+ CHIP_R520,
+ CHIP_RV530,
+ CHIP_RV560,
+ CHIP_RV570,
+ CHIP_R580,
+ CHIP_R600,
+ CHIP_RV610,
+ CHIP_RV630,
+ CHIP_RV670,
+ CHIP_RV620,
+ CHIP_RV635,
+ CHIP_RS780,
+ CHIP_RS880,
+ CHIP_RV770,
+ CHIP_RV730,
+ CHIP_RV710,
+ CHIP_RV740,
+ CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum radeon_chip_flags {
+ RADEON_FAMILY_MASK = 0x0000ffffUL,
+ RADEON_FLAGS_MASK = 0xffff0000UL,
+ RADEON_IS_MOBILITY = 0x00010000UL,
+ RADEON_IS_IGP = 0x00020000UL,
+ RADEON_SINGLE_CRTC = 0x00040000UL,
+ RADEON_IS_AGP = 0x00080000UL,
+ RADEON_HAS_HIERZ = 0x00100000UL,
+ RADEON_IS_PCIE = 0x00200000UL,
+ RADEON_NEW_MEMMAP = 0x00400000UL,
+ RADEON_IS_PCI = 0x00800000UL,
+ RADEON_IS_IGPGART = 0x01000000UL,
+};
+#endif
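With the enum and flag definitions now shared through radeon_family.h, both the KMS and legacy drivers decode a device's flags word the same way: the low 16 bits carry the family index, the high bits carry feature flags. A rough illustration (the mask values are copied from the header above, the sample flags word is made up):

    #include <stdio.h>

    /* Values copied from radeon_family.h; the sample flags word is invented. */
    #define RADEON_FAMILY_MASK  0x0000ffffUL
    #define RADEON_IS_MOBILITY  0x00010000UL
    #define RADEON_IS_PCIE      0x00200000UL

    int main(void)
    {
        unsigned long flags = RADEON_IS_MOBILITY | RADEON_IS_PCIE | 0x5cUL;

        unsigned long family = flags & RADEON_FAMILY_MASK;
        printf("family index %lu, mobility=%d, pcie=%d\n", family,
               !!(flags & RADEON_IS_MOBILITY), !!(flags & RADEON_IS_PCIE));
        return 0;
    }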
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 111afbe8de0..24d90ea246c 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -205,13 +205,6 @@ config HID_NTRIG
Support for N-Trig touch screen.
config HID_PANTHERLORD
- tristate "Pantherlord devices support" if EMBEDDED
- depends on USB_HID
- default !EMBEDDED
- ---help---
- Support for PantherLord/GreenAsia based device support.
-
-config HID_PANTHERLORD
tristate "Pantherlord support" if EMBEDDED
depends on USB_HID
default !EMBEDDED
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 342b7d36d7b..be34d32906b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1089,8 +1089,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
return -1;
}
- buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE,
- interrupt ? GFP_ATOMIC : GFP_KERNEL);
+ buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf) {
report = hid_get_report(report_enum, data);
@@ -1238,6 +1237,17 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
}
EXPORT_SYMBOL_GPL(hid_connect);
+void hid_disconnect(struct hid_device *hdev)
+{
+ if (hdev->claimed & HID_CLAIMED_INPUT)
+ hidinput_disconnect(hdev);
+ if (hdev->claimed & HID_CLAIMED_HIDDEV)
+ hdev->hiddev_disconnect(hdev);
+ if (hdev->claimed & HID_CLAIMED_HIDRAW)
+ hidraw_disconnect(hdev);
+}
+EXPORT_SYMBOL_GPL(hid_disconnect);
+
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 1b0e07a67d6..03bd703255a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1041,13 +1041,6 @@ static void usbhid_stop(struct hid_device *hid)
hid_cancel_delayed_stuff(usbhid);
- if (hid->claimed & HID_CLAIMED_INPUT)
- hidinput_disconnect(hid);
- if (hid->claimed & HID_CLAIMED_HIDDEV)
- hiddev_disconnect(hid);
- if (hid->claimed & HID_CLAIMED_HIDRAW)
- hidraw_disconnect(hid);
-
hid->claimed = 0;
usb_free_urb(usbhid->urbin);
@@ -1085,7 +1078,7 @@ static struct hid_ll_driver usb_hid_driver = {
.hidinput_input_event = usb_hidinput_input_event,
};
-static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev(intf);
@@ -1117,6 +1110,7 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
hid->ff_init = hid_pidff_init;
#ifdef CONFIG_USB_HIDDEV
hid->hiddev_connect = hiddev_connect;
+ hid->hiddev_disconnect = hiddev_disconnect;
hid->hiddev_hid_event = hiddev_hid_event;
hid->hiddev_report_event = hiddev_report_event;
#endif
@@ -1177,7 +1171,7 @@ err:
return ret;
}
-static void hid_disconnect(struct usb_interface *intf)
+static void usbhid_disconnect(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid;
@@ -1359,8 +1353,8 @@ MODULE_DEVICE_TABLE (usb, hid_usb_ids);
static struct usb_driver hid_driver = {
.name = "usbhid",
- .probe = hid_probe,
- .disconnect = hid_disconnect,
+ .probe = usbhid_probe,
+ .disconnect = usbhid_disconnect,
#ifdef CONFIG_PM
.suspend = hid_suspend,
.resume = hid_resume,
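The usbhid-specific teardown removed above is replaced by a transport-independent hid_disconnect() in the HID core that dispatches on the hdev->claimed bitmask, with hiddev_disconnect now reached through a function pointer set at probe time. A standalone sketch of that dispatch shape (the flag values and stub handlers here are illustrative stand-ins, not the real HID_CLAIMED_* definitions):

    #include <stdio.h>

    /* Illustrative flags and stubs; only the "dispatch on the claimed
     * bitmask" pattern is taken from hid_disconnect() above. */
    #define CLAIMED_INPUT  (1 << 0)
    #define CLAIMED_HIDDEV (1 << 1)
    #define CLAIMED_HIDRAW (1 << 2)

    static void input_teardown(void)  { puts("input teardown"); }
    static void hiddev_teardown(void) { puts("hiddev teardown"); }
    static void hidraw_teardown(void) { puts("hidraw teardown"); }

    static void disconnect(unsigned int claimed)
    {
        if (claimed & CLAIMED_INPUT)
            input_teardown();
        if (claimed & CLAIMED_HIDDEV)
            hiddev_teardown();
        if (claimed & CLAIMED_HIDRAW)
            hidraw_teardown();
    }

    int main(void)
    {
        disconnect(CLAIMED_INPUT | CLAIMED_HIDRAW);
        return 0;
    }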
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index ed7711d11ae..6857560144b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -325,34 +325,6 @@ config SENSORS_F75375S
This driver can also be built as a module. If so, the module
will be called f75375s.
-config SENSORS_FSCHER
- tristate "FSC Hermes (DEPRECATED)"
- depends on X86 && I2C
- help
- This driver is DEPRECATED please use the new merged fschmd
- ("FSC Poseidon, Scylla, Hermes, Heimdall and Heracles") driver
- instead.
-
- If you say yes here you get support for Fujitsu Siemens
- Computers Hermes sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called fscher.
-
-config SENSORS_FSCPOS
- tristate "FSC Poseidon (DEPRECATED)"
- depends on X86 && I2C
- help
- This driver is DEPRECATED please use the new merged fschmd
- ("FSC Poseidon, Scylla, Hermes, Heimdall and Heracles") driver
- instead.
-
- If you say yes here you get support for Fujitsu Siemens
- Computers Poseidon sensor chips.
-
- This driver can also be built as a module. If so, the module
- will be called fscpos.
-
config SENSORS_FSCHMD
tristate "Fujitsu Siemens Computers sensor chips"
depends on X86 && I2C
@@ -401,12 +373,12 @@ config SENSORS_GL520SM
will be called gl520sm.
config SENSORS_CORETEMP
- tristate "Intel Core (2) Duo/Solo temperature sensor"
+ tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86 && EXPERIMENTAL
help
If you say yes here you get support for the temperature
- sensor inside your CPU. Supported all are all known variants
- of Intel Core family.
+ sensor inside your CPU. Most of the family 6 CPUs
+ are supported. See Documentation/hwmon/coretemp for details.
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index bcf73a9bb61..9f46cb019cc 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -42,9 +42,7 @@ obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
-obj-$(CONFIG_SENSORS_FSCHER) += fscher.o
obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o
-obj-$(CONFIG_SENSORS_FSCPOS) += fscpos.o
obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index 242294db3db..5e9e095f113 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -43,6 +43,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#define DRVNAME "adcxx"
@@ -157,8 +158,9 @@ static struct sensor_device_attribute ad_input[] = {
/*----------------------------------------------------------------------*/
-static int __devinit adcxx_probe(struct spi_device *spi, int channels)
+static int __devinit adcxx_probe(struct spi_device *spi)
{
+ int channels = spi_get_device_id(spi)->driver_data;
struct adcxx *adc;
int status;
int i;
@@ -204,26 +206,6 @@ out_err:
return status;
}
-static int __devinit adcxx1s_probe(struct spi_device *spi)
-{
- return adcxx_probe(spi, 1);
-}
-
-static int __devinit adcxx2s_probe(struct spi_device *spi)
-{
- return adcxx_probe(spi, 2);
-}
-
-static int __devinit adcxx4s_probe(struct spi_device *spi)
-{
- return adcxx_probe(spi, 4);
-}
-
-static int __devinit adcxx8s_probe(struct spi_device *spi)
-{
- return adcxx_probe(spi, 8);
-}
-
static int __devexit adcxx_remove(struct spi_device *spi)
{
struct adcxx *adc = dev_get_drvdata(&spi->dev);
@@ -241,79 +223,33 @@ static int __devexit adcxx_remove(struct spi_device *spi)
return 0;
}
-static struct spi_driver adcxx1s_driver = {
- .driver = {
- .name = "adcxx1s",
- .owner = THIS_MODULE,
- },
- .probe = adcxx1s_probe,
- .remove = __devexit_p(adcxx_remove),
+static const struct spi_device_id adcxx_ids[] = {
+ { "adcxx1s", 1 },
+ { "adcxx2s", 2 },
+ { "adcxx4s", 4 },
+ { "adcxx8s", 8 },
+ { },
};
+MODULE_DEVICE_TABLE(spi, adcxx_ids);
-static struct spi_driver adcxx2s_driver = {
+static struct spi_driver adcxx_driver = {
.driver = {
- .name = "adcxx2s",
+ .name = "adcxx",
.owner = THIS_MODULE,
},
- .probe = adcxx2s_probe,
- .remove = __devexit_p(adcxx_remove),
-};
-
-static struct spi_driver adcxx4s_driver = {
- .driver = {
- .name = "adcxx4s",
- .owner = THIS_MODULE,
- },
- .probe = adcxx4s_probe,
- .remove = __devexit_p(adcxx_remove),
-};
-
-static struct spi_driver adcxx8s_driver = {
- .driver = {
- .name = "adcxx8s",
- .owner = THIS_MODULE,
- },
- .probe = adcxx8s_probe,
+ .id_table = adcxx_ids,
+ .probe = adcxx_probe,
.remove = __devexit_p(adcxx_remove),
};
static int __init init_adcxx(void)
{
- int status;
- status = spi_register_driver(&adcxx1s_driver);
- if (status)
- goto reg_1_failed;
-
- status = spi_register_driver(&adcxx2s_driver);
- if (status)
- goto reg_2_failed;
-
- status = spi_register_driver(&adcxx4s_driver);
- if (status)
- goto reg_4_failed;
-
- status = spi_register_driver(&adcxx8s_driver);
- if (status)
- goto reg_8_failed;
-
- return status;
-
-reg_8_failed:
- spi_unregister_driver(&adcxx4s_driver);
-reg_4_failed:
- spi_unregister_driver(&adcxx2s_driver);
-reg_2_failed:
- spi_unregister_driver(&adcxx1s_driver);
-reg_1_failed:
- return status;
+ return spi_register_driver(&adcxx_driver);
}
static void __exit exit_adcxx(void)
{
- spi_unregister_driver(&adcxx1s_driver);
- spi_unregister_driver(&adcxx2s_driver);
- spi_unregister_driver(&adcxx4s_driver);
- spi_unregister_driver(&adcxx8s_driver);
+ spi_unregister_driver(&adcxx_driver);
}
module_init(init_adcxx);
@@ -322,8 +258,3 @@ module_exit(exit_adcxx);
MODULE_AUTHOR("Marc Pignat");
MODULE_DESCRIPTION("National Semiconductor adcxx8sxxx Linux driver");
MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("adcxx1s");
-MODULE_ALIAS("adcxx2s");
-MODULE_ALIAS("adcxx4s");
-MODULE_ALIAS("adcxx8s");
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index b11e06f644b..afc59431812 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -83,16 +83,14 @@ struct adm1021_data {
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
+ char low_power; /* !=0 if device in low power mode */
unsigned long last_updated; /* In jiffies */
- s8 temp_max[2]; /* Register values */
- s8 temp_min[2];
- s8 temp[2];
+ int temp_max[2]; /* Register values */
+ int temp_min[2];
+ int temp[2];
u8 alarms;
/* Special values for ADM1023 only */
- u8 remote_temp_prec;
- u8 remote_temp_os_prec;
- u8 remote_temp_hyst_prec;
u8 remote_temp_offset;
u8 remote_temp_offset_prec;
};
@@ -141,7 +139,7 @@ static ssize_t show_temp(struct device *dev,
int index = to_sensor_dev_attr(devattr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * data->temp[index]);
+ return sprintf(buf, "%d\n", data->temp[index]);
}
static ssize_t show_temp_max(struct device *dev,
@@ -150,7 +148,7 @@ static ssize_t show_temp_max(struct device *dev,
int index = to_sensor_dev_attr(devattr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * data->temp_max[index]);
+ return sprintf(buf, "%d\n", data->temp_max[index]);
}
static ssize_t show_temp_min(struct device *dev,
@@ -159,7 +157,7 @@ static ssize_t show_temp_min(struct device *dev,
int index = to_sensor_dev_attr(devattr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * data->temp_min[index]);
+ return sprintf(buf, "%d\n", data->temp_min[index]);
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
@@ -216,6 +214,35 @@ static ssize_t set_temp_min(struct device *dev,
return count;
}
+static ssize_t show_low_power(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct adm1021_data *data = adm1021_update_device(dev);
+ return sprintf(buf, "%d\n", data->low_power);
+}
+
+static ssize_t set_low_power(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1021_data *data = i2c_get_clientdata(client);
+ int low_power = simple_strtol(buf, NULL, 10) != 0;
+
+ mutex_lock(&data->update_lock);
+ if (low_power != data->low_power) {
+ int config = i2c_smbus_read_byte_data(
+ client, ADM1021_REG_CONFIG_R);
+ data->low_power = low_power;
+ i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W,
+ (config & 0xBF) | (low_power << 6));
+ }
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0);
@@ -233,6 +260,7 @@ static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR(low_power, S_IWUSR | S_IRUGO, show_low_power, set_low_power);
static struct attribute *adm1021_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -247,6 +275,7 @@ static struct attribute *adm1021_attributes[] = {
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_low_power.attr,
NULL
};
@@ -412,25 +441,27 @@ static struct adm1021_data *adm1021_update_device(struct device *dev)
dev_dbg(&client->dev, "Starting adm1021 update\n");
for (i = 0; i < 2; i++) {
- data->temp[i] = i2c_smbus_read_byte_data(client,
- ADM1021_REG_TEMP(i));
- data->temp_max[i] = i2c_smbus_read_byte_data(client,
- ADM1021_REG_TOS_R(i));
- data->temp_min[i] = i2c_smbus_read_byte_data(client,
- ADM1021_REG_THYST_R(i));
+ data->temp[i] = 1000 *
+ (s8) i2c_smbus_read_byte_data(
+ client, ADM1021_REG_TEMP(i));
+ data->temp_max[i] = 1000 *
+ (s8) i2c_smbus_read_byte_data(
+ client, ADM1021_REG_TOS_R(i));
+ data->temp_min[i] = 1000 *
+ (s8) i2c_smbus_read_byte_data(
+ client, ADM1021_REG_THYST_R(i));
}
data->alarms = i2c_smbus_read_byte_data(client,
ADM1021_REG_STATUS) & 0x7c;
if (data->type == adm1023) {
- data->remote_temp_prec =
- i2c_smbus_read_byte_data(client,
- ADM1023_REG_REM_TEMP_PREC);
- data->remote_temp_os_prec =
- i2c_smbus_read_byte_data(client,
- ADM1023_REG_REM_TOS_PREC);
- data->remote_temp_hyst_prec =
- i2c_smbus_read_byte_data(client,
- ADM1023_REG_REM_THYST_PREC);
+ /* The ADM1023 provides 3 extra bits of precision for
+ * the remote sensor in extra registers. */
+ data->temp[1] += 125 * (i2c_smbus_read_byte_data(
+ client, ADM1023_REG_REM_TEMP_PREC) >> 5);
+ data->temp_max[1] += 125 * (i2c_smbus_read_byte_data(
+ client, ADM1023_REG_REM_TOS_PREC) >> 5);
+ data->temp_min[1] += 125 * (i2c_smbus_read_byte_data(
+ client, ADM1023_REG_REM_THYST_PREC) >> 5);
data->remote_temp_offset =
i2c_smbus_read_byte_data(client,
ADM1023_REG_REM_OFFSET);
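The adm1021 update path now stores temperatures in millidegrees directly and, for the ADM1023, folds the three extra precision bits (bits 7:5 of the *_PREC registers, 0.125 degC steps) into the same value instead of keeping them in separate fields. The conversion, pulled out as a runnable sketch with made-up register contents:

    #include <stdio.h>
    #include <stdint.h>

    /* Register contents below are examples; the arithmetic itself follows
     * adm1021_update_device() after this patch. */
    int main(void)
    {
        uint8_t temp_reg = 0xE7;   /* -25 degC as a signed 8-bit value */
        uint8_t prec_reg = 0x60;   /* bits 7:5 = 3 -> 3 * 0.125 degC */

        int temp_mc = 1000 * (int8_t)temp_reg;   /* -25000 */
        temp_mc += 125 * (prec_reg >> 5);        /* + 375  */

        printf("temp2_input = %d millidegrees\n", temp_mc);  /* -24625 */
        return 0;
    }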
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 789441830cd..56905955352 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -37,6 +37,7 @@
#define ADM1031_REG_PWM (0x22)
#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr))
+#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr))
#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr))
#define ADM1031_REG_TEMP_MIN(nr) (0x15 + 4 * (nr))
#define ADM1031_REG_TEMP_CRIT(nr) (0x16 + 4 * (nr))
@@ -93,6 +94,7 @@ struct adm1031_data {
u8 auto_temp_min[3];
u8 auto_temp_off[3];
u8 auto_temp_max[3];
+ s8 temp_offset[3];
s8 temp_min[3];
s8 temp_max[3];
s8 temp_crit[3];
@@ -145,6 +147,10 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
#define TEMP_FROM_REG_EXT(val, ext) (TEMP_FROM_REG(val) + (ext) * 125)
+#define TEMP_OFFSET_TO_REG(val) (TEMP_TO_REG(val) & 0x8f)
+#define TEMP_OFFSET_FROM_REG(val) TEMP_FROM_REG((val) < 0 ? \
+ (val) | 0x70 : (val))
+
#define FAN_FROM_REG(reg, div) ((reg) ? (11250 * 60) / ((reg) * (div)) : 0)
static int FAN_TO_REG(int reg, int div)
@@ -585,6 +591,14 @@ static ssize_t show_temp(struct device *dev,
(((data->ext_temp[nr] >> ((nr - 1) * 3)) & 7));
return sprintf(buf, "%d\n", TEMP_FROM_REG_EXT(data->temp[nr], ext));
}
+static ssize_t show_temp_offset(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = to_sensor_dev_attr(attr)->index;
+ struct adm1031_data *data = adm1031_update_device(dev);
+ return sprintf(buf, "%d\n",
+ TEMP_OFFSET_FROM_REG(data->temp_offset[nr]));
+}
static ssize_t show_temp_min(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -606,6 +620,24 @@ static ssize_t show_temp_crit(struct device *dev,
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
+static ssize_t set_temp_offset(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+ int nr = to_sensor_dev_attr(attr)->index;
+ int val;
+
+ val = simple_strtol(buf, NULL, 10);
+ val = SENSORS_LIMIT(val, -15000, 15000);
+ mutex_lock(&data->update_lock);
+ data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
+ adm1031_write_value(client, ADM1031_REG_TEMP_OFFSET(nr),
+ data->temp_offset[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -661,6 +693,8 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
#define temp_reg(offset) \
static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
show_temp, NULL, offset - 1); \
+static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
+ show_temp_offset, set_temp_offset, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
show_temp_min, set_temp_min, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
@@ -714,6 +748,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_auto_fan1_channel.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -721,6 +756,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
@@ -757,6 +793,7 @@ static struct attribute *adm1031_attributes_opt[] = {
&sensor_dev_attr_pwm2.dev_attr.attr,
&sensor_dev_attr_auto_fan2_channel.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_offset.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
@@ -937,6 +974,9 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
}
data->temp[chan] = newh;
+ data->temp_offset[chan] =
+ adm1031_read_value(client,
+ ADM1031_REG_TEMP_OFFSET(chan));
data->temp_min[chan] =
adm1031_read_value(client,
ADM1031_REG_TEMP_MIN(chan));
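The new temp offset attribute packs a signed offset into a register that only uses the sign bit and the low nibble (TEMP_TO_REG(val) & 0x8f), and reconstructs it by filling bits 6:4 for negative values before converting back. TEMP_TO_REG()/TEMP_FROM_REG() are not shown in this hunk; the sketch below assumes they are a plain divide/multiply by 1000 on a signed byte, which may not match the driver exactly. Only the & 0x8f / | 0x70 handling is taken from the patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumption: a simple millidegree <-> signed-byte conversion. */
    #define TEMP_TO_REG(val)   ((int8_t)((val) / 1000))
    #define TEMP_FROM_REG(val) ((int)(int8_t)(val) * 1000)

    /* Taken from the patch: keep sign bit and low nibble; sign-extend on read. */
    #define TEMP_OFFSET_TO_REG(val)   (TEMP_TO_REG(val) & 0x8f)
    #define TEMP_OFFSET_FROM_REG(val) TEMP_FROM_REG((int8_t)(val) < 0 ? \
                                                    (val) | 0x70 : (val))

    int main(void)
    {
        int offsets[] = { 5000, -3000, -15000 };
        for (int i = 0; i < 3; i++) {
            uint8_t reg = TEMP_OFFSET_TO_REG(offsets[i]) & 0xff;
            printf("%6d mC -> reg 0x%02x -> %6d mC\n",
                   offsets[i], reg, TEMP_OFFSET_FROM_REG(reg));
        }
        return 0;
    }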
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 753b34885f9..7ea6a8f6605 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -178,6 +178,8 @@ static const int debug;
static struct platform_device *pdev;
static s16 rest_x;
static s16 rest_y;
+static u8 backlight_state[2];
+
static struct device *hwmon_dev;
static struct input_polled_dev *applesmc_idev;
@@ -497,17 +499,36 @@ static int applesmc_probe(struct platform_device *dev)
return 0;
}
-static int applesmc_resume(struct platform_device *dev)
+/* Synchronize device with memorized backlight state */
+static int applesmc_pm_resume(struct device *dev)
{
- return applesmc_device_init();
+ mutex_lock(&applesmc_lock);
+ if (applesmc_light)
+ applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2);
+ mutex_unlock(&applesmc_lock);
+ return 0;
}
+/* Reinitialize device on resume from hibernation */
+static int applesmc_pm_restore(struct device *dev)
+{
+ int ret = applesmc_device_init();
+ if (ret)
+ return ret;
+ return applesmc_pm_resume(dev);
+}
+
+static struct dev_pm_ops applesmc_pm_ops = {
+ .resume = applesmc_pm_resume,
+ .restore = applesmc_pm_restore,
+};
+
static struct platform_driver applesmc_driver = {
.probe = applesmc_probe,
- .resume = applesmc_resume,
.driver = {
.name = "applesmc",
.owner = THIS_MODULE,
+ .pm = &applesmc_pm_ops,
},
};
@@ -804,17 +825,10 @@ static ssize_t applesmc_calibrate_store(struct device *dev,
return count;
}
-/* Store the next backlight value to be written by the work */
-static unsigned int backlight_value;
-
static void applesmc_backlight_set(struct work_struct *work)
{
- u8 buffer[2];
-
mutex_lock(&applesmc_lock);
- buffer[0] = backlight_value;
- buffer[1] = 0x00;
- applesmc_write_key(BACKLIGHT_KEY, buffer, 2);
+ applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2);
mutex_unlock(&applesmc_lock);
}
static DECLARE_WORK(backlight_work, &applesmc_backlight_set);
@@ -824,7 +838,7 @@ static void applesmc_brightness_set(struct led_classdev *led_cdev,
{
int ret;
- backlight_value = value;
+ backlight_state[0] = value;
ret = queue_work(applesmc_led_wq, &backlight_work);
if (debug && (!ret))
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 93c17223b52..caef39cda8c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -157,17 +157,26 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
/* The 100C is default for both mobile and non mobile CPUs */
int tjmax = 100000;
- int ismobile = 1;
+ int tjmax_ee = 85000;
+ int usemsr_ee = 1;
int err;
u32 eax, edx;
/* Early chips have no MSR for TjMax */
if ((c->x86_model == 0xf) && (c->x86_mask < 4)) {
- ismobile = 0;
+ usemsr_ee = 0;
}
- if ((c->x86_model > 0xe) && (ismobile)) {
+ /* Atom CPUs seem to have TjMax at 90C */
+
+ if (c->x86_model == 0x1c) {
+ usemsr_ee = 0;
+ tjmax = 90000;
+ }
+
+ if ((c->x86_model > 0xe) && (usemsr_ee)) {
+ u8 platform_id;
/* Now we can detect the mobile CPU using Intel provided table
http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
@@ -179,13 +188,29 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
dev_warn(dev,
"Unable to access MSR 0x17, assuming desktop"
" CPU\n");
- ismobile = 0;
- } else if (!(eax & 0x10000000)) {
- ismobile = 0;
+ usemsr_ee = 0;
+ } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
+ /* Trust bit 28 up to Penryn, I could not find any
+ documentation on that; if you happen to know
+ someone at Intel please ask */
+ usemsr_ee = 0;
+ } else {
+ /* Platform ID bits 52:50 (EDX starts at bit 32) */
+ platform_id = (edx >> 18) & 0x7;
+
+ /* Mobile Penryn CPU seems to be platform ID 7 or 5
+ (guesswork) */
+ if ((c->x86_model == 0x17) &&
+ ((platform_id == 5) || (platform_id == 7))) {
+ /* If MSR EE bit is set, set it to 90 degrees C,
+ otherwise 105 degrees C */
+ tjmax_ee = 90000;
+ tjmax = 105000;
+ }
}
}
- if (ismobile) {
+ if (usemsr_ee) {
err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
if (err) {
@@ -193,9 +218,11 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
"Unable to access MSR 0xEE, for Tjmax, left"
" at default");
} else if (eax & 0x40000000) {
- tjmax = 85000;
+ tjmax = tjmax_ee;
}
- } else {
+ /* If we don't use MSR EE it means we are a desktop CPU (with the
+ exception of Atom) */
+ } else if (tjmax == 100000) {
dev_warn(dev, "Using relative temperature scale!\n");
}
@@ -248,9 +275,9 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
/* read the still undocumented IA32_TEMPERATURE_TARGET it exists
- on older CPUs but not in this register */
+ on older CPUs but not in this register; Atom CPUs don't have it either */
- if (c->x86_model > 0xe) {
+ if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
if (err) {
dev_warn(&pdev->dev, "Unable to read"
@@ -413,11 +440,15 @@ static int __init coretemp_init(void)
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
- /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */
+ /* check if family 6, models 0xe (Pentium M DC),
+ 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm),
+ 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom),
+ 0x1e (Lynnfield) */
if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
!((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
(c->x86_model == 0x16) || (c->x86_model == 0x17) ||
- (c->x86_model == 0x1A))) {
+ (c->x86_model == 0x1a) || (c->x86_model == 0x1c) ||
+ (c->x86_model == 0x1e))) {
/* supported CPU not found, but report the unknown
family 6 CPU */
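The TjMax selection above grows from a simple mobile/desktop split into a small decision tree: Atom (model 0x1c) is pinned to 90 degC without touching MSRs, early desktop Core chips keep the relative 100 degC default, and on newer parts the MSR 0xEE bit selects between tjmax_ee and tjmax (85/100 degC normally, 90/105 degC for mobile Penryn). Below is a compressed, userspace restatement of that flow; the MSR reads are replaced by plain parameters, so treat it as a summary rather than the driver itself.

    #include <stdio.h>

    /* Summary of adjust_tjmax() after this patch. msr17_mobile_bit and
     * msr_ee_bit stand in for the rdmsr_safe_on_cpu() results. */
    static int pick_tjmax(int model, int platform_id,
                          int msr17_mobile_bit, int msr_ee_bit)
    {
        int tjmax = 100000, tjmax_ee = 85000, usemsr_ee = 1;

        if (model == 0x1c)                   /* Atom */
            return 90000;
        if (model < 0x17 && !msr17_mobile_bit) {
            usemsr_ee = 0;                   /* desktop pre-Penryn */
        } else if (model == 0x17 &&
                   (platform_id == 5 || platform_id == 7)) {
            tjmax_ee = 90000;                /* mobile Penryn (guesswork) */
            tjmax = 105000;
        }

        if (usemsr_ee && msr_ee_bit)
            return tjmax_ee;
        return tjmax;
    }

    int main(void)
    {
        printf("Atom: %d\n", pick_tjmax(0x1c, 0, 0, 0));
        printf("mobile Penryn, EE set: %d\n", pick_tjmax(0x17, 7, 1, 1));
        printf("desktop Core 2: %d\n", pick_tjmax(0xf, 0, 0, 0));
        return 0;
    }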
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 9814d51b3af..2c2cb1ec94c 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1134,7 +1134,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
res = PWM_FREQ_FROM_REG(data->pwm_freq[ix]);
break;
case SYS_PWM_ENABLE:
- if (ix > 3) {
+ if (ix >= 3) {
res = 1; /* pwm[5-6] hard-wired to manual mode */
} else {
res = PWM_EN_FROM_REG(data->pwm_config[ix]);
diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c
deleted file mode 100644
index 12c70e402cb..00000000000
--- a/drivers/hwmon/fscher.c
+++ /dev/null
@@ -1,680 +0,0 @@
-/*
- * fscher.c - Part of lm_sensors, Linux kernel modules for hardware
- * monitoring
- * Copyright (C) 2003, 2004 Reinhard Nissl <rnissl@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*
- * fujitsu siemens hermes chip,
- * module based on fscpos.c
- * Copyright (C) 2000 Hermann Jung <hej@odn.de>
- * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
- * and Philip Edelbrock <phil@netroedge.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/sysfs.h>
-
-/*
- * Addresses to scan
- */
-
-static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
-
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(fscher);
-
-/*
- * The FSCHER registers
- */
-
-/* chip identification */
-#define FSCHER_REG_IDENT_0 0x00
-#define FSCHER_REG_IDENT_1 0x01
-#define FSCHER_REG_IDENT_2 0x02
-#define FSCHER_REG_REVISION 0x03
-
-/* global control and status */
-#define FSCHER_REG_EVENT_STATE 0x04
-#define FSCHER_REG_CONTROL 0x05
-
-/* watchdog */
-#define FSCHER_REG_WDOG_PRESET 0x28
-#define FSCHER_REG_WDOG_STATE 0x23
-#define FSCHER_REG_WDOG_CONTROL 0x21
-
-/* fan 0 */
-#define FSCHER_REG_FAN0_MIN 0x55
-#define FSCHER_REG_FAN0_ACT 0x0e
-#define FSCHER_REG_FAN0_STATE 0x0d
-#define FSCHER_REG_FAN0_RIPPLE 0x0f
-
-/* fan 1 */
-#define FSCHER_REG_FAN1_MIN 0x65
-#define FSCHER_REG_FAN1_ACT 0x6b
-#define FSCHER_REG_FAN1_STATE 0x62
-#define FSCHER_REG_FAN1_RIPPLE 0x6f
-
-/* fan 2 */
-#define FSCHER_REG_FAN2_MIN 0xb5
-#define FSCHER_REG_FAN2_ACT 0xbb
-#define FSCHER_REG_FAN2_STATE 0xb2
-#define FSCHER_REG_FAN2_RIPPLE 0xbf
-
-/* voltage supervision */
-#define FSCHER_REG_VOLT_12 0x45
-#define FSCHER_REG_VOLT_5 0x42
-#define FSCHER_REG_VOLT_BATT 0x48
-
-/* temperature 0 */
-#define FSCHER_REG_TEMP0_ACT 0x64
-#define FSCHER_REG_TEMP0_STATE 0x71
-
-/* temperature 1 */
-#define FSCHER_REG_TEMP1_ACT 0x32
-#define FSCHER_REG_TEMP1_STATE 0x81
-
-/* temperature 2 */
-#define FSCHER_REG_TEMP2_ACT 0x35
-#define FSCHER_REG_TEMP2_STATE 0x91
-
-/*
- * Functions declaration
- */
-
-static int fscher_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int fscher_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
-static int fscher_remove(struct i2c_client *client);
-static struct fscher_data *fscher_update_device(struct device *dev);
-static void fscher_init_client(struct i2c_client *client);
-
-static int fscher_read_value(struct i2c_client *client, u8 reg);
-static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value);
-
-/*
- * Driver data (common to all clients)
- */
-
-static const struct i2c_device_id fscher_id[] = {
- { "fscher", fscher },
- { }
-};
-
-static struct i2c_driver fscher_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "fscher",
- },
- .probe = fscher_probe,
- .remove = fscher_remove,
- .id_table = fscher_id,
- .detect = fscher_detect,
- .address_data = &addr_data,
-};
-
-/*
- * Client data (each client gets its own)
- */
-
-struct fscher_data {
- struct device *hwmon_dev;
- struct mutex update_lock;
- char valid; /* zero until following fields are valid */
- unsigned long last_updated; /* in jiffies */
-
- /* register values */
- u8 revision; /* revision of chip */
- u8 global_event; /* global event status */
- u8 global_control; /* global control register */
- u8 watchdog[3]; /* watchdog */
- u8 volt[3]; /* 12, 5, battery voltage */
- u8 temp_act[3]; /* temperature */
- u8 temp_status[3]; /* status of sensor */
- u8 fan_act[3]; /* fans revolutions per second */
- u8 fan_status[3]; /* fan status */
- u8 fan_min[3]; /* fan min value for rps */
- u8 fan_ripple[3]; /* divider for rps */
-};
-
-/*
- * Sysfs stuff
- */
-
-#define sysfs_r(kind, sub, offset, reg) \
-static ssize_t show_##kind##sub (struct fscher_data *, char *, int); \
-static ssize_t show_##kind##offset##sub (struct device *, struct device_attribute *attr, char *); \
-static ssize_t show_##kind##offset##sub (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- struct fscher_data *data = fscher_update_device(dev); \
- return show_##kind##sub(data, buf, (offset)); \
-}
-
-#define sysfs_w(kind, sub, offset, reg) \
-static ssize_t set_##kind##sub (struct i2c_client *, struct fscher_data *, const char *, size_t, int, int); \
-static ssize_t set_##kind##offset##sub (struct device *, struct device_attribute *attr, const char *, size_t); \
-static ssize_t set_##kind##offset##sub (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct fscher_data *data = i2c_get_clientdata(client); \
- return set_##kind##sub(client, data, buf, count, (offset), reg); \
-}
-
-#define sysfs_rw_n(kind, sub, offset, reg) \
-sysfs_r(kind, sub, offset, reg) \
-sysfs_w(kind, sub, offset, reg) \
-static DEVICE_ATTR(kind##offset##sub, S_IRUGO | S_IWUSR, show_##kind##offset##sub, set_##kind##offset##sub);
-
-#define sysfs_rw(kind, sub, reg) \
-sysfs_r(kind, sub, 0, reg) \
-sysfs_w(kind, sub, 0, reg) \
-static DEVICE_ATTR(kind##sub, S_IRUGO | S_IWUSR, show_##kind##0##sub, set_##kind##0##sub);
-
-#define sysfs_ro_n(kind, sub, offset, reg) \
-sysfs_r(kind, sub, offset, reg) \
-static DEVICE_ATTR(kind##offset##sub, S_IRUGO, show_##kind##offset##sub, NULL);
-
-#define sysfs_ro(kind, sub, reg) \
-sysfs_r(kind, sub, 0, reg) \
-static DEVICE_ATTR(kind, S_IRUGO, show_##kind##0##sub, NULL);
-
-#define sysfs_fan(offset, reg_status, reg_min, reg_ripple, reg_act) \
-sysfs_rw_n(pwm, , offset, reg_min) \
-sysfs_rw_n(fan, _status, offset, reg_status) \
-sysfs_rw_n(fan, _div , offset, reg_ripple) \
-sysfs_ro_n(fan, _input , offset, reg_act)
-
-#define sysfs_temp(offset, reg_status, reg_act) \
-sysfs_rw_n(temp, _status, offset, reg_status) \
-sysfs_ro_n(temp, _input , offset, reg_act)
-
-#define sysfs_in(offset, reg_act) \
-sysfs_ro_n(in, _input, offset, reg_act)
-
-#define sysfs_revision(reg_revision) \
-sysfs_ro(revision, , reg_revision)
-
-#define sysfs_alarms(reg_events) \
-sysfs_ro(alarms, , reg_events)
-
-#define sysfs_control(reg_control) \
-sysfs_rw(control, , reg_control)
-
-#define sysfs_watchdog(reg_control, reg_status, reg_preset) \
-sysfs_rw(watchdog, _control, reg_control) \
-sysfs_rw(watchdog, _status , reg_status) \
-sysfs_rw(watchdog, _preset , reg_preset)
-
-sysfs_fan(1, FSCHER_REG_FAN0_STATE, FSCHER_REG_FAN0_MIN,
- FSCHER_REG_FAN0_RIPPLE, FSCHER_REG_FAN0_ACT)
-sysfs_fan(2, FSCHER_REG_FAN1_STATE, FSCHER_REG_FAN1_MIN,
- FSCHER_REG_FAN1_RIPPLE, FSCHER_REG_FAN1_ACT)
-sysfs_fan(3, FSCHER_REG_FAN2_STATE, FSCHER_REG_FAN2_MIN,
- FSCHER_REG_FAN2_RIPPLE, FSCHER_REG_FAN2_ACT)
-
-sysfs_temp(1, FSCHER_REG_TEMP0_STATE, FSCHER_REG_TEMP0_ACT)
-sysfs_temp(2, FSCHER_REG_TEMP1_STATE, FSCHER_REG_TEMP1_ACT)
-sysfs_temp(3, FSCHER_REG_TEMP2_STATE, FSCHER_REG_TEMP2_ACT)
-
-sysfs_in(0, FSCHER_REG_VOLT_12)
-sysfs_in(1, FSCHER_REG_VOLT_5)
-sysfs_in(2, FSCHER_REG_VOLT_BATT)
-
-sysfs_revision(FSCHER_REG_REVISION)
-sysfs_alarms(FSCHER_REG_EVENTS)
-sysfs_control(FSCHER_REG_CONTROL)
-sysfs_watchdog(FSCHER_REG_WDOG_CONTROL, FSCHER_REG_WDOG_STATE, FSCHER_REG_WDOG_PRESET)
-
-static struct attribute *fscher_attributes[] = {
- &dev_attr_revision.attr,
- &dev_attr_alarms.attr,
- &dev_attr_control.attr,
-
- &dev_attr_watchdog_status.attr,
- &dev_attr_watchdog_control.attr,
- &dev_attr_watchdog_preset.attr,
-
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- &dev_attr_in2_input.attr,
-
- &dev_attr_fan1_status.attr,
- &dev_attr_fan1_div.attr,
- &dev_attr_fan1_input.attr,
- &dev_attr_pwm1.attr,
- &dev_attr_fan2_status.attr,
- &dev_attr_fan2_div.attr,
- &dev_attr_fan2_input.attr,
- &dev_attr_pwm2.attr,
- &dev_attr_fan3_status.attr,
- &dev_attr_fan3_div.attr,
- &dev_attr_fan3_input.attr,
- &dev_attr_pwm3.attr,
-
- &dev_attr_temp1_status.attr,
- &dev_attr_temp1_input.attr,
- &dev_attr_temp2_status.attr,
- &dev_attr_temp2_input.attr,
- &dev_attr_temp3_status.attr,
- &dev_attr_temp3_input.attr,
- NULL
-};
-
-static const struct attribute_group fscher_group = {
- .attrs = fscher_attributes,
-};
-
-/*
- * Real code
- */
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int fscher_detect(struct i2c_client *new_client, int kind,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = new_client->adapter;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
-
- /* Do the remaining detection unless force or force_fscher parameter */
- if (kind < 0) {
- if ((i2c_smbus_read_byte_data(new_client,
- FSCHER_REG_IDENT_0) != 0x48) /* 'H' */
- || (i2c_smbus_read_byte_data(new_client,
- FSCHER_REG_IDENT_1) != 0x45) /* 'E' */
- || (i2c_smbus_read_byte_data(new_client,
- FSCHER_REG_IDENT_2) != 0x52)) /* 'R' */
- return -ENODEV;
- }
-
- strlcpy(info->type, "fscher", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int fscher_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
-{
- struct fscher_data *data;
- int err;
-
- data = kzalloc(sizeof(struct fscher_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- i2c_set_clientdata(new_client, data);
- data->valid = 0;
- mutex_init(&data->update_lock);
-
- fscher_init_client(new_client);
-
- /* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &fscher_group)))
- goto exit_free;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &fscher_group);
-exit_free:
- kfree(data);
-exit:
- return err;
-}
-
-static int fscher_remove(struct i2c_client *client)
-{
- struct fscher_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &fscher_group);
-
- kfree(data);
- return 0;
-}
-
-static int fscher_read_value(struct i2c_client *client, u8 reg)
-{
- dev_dbg(&client->dev, "read reg 0x%02x\n", reg);
-
- return i2c_smbus_read_byte_data(client, reg);
-}
-
-static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value)
-{
- dev_dbg(&client->dev, "write reg 0x%02x, val 0x%02x\n",
- reg, value);
-
- return i2c_smbus_write_byte_data(client, reg, value);
-}
-
-/* Called when we have found a new FSC Hermes. */
-static void fscher_init_client(struct i2c_client *client)
-{
- struct fscher_data *data = i2c_get_clientdata(client);
-
- /* Read revision from chip */
- data->revision = fscher_read_value(client, FSCHER_REG_REVISION);
-}
-
-static struct fscher_data *fscher_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct fscher_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
-
- dev_dbg(&client->dev, "Starting fscher update\n");
-
- data->temp_act[0] = fscher_read_value(client, FSCHER_REG_TEMP0_ACT);
- data->temp_act[1] = fscher_read_value(client, FSCHER_REG_TEMP1_ACT);
- data->temp_act[2] = fscher_read_value(client, FSCHER_REG_TEMP2_ACT);
- data->temp_status[0] = fscher_read_value(client, FSCHER_REG_TEMP0_STATE);
- data->temp_status[1] = fscher_read_value(client, FSCHER_REG_TEMP1_STATE);
- data->temp_status[2] = fscher_read_value(client, FSCHER_REG_TEMP2_STATE);
-
- data->volt[0] = fscher_read_value(client, FSCHER_REG_VOLT_12);
- data->volt[1] = fscher_read_value(client, FSCHER_REG_VOLT_5);
- data->volt[2] = fscher_read_value(client, FSCHER_REG_VOLT_BATT);
-
- data->fan_act[0] = fscher_read_value(client, FSCHER_REG_FAN0_ACT);
- data->fan_act[1] = fscher_read_value(client, FSCHER_REG_FAN1_ACT);
- data->fan_act[2] = fscher_read_value(client, FSCHER_REG_FAN2_ACT);
- data->fan_status[0] = fscher_read_value(client, FSCHER_REG_FAN0_STATE);
- data->fan_status[1] = fscher_read_value(client, FSCHER_REG_FAN1_STATE);
- data->fan_status[2] = fscher_read_value(client, FSCHER_REG_FAN2_STATE);
- data->fan_min[0] = fscher_read_value(client, FSCHER_REG_FAN0_MIN);
- data->fan_min[1] = fscher_read_value(client, FSCHER_REG_FAN1_MIN);
- data->fan_min[2] = fscher_read_value(client, FSCHER_REG_FAN2_MIN);
- data->fan_ripple[0] = fscher_read_value(client, FSCHER_REG_FAN0_RIPPLE);
- data->fan_ripple[1] = fscher_read_value(client, FSCHER_REG_FAN1_RIPPLE);
- data->fan_ripple[2] = fscher_read_value(client, FSCHER_REG_FAN2_RIPPLE);
-
- data->watchdog[0] = fscher_read_value(client, FSCHER_REG_WDOG_PRESET);
- data->watchdog[1] = fscher_read_value(client, FSCHER_REG_WDOG_STATE);
- data->watchdog[2] = fscher_read_value(client, FSCHER_REG_WDOG_CONTROL);
-
- data->global_event = fscher_read_value(client, FSCHER_REG_EVENT_STATE);
- data->global_control = fscher_read_value(client,
- FSCHER_REG_CONTROL);
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
-
-
-#define FAN_INDEX_FROM_NUM(nr) ((nr) - 1)
-
-static ssize_t set_fan_status(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- /* bits 0..1, 3..7 reserved => mask with 0x04 */
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0x04;
-
- mutex_lock(&data->update_lock);
- data->fan_status[FAN_INDEX_FROM_NUM(nr)] &= ~v;
- fscher_write_value(client, reg, v);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_fan_status(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 0..1, 3..7 reserved => mask with 0x04 */
- return sprintf(buf, "%u\n", data->fan_status[FAN_INDEX_FROM_NUM(nr)] & 0x04);
-}
-
-static ssize_t set_pwm(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- unsigned long v = simple_strtoul(buf, NULL, 10);
-
- mutex_lock(&data->update_lock);
- data->fan_min[FAN_INDEX_FROM_NUM(nr)] = v > 0xff ? 0xff : v;
- fscher_write_value(client, reg, data->fan_min[FAN_INDEX_FROM_NUM(nr)]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_pwm(struct fscher_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", data->fan_min[FAN_INDEX_FROM_NUM(nr)]);
-}
-
-static ssize_t set_fan_div(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- /* supported values: 2, 4, 8 */
- unsigned long v = simple_strtoul(buf, NULL, 10);
-
- switch (v) {
- case 2: v = 1; break;
- case 4: v = 2; break;
- case 8: v = 3; break;
- default:
- dev_err(&client->dev, "fan_div value %ld not "
- "supported. Choose one of 2, 4 or 8!\n", v);
- return -EINVAL;
- }
-
- mutex_lock(&data->update_lock);
-
- /* bits 2..7 reserved => mask with 0x03 */
- data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] &= ~0x03;
- data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] |= v;
-
- fscher_write_value(client, reg, data->fan_ripple[FAN_INDEX_FROM_NUM(nr)]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_fan_div(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 2..7 reserved => mask with 0x03 */
- return sprintf(buf, "%u\n", 1 << (data->fan_ripple[FAN_INDEX_FROM_NUM(nr)] & 0x03));
-}
-
-#define RPM_FROM_REG(val) (val*60)
-
-static ssize_t show_fan_input (struct fscher_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", RPM_FROM_REG(data->fan_act[FAN_INDEX_FROM_NUM(nr)]));
-}
-
-
-
-#define TEMP_INDEX_FROM_NUM(nr) ((nr) - 1)
-
-static ssize_t set_temp_status(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- /* bits 2..7 reserved, 0 read only => mask with 0x02 */
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02;
-
- mutex_lock(&data->update_lock);
- data->temp_status[TEMP_INDEX_FROM_NUM(nr)] &= ~v;
- fscher_write_value(client, reg, v);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_temp_status(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 2..7 reserved => mask with 0x03 */
- return sprintf(buf, "%u\n", data->temp_status[TEMP_INDEX_FROM_NUM(nr)] & 0x03);
-}
-
-#define TEMP_FROM_REG(val) (((val) - 128) * 1000)
-
-static ssize_t show_temp_input(struct fscher_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_act[TEMP_INDEX_FROM_NUM(nr)]));
-}
-
-/*
- * The final conversion is specified in sensors.conf, as it depends on
- * mainboard specific values. We export the registers contents as
- * pseudo-hundredths-of-Volts (range 0V - 2.55V). Not that it makes much
- * sense per se, but it minimizes the conversions count and keeps the
- * values within a usual range.
- */
-#define VOLT_FROM_REG(val) ((val) * 10)
-
-static ssize_t show_in_input(struct fscher_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", VOLT_FROM_REG(data->volt[nr]));
-}
-
-
-
-static ssize_t show_revision(struct fscher_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", data->revision);
-}
-
-
-
-static ssize_t show_alarms(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 2, 5..6 reserved => mask with 0x9b */
- return sprintf(buf, "%u\n", data->global_event & 0x9b);
-}
-
-
-
-static ssize_t set_control(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- /* bits 1..7 reserved => mask with 0x01 */
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0x01;
-
- mutex_lock(&data->update_lock);
- data->global_control = v;
- fscher_write_value(client, reg, v);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_control(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 1..7 reserved => mask with 0x01 */
- return sprintf(buf, "%u\n", data->global_control & 0x01);
-}
-
-
-
-static ssize_t set_watchdog_control(struct i2c_client *client, struct
- fscher_data *data, const char *buf, size_t count,
- int nr, int reg)
-{
- /* bits 0..3 reserved => mask with 0xf0 */
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0xf0;
-
- mutex_lock(&data->update_lock);
- data->watchdog[2] &= ~0xf0;
- data->watchdog[2] |= v;
- fscher_write_value(client, reg, data->watchdog[2]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_watchdog_control(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 0..3 reserved, bit 5 write only => mask with 0xd0 */
- return sprintf(buf, "%u\n", data->watchdog[2] & 0xd0);
-}
-
-static ssize_t set_watchdog_status(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- /* bits 0, 2..7 reserved => mask with 0x02 */
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02;
-
- mutex_lock(&data->update_lock);
- data->watchdog[1] &= ~v;
- fscher_write_value(client, reg, v);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_watchdog_status(struct fscher_data *data, char *buf, int nr)
-{
- /* bits 0, 2..7 reserved => mask with 0x02 */
- return sprintf(buf, "%u\n", data->watchdog[1] & 0x02);
-}
-
-static ssize_t set_watchdog_preset(struct i2c_client *client, struct fscher_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0xff;
-
- mutex_lock(&data->update_lock);
- data->watchdog[0] = v;
- fscher_write_value(client, reg, data->watchdog[0]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_watchdog_preset(struct fscher_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", data->watchdog[0]);
-}
-
-static int __init sensors_fscher_init(void)
-{
- return i2c_add_driver(&fscher_driver);
-}
-
-static void __exit sensors_fscher_exit(void)
-{
- i2c_del_driver(&fscher_driver);
-}
-
-MODULE_AUTHOR("Reinhard Nissl <rnissl@gmx.de>");
-MODULE_DESCRIPTION("FSC Hermes driver");
-MODULE_LICENSE("GPL");
-
-module_init(sensors_fscher_init);
-module_exit(sensors_fscher_exit);
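For reference, the fscher driver above exports raw register contents with only minimal scaling: fan counts are revolutions per second, temperatures are offset by 128 and reported in millidegrees Celsius, and voltages are left as pseudo-hundredths of a volt for sensors.conf to convert. A small stand-alone sketch makes the arithmetic concrete; the raw readings are invented sample values, not measurements:

#include <stdio.h>

/* Same conversion macros as the fscher driver above; inputs are invented. */
#define RPM_FROM_REG(val)	((val) * 60)
#define TEMP_FROM_REG(val)	(((val) - 128) * 1000)
#define VOLT_FROM_REG(val)	((val) * 10)

int main(void)
{
	unsigned int fan_raw = 75, volt_raw = 120;
	int temp_raw = 153;

	printf("fan1_input:  %u\n", RPM_FROM_REG(fan_raw));	/* 4500 RPM */
	printf("temp1_input: %d\n", TEMP_FROM_REG(temp_raw));	/* 25000 = 25.0 degrees C */
	printf("in0_input:   %u\n", VOLT_FROM_REG(volt_raw));	/* 1200, scaled further by sensors.conf */
	return 0;
}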
diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c
deleted file mode 100644
index 8a7bcf500b4..00000000000
--- a/drivers/hwmon/fscpos.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
- fscpos.c - Kernel module for hardware monitoring with FSC Poseidon chips
- Copyright (C) 2004, 2005 Stefan Ott <stefan@desire.ch>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/*
- fujitsu siemens poseidon chip,
- module based on the old fscpos module by Hermann Jung <hej@odn.de> and
- the fscher module by Reinhard Nissl <rnissl@gmx.de>
-
- original module based on lm80.c
- Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
- and Philip Edelbrock <phil@netroedge.com>
-
- Thanks to Jean Delvare for reviewing my code and suggesting a lot of
- improvements.
-*/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/hwmon.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/sysfs.h>
-
-/*
- * Addresses to scan
- */
-static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
-
-/*
- * Insmod parameters
- */
-I2C_CLIENT_INSMOD_1(fscpos);
-
-/*
- * The FSCPOS registers
- */
-
-/* chip identification */
-#define FSCPOS_REG_IDENT_0 0x00
-#define FSCPOS_REG_IDENT_1 0x01
-#define FSCPOS_REG_IDENT_2 0x02
-#define FSCPOS_REG_REVISION 0x03
-
-/* global control and status */
-#define FSCPOS_REG_EVENT_STATE 0x04
-#define FSCPOS_REG_CONTROL 0x05
-
-/* watchdog */
-#define FSCPOS_REG_WDOG_PRESET 0x28
-#define FSCPOS_REG_WDOG_STATE 0x23
-#define FSCPOS_REG_WDOG_CONTROL 0x21
-
-/* voltages */
-#define FSCPOS_REG_VOLT_12 0x45
-#define FSCPOS_REG_VOLT_5 0x42
-#define FSCPOS_REG_VOLT_BATT 0x48
-
-/* fans - the chip does not support minimum speed for fan2 */
-static u8 FSCPOS_REG_PWM[] = { 0x55, 0x65 };
-static u8 FSCPOS_REG_FAN_ACT[] = { 0x0e, 0x6b, 0xab };
-static u8 FSCPOS_REG_FAN_STATE[] = { 0x0d, 0x62, 0xa2 };
-static u8 FSCPOS_REG_FAN_RIPPLE[] = { 0x0f, 0x6f, 0xaf };
-
-/* temperatures */
-static u8 FSCPOS_REG_TEMP_ACT[] = { 0x64, 0x32, 0x35 };
-static u8 FSCPOS_REG_TEMP_STATE[] = { 0x71, 0x81, 0x91 };
-
-/*
- * Functions declaration
- */
-static int fscpos_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int fscpos_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
-static int fscpos_remove(struct i2c_client *client);
-
-static int fscpos_read_value(struct i2c_client *client, u8 reg);
-static int fscpos_write_value(struct i2c_client *client, u8 reg, u8 value);
-static struct fscpos_data *fscpos_update_device(struct device *dev);
-static void fscpos_init_client(struct i2c_client *client);
-
-static void reset_fan_alarm(struct i2c_client *client, int nr);
-
-/*
- * Driver data (common to all clients)
- */
-static const struct i2c_device_id fscpos_id[] = {
- { "fscpos", fscpos },
- { }
-};
-
-static struct i2c_driver fscpos_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "fscpos",
- },
- .probe = fscpos_probe,
- .remove = fscpos_remove,
- .id_table = fscpos_id,
- .detect = fscpos_detect,
- .address_data = &addr_data,
-};
-
-/*
- * Client data (each client gets its own)
- */
-struct fscpos_data {
- struct device *hwmon_dev;
- struct mutex update_lock;
- char valid; /* 0 until following fields are valid */
- unsigned long last_updated; /* In jiffies */
-
- /* register values */
- u8 revision; /* revision of chip */
- u8 global_event; /* global event status */
- u8 global_control; /* global control register */
- u8 wdog_control; /* watchdog control */
- u8 wdog_state; /* watchdog status */
- u8 wdog_preset; /* watchdog preset */
- u8 volt[3]; /* 12, 5, battery current */
- u8 temp_act[3]; /* temperature */
- u8 temp_status[3]; /* status of sensor */
- u8 fan_act[3]; /* fans revolutions per second */
- u8 fan_status[3]; /* fan status */
- u8 pwm[2]; /* fan min value for rps */
- u8 fan_ripple[3]; /* divider for rps */
-};
-
-/* Temperature */
-#define TEMP_FROM_REG(val) (((val) - 128) * 1000)
-
-static ssize_t show_temp_input(struct fscpos_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_act[nr - 1]));
-}
-
-static ssize_t show_temp_status(struct fscpos_data *data, char *buf, int nr)
-{
- /* bits 2..7 reserved => mask with 0x03 */
- return sprintf(buf, "%u\n", data->temp_status[nr - 1] & 0x03);
-}
-
-static ssize_t show_temp_reset(struct fscpos_data *data, char *buf, int nr)
-{
- return sprintf(buf, "1\n");
-}
-
-static ssize_t set_temp_reset(struct i2c_client *client, struct fscpos_data
- *data, const char *buf, size_t count, int nr, int reg)
-{
- unsigned long v = simple_strtoul(buf, NULL, 10);
- if (v != 1) {
- dev_err(&client->dev, "temp_reset value %ld not supported. "
- "Use 1 to reset the alarm!\n", v);
- return -EINVAL;
- }
-
- dev_info(&client->dev, "You used the temp_reset feature which has not "
-				"been properly tested. Please report your "
- "experience to the module author.\n");
-
- /* Supported value: 2 (clears the status) */
- fscpos_write_value(client, FSCPOS_REG_TEMP_STATE[nr - 1], 2);
- return count;
-}
-
-/* Fans */
-#define RPM_FROM_REG(val) ((val) * 60)
-
-static ssize_t show_fan_status(struct fscpos_data *data, char *buf, int nr)
-{
- /* bits 0..1, 3..7 reserved => mask with 0x04 */
- return sprintf(buf, "%u\n", data->fan_status[nr - 1] & 0x04);
-}
-
-static ssize_t show_fan_input(struct fscpos_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", RPM_FROM_REG(data->fan_act[nr - 1]));
-}
-
-static ssize_t show_fan_ripple(struct fscpos_data *data, char *buf, int nr)
-{
- /* bits 2..7 reserved => mask with 0x03 */
- return sprintf(buf, "%u\n", data->fan_ripple[nr - 1] & 0x03);
-}
-
-static ssize_t set_fan_ripple(struct i2c_client *client, struct fscpos_data
- *data, const char *buf, size_t count, int nr, int reg)
-{
- /* supported values: 2, 4, 8 */
- unsigned long v = simple_strtoul(buf, NULL, 10);
-
- switch (v) {
- case 2: v = 1; break;
- case 4: v = 2; break;
- case 8: v = 3; break;
- default:
- dev_err(&client->dev, "fan_ripple value %ld not supported. "
- "Must be one of 2, 4 or 8!\n", v);
- return -EINVAL;
- }
-
- mutex_lock(&data->update_lock);
- /* bits 2..7 reserved => mask with 0x03 */
- data->fan_ripple[nr - 1] &= ~0x03;
- data->fan_ripple[nr - 1] |= v;
-
- fscpos_write_value(client, reg, data->fan_ripple[nr - 1]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_pwm(struct fscpos_data *data, char *buf, int nr)
-{
- return sprintf(buf, "%u\n", data->pwm[nr - 1]);
-}
-
-static ssize_t set_pwm(struct i2c_client *client, struct fscpos_data *data,
- const char *buf, size_t count, int nr, int reg)
-{
- unsigned long v = simple_strtoul(buf, NULL, 10);
-
- /* Range: 0..255 */
- if (v < 0) v = 0;
- if (v > 255) v = 255;
-
- mutex_lock(&data->update_lock);
- data->pwm[nr - 1] = v;
- fscpos_write_value(client, reg, data->pwm[nr - 1]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static void reset_fan_alarm(struct i2c_client *client, int nr)
-{
- fscpos_write_value(client, FSCPOS_REG_FAN_STATE[nr], 4);
-}
-
-/* Volts */
-#define VOLT_FROM_REG(val, mult) ((val) * (mult) / 255)
-
-static ssize_t show_volt_12(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct fscpos_data *data = fscpos_update_device(dev);
- return sprintf(buf, "%u\n", VOLT_FROM_REG(data->volt[0], 14200));
-}
-
-static ssize_t show_volt_5(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct fscpos_data *data = fscpos_update_device(dev);
- return sprintf(buf, "%u\n", VOLT_FROM_REG(data->volt[1], 6600));
-}
-
-static ssize_t show_volt_batt(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct fscpos_data *data = fscpos_update_device(dev);
- return sprintf(buf, "%u\n", VOLT_FROM_REG(data->volt[2], 3300));
-}
-
-/* Watchdog */
-static ssize_t show_wdog_control(struct fscpos_data *data, char *buf)
-{
- /* bits 0..3 reserved, bit 6 write only => mask with 0xb0 */
- return sprintf(buf, "%u\n", data->wdog_control & 0xb0);
-}
-
-static ssize_t set_wdog_control(struct i2c_client *client, struct fscpos_data
- *data, const char *buf, size_t count, int reg)
-{
- /* bits 0..3 reserved => mask with 0xf0 */
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0xf0;
-
- mutex_lock(&data->update_lock);
- data->wdog_control &= ~0xf0;
- data->wdog_control |= v;
- fscpos_write_value(client, reg, data->wdog_control);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_wdog_state(struct fscpos_data *data, char *buf)
-{
- /* bits 0, 2..7 reserved => mask with 0x02 */
- return sprintf(buf, "%u\n", data->wdog_state & 0x02);
-}
-
-static ssize_t set_wdog_state(struct i2c_client *client, struct fscpos_data
- *data, const char *buf, size_t count, int reg)
-{
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0x02;
-
- /* Valid values: 2 (clear) */
- if (v != 2) {
- dev_err(&client->dev, "wdog_state value %ld not supported. "
- "Must be 2 to clear the state!\n", v);
- return -EINVAL;
- }
-
- mutex_lock(&data->update_lock);
- data->wdog_state &= ~v;
- fscpos_write_value(client, reg, v);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t show_wdog_preset(struct fscpos_data *data, char *buf)
-{
- return sprintf(buf, "%u\n", data->wdog_preset);
-}
-
-static ssize_t set_wdog_preset(struct i2c_client *client, struct fscpos_data
- *data, const char *buf, size_t count, int reg)
-{
- unsigned long v = simple_strtoul(buf, NULL, 10) & 0xff;
-
- mutex_lock(&data->update_lock);
- data->wdog_preset = v;
- fscpos_write_value(client, reg, data->wdog_preset);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-/* Event */
-static ssize_t show_event(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	/* bits 2, 5..6 reserved => mask with 0x9b */
- struct fscpos_data *data = fscpos_update_device(dev);
- return sprintf(buf, "%u\n", data->global_event & 0x9b);
-}
-
-/*
- * Sysfs stuff
- */
-#define create_getter(kind, sub) \
- static ssize_t sysfs_show_##kind##sub(struct device *dev, struct device_attribute *attr, char *buf) \
- { \
- struct fscpos_data *data = fscpos_update_device(dev); \
- return show_##kind##sub(data, buf); \
- }
-
-#define create_getter_n(kind, offset, sub) \
- static ssize_t sysfs_show_##kind##offset##sub(struct device *dev, struct device_attribute *attr, char\
- *buf) \
- { \
- struct fscpos_data *data = fscpos_update_device(dev); \
- return show_##kind##sub(data, buf, offset); \
- }
-
-#define create_setter(kind, sub, reg) \
- static ssize_t sysfs_set_##kind##sub (struct device *dev, struct device_attribute *attr, const char \
- *buf, size_t count) \
- { \
- struct i2c_client *client = to_i2c_client(dev); \
- struct fscpos_data *data = i2c_get_clientdata(client); \
- return set_##kind##sub(client, data, buf, count, reg); \
- }
-
-#define create_setter_n(kind, offset, sub, reg) \
- static ssize_t sysfs_set_##kind##offset##sub (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
- { \
- struct i2c_client *client = to_i2c_client(dev); \
- struct fscpos_data *data = i2c_get_clientdata(client); \
- return set_##kind##sub(client, data, buf, count, offset, reg);\
- }
-
-#define create_sysfs_device_ro(kind, sub, offset) \
- static DEVICE_ATTR(kind##offset##sub, S_IRUGO, \
- sysfs_show_##kind##offset##sub, NULL);
-
-#define create_sysfs_device_rw(kind, sub, offset) \
- static DEVICE_ATTR(kind##offset##sub, S_IRUGO | S_IWUSR, \
- sysfs_show_##kind##offset##sub, sysfs_set_##kind##offset##sub);
-
-#define sysfs_ro_n(kind, sub, offset) \
- create_getter_n(kind, offset, sub); \
- create_sysfs_device_ro(kind, sub, offset);
-
-#define sysfs_rw_n(kind, sub, offset, reg) \
- create_getter_n(kind, offset, sub); \
- create_setter_n(kind, offset, sub, reg); \
- create_sysfs_device_rw(kind, sub, offset);
-
-#define sysfs_rw(kind, sub, reg) \
- create_getter(kind, sub); \
- create_setter(kind, sub, reg); \
- create_sysfs_device_rw(kind, sub,);
-
-#define sysfs_fan_with_min(offset, reg_status, reg_ripple, reg_min) \
- sysfs_fan(offset, reg_status, reg_ripple); \
- sysfs_rw_n(pwm,, offset, reg_min);
-
-#define sysfs_fan(offset, reg_status, reg_ripple) \
- sysfs_ro_n(fan, _input, offset); \
- sysfs_ro_n(fan, _status, offset); \
- sysfs_rw_n(fan, _ripple, offset, reg_ripple);
-
-#define sysfs_temp(offset, reg_status) \
- sysfs_ro_n(temp, _input, offset); \
- sysfs_ro_n(temp, _status, offset); \
- sysfs_rw_n(temp, _reset, offset, reg_status);
-
-#define sysfs_watchdog(reg_wdog_preset, reg_wdog_state, reg_wdog_control) \
- sysfs_rw(wdog, _control, reg_wdog_control); \
- sysfs_rw(wdog, _preset, reg_wdog_preset); \
- sysfs_rw(wdog, _state, reg_wdog_state);
-
-sysfs_fan_with_min(1, FSCPOS_REG_FAN_STATE[0], FSCPOS_REG_FAN_RIPPLE[0],
- FSCPOS_REG_PWM[0]);
-sysfs_fan_with_min(2, FSCPOS_REG_FAN_STATE[1], FSCPOS_REG_FAN_RIPPLE[1],
- FSCPOS_REG_PWM[1]);
-sysfs_fan(3, FSCPOS_REG_FAN_STATE[2], FSCPOS_REG_FAN_RIPPLE[2]);
-
-sysfs_temp(1, FSCPOS_REG_TEMP_STATE[0]);
-sysfs_temp(2, FSCPOS_REG_TEMP_STATE[1]);
-sysfs_temp(3, FSCPOS_REG_TEMP_STATE[2]);
-
-sysfs_watchdog(FSCPOS_REG_WDOG_PRESET, FSCPOS_REG_WDOG_STATE,
- FSCPOS_REG_WDOG_CONTROL);
-
-static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);
-static DEVICE_ATTR(in0_input, S_IRUGO, show_volt_12, NULL);
-static DEVICE_ATTR(in1_input, S_IRUGO, show_volt_5, NULL);
-static DEVICE_ATTR(in2_input, S_IRUGO, show_volt_batt, NULL);
-
-static struct attribute *fscpos_attributes[] = {
- &dev_attr_event.attr,
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- &dev_attr_in2_input.attr,
-
- &dev_attr_wdog_control.attr,
- &dev_attr_wdog_preset.attr,
- &dev_attr_wdog_state.attr,
-
- &dev_attr_temp1_input.attr,
- &dev_attr_temp1_status.attr,
- &dev_attr_temp1_reset.attr,
- &dev_attr_temp2_input.attr,
- &dev_attr_temp2_status.attr,
- &dev_attr_temp2_reset.attr,
- &dev_attr_temp3_input.attr,
- &dev_attr_temp3_status.attr,
- &dev_attr_temp3_reset.attr,
-
- &dev_attr_fan1_input.attr,
- &dev_attr_fan1_status.attr,
- &dev_attr_fan1_ripple.attr,
- &dev_attr_pwm1.attr,
- &dev_attr_fan2_input.attr,
- &dev_attr_fan2_status.attr,
- &dev_attr_fan2_ripple.attr,
- &dev_attr_pwm2.attr,
- &dev_attr_fan3_input.attr,
- &dev_attr_fan3_status.attr,
- &dev_attr_fan3_ripple.attr,
- NULL
-};
-
-static const struct attribute_group fscpos_group = {
- .attrs = fscpos_attributes,
-};
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int fscpos_detect(struct i2c_client *new_client, int kind,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = new_client->adapter;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
-
-	/* Do the remaining detection unless the force or force_fscpos parameter was used */
- if (kind < 0) {
- if ((fscpos_read_value(new_client, FSCPOS_REG_IDENT_0)
- != 0x50) /* 'P' */
- || (fscpos_read_value(new_client, FSCPOS_REG_IDENT_1)
- != 0x45) /* 'E' */
- || (fscpos_read_value(new_client, FSCPOS_REG_IDENT_2)
- != 0x47))/* 'G' */
- return -ENODEV;
- }
-
- strlcpy(info->type, "fscpos", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int fscpos_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
-{
- struct fscpos_data *data;
- int err;
-
- data = kzalloc(sizeof(struct fscpos_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- i2c_set_clientdata(new_client, data);
- data->valid = 0;
- mutex_init(&data->update_lock);
-
-	/* Initialize the fscpos chip */
- fscpos_init_client(new_client);
-
- /* Announce that the chip was found */
- dev_info(&new_client->dev, "Found fscpos chip, rev %u\n", data->revision);
-
- /* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &fscpos_group)))
- goto exit_free;
-
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
-exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &fscpos_group);
-exit_free:
- kfree(data);
-exit:
- return err;
-}
-
-static int fscpos_remove(struct i2c_client *client)
-{
- struct fscpos_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &fscpos_group);
-
- kfree(data);
- return 0;
-}
-
-static int fscpos_read_value(struct i2c_client *client, u8 reg)
-{
- dev_dbg(&client->dev, "Read reg 0x%02x\n", reg);
- return i2c_smbus_read_byte_data(client, reg);
-}
-
-static int fscpos_write_value(struct i2c_client *client, u8 reg, u8 value)
-{
- dev_dbg(&client->dev, "Write reg 0x%02x, val 0x%02x\n", reg, value);
- return i2c_smbus_write_byte_data(client, reg, value);
-}
-
-/* Called when we have found a new FSCPOS chip */
-static void fscpos_init_client(struct i2c_client *client)
-{
- struct fscpos_data *data = i2c_get_clientdata(client);
-
- /* read revision from chip */
- data->revision = fscpos_read_value(client, FSCPOS_REG_REVISION);
-}
-
-static struct fscpos_data *fscpos_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct fscpos_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
- int i;
-
- dev_dbg(&client->dev, "Starting fscpos update\n");
-
- for (i = 0; i < 3; i++) {
- data->temp_act[i] = fscpos_read_value(client,
- FSCPOS_REG_TEMP_ACT[i]);
- data->temp_status[i] = fscpos_read_value(client,
- FSCPOS_REG_TEMP_STATE[i]);
- data->fan_act[i] = fscpos_read_value(client,
- FSCPOS_REG_FAN_ACT[i]);
- data->fan_status[i] = fscpos_read_value(client,
- FSCPOS_REG_FAN_STATE[i]);
- data->fan_ripple[i] = fscpos_read_value(client,
- FSCPOS_REG_FAN_RIPPLE[i]);
- if (i < 2) {
- /* fan2_min is not supported by the chip */
- data->pwm[i] = fscpos_read_value(client,
- FSCPOS_REG_PWM[i]);
- }
- /* reset fan status if speed is back to > 0 */
- if (data->fan_status[i] != 0 && data->fan_act[i] > 0) {
- reset_fan_alarm(client, i);
- }
- }
-
- data->volt[0] = fscpos_read_value(client, FSCPOS_REG_VOLT_12);
- data->volt[1] = fscpos_read_value(client, FSCPOS_REG_VOLT_5);
- data->volt[2] = fscpos_read_value(client, FSCPOS_REG_VOLT_BATT);
-
- data->wdog_preset = fscpos_read_value(client,
- FSCPOS_REG_WDOG_PRESET);
- data->wdog_state = fscpos_read_value(client,
- FSCPOS_REG_WDOG_STATE);
- data->wdog_control = fscpos_read_value(client,
- FSCPOS_REG_WDOG_CONTROL);
-
- data->global_event = fscpos_read_value(client,
- FSCPOS_REG_EVENT_STATE);
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
- mutex_unlock(&data->update_lock);
- return data;
-}
-
-static int __init sm_fscpos_init(void)
-{
- return i2c_add_driver(&fscpos_driver);
-}
-
-static void __exit sm_fscpos_exit(void)
-{
- i2c_del_driver(&fscpos_driver);
-}
-
-MODULE_AUTHOR("Stefan Ott <stefan@desire.ch> based on work from Hermann Jung "
- "<hej@odn.de>, Frodo Looijaard <frodol@dds.nl>"
- " and Philip Edelbrock <phil@netroedge.com>");
-MODULE_DESCRIPTION("fujitsu siemens poseidon chip driver");
-MODULE_LICENSE("GPL");
-
-module_init(sm_fscpos_init);
-module_exit(sm_fscpos_exit);
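The sysfs plumbing in the fscpos driver above is generated entirely by the create_getter_n/create_setter_n macro family; hand-expanding one read-only case makes the pattern easier to follow. The expansion below is derived from those macros and is not part of the original file:

/* What sysfs_ro_n(fan, _input, 1) expands to, written out by hand. */
static ssize_t sysfs_show_fan1_input(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct fscpos_data *data = fscpos_update_device(dev);

	return show_fan_input(data, buf, 1);
}
static DEVICE_ATTR(fan1_input, S_IRUGO, sysfs_show_fan1_input, NULL);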
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 271338bdb6b..cf5afb9a10a 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -454,6 +454,15 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
(p->click_thresh_y << 4));
}
+ if (p->wakeup_flags && (dev->whoami == LIS_SINGLE_ID)) {
+ dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
+ dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_1, 1);
+ /* enable high pass filter for both free-fall units */
+ dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2);
+ }
+
if (p->irq_cfg)
dev->write(dev, CTRL_REG3, p->irq_cfg);
}
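The block added above only runs when the platform data supplies wakeup_flags and the chip identifies itself as an LIS302DL (LIS_SINGLE_ID). Assuming the platform-data structure is the usual struct lis3lv02d_platform_data, a board would opt in roughly as follows; the flag bits and threshold are illustrative placeholders rather than values taken from this patch:

#include <linux/lis3lv02d.h>	/* struct lis3lv02d_platform_data (assumed header) */

/* Illustrative platform data for the free-fall/wake-up unit configured above.
 * Field names match the hunk (p->wakeup_flags, p->wakeup_thresh); the numeric
 * values are placeholders for whatever a given board actually needs. */
static struct lis3lv02d_platform_data board_lis3_pdata = {
	.wakeup_flags	= 0x2a,	/* e.g. "high" events on X, Y and Z */
	.wakeup_thresh	= 8,	/* 7-bit threshold; the driver masks it with 0x7f */
};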
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index e320e2f511f..3e1ff46f72d 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -58,15 +58,17 @@ enum lis3_reg {
OUTZ_L = 0x2C,
OUTZ_H = 0x2D,
OUTZ = 0x2D,
- FF_WU_CFG = 0x30,
- FF_WU_SRC = 0x31,
- FF_WU_ACK = 0x32,
- FF_WU_THS_L = 0x34,
- FF_WU_THS_H = 0x35,
- FF_WU_DURATION = 0x36,
};
enum lis302d_reg {
+ FF_WU_CFG_1 = 0x30,
+ FF_WU_SRC_1 = 0x31,
+ FF_WU_THS_1 = 0x32,
+ FF_WU_DURATION_1 = 0x33,
+ FF_WU_CFG_2 = 0x34,
+ FF_WU_SRC_2 = 0x35,
+ FF_WU_THS_2 = 0x36,
+ FF_WU_DURATION_2 = 0x37,
CLICK_CFG = 0x38,
CLICK_SRC = 0x39,
CLICK_THSY_X = 0x3B,
@@ -77,6 +79,12 @@ enum lis302d_reg {
};
enum lis3lv02d_reg {
+ FF_WU_CFG = 0x30,
+ FF_WU_SRC = 0x31,
+ FF_WU_ACK = 0x32,
+ FF_WU_THS_L = 0x34,
+ FF_WU_THS_H = 0x35,
+ FF_WU_DURATION = 0x36,
DD_CFG = 0x38,
DD_SRC = 0x39,
DD_ACK = 0x3A,
@@ -107,6 +115,10 @@ enum lis3lv02d_ctrl2 {
CTRL2_FS = 0x80, /* Full Scale selection */
};
+enum lis302d_ctrl2 {
+ HP_FF_WU2 = 0x08,
+ HP_FF_WU1 = 0x04,
+};
enum lis3lv02d_ctrl3 {
CTRL3_CFS0 = 0x01,
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 3827ff04485..ecd739534f6 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -66,17 +66,16 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- lis3_dev.bus_priv = spi;
- lis3_dev.init = lis3_spi_init;
- lis3_dev.read = lis3_spi_read;
- lis3_dev.write = lis3_spi_write;
- lis3_dev.irq = spi->irq;
- lis3_dev.ac = lis3lv02d_axis_normal;
- lis3_dev.pdata = spi->dev.platform_data;
+ lis3_dev.bus_priv = spi;
+ lis3_dev.init = lis3_spi_init;
+ lis3_dev.read = lis3_spi_read;
+ lis3_dev.write = lis3_spi_write;
+ lis3_dev.irq = spi->irq;
+ lis3_dev.ac = lis3lv02d_axis_normal;
+ lis3_dev.pdata = spi->dev.platform_data;
spi_set_drvdata(spi, &lis3_dev);
- ret = lis3lv02d_init_device(&lis3_dev);
- return ret;
+ return lis3lv02d_init_device(&lis3_dev);
}
static int __devexit lis302dl_spi_remove(struct spi_device *spi)
@@ -87,6 +86,32 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+ if (!lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(&lis3_dev);
+
+ return 0;
+}
+
+static int lis3lv02d_spi_resume(struct spi_device *spi)
+{
+ struct lis3lv02d *lis3 = spi_get_drvdata(spi);
+
+ if (!lis3->pdata->wakeup_flags)
+ lis3lv02d_poweron(lis3);
+
+ return 0;
+}
+
+#else
+#define lis3lv02d_spi_suspend NULL
+#define lis3lv02d_spi_resume NULL
+#endif
+
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
@@ -94,6 +119,8 @@ static struct spi_driver lis302dl_spi_driver = {
},
.probe = lis302dl_spi_probe,
.remove = __devexit_p(lis302dl_spi_remove),
+ .suspend = lis3lv02d_spi_suspend,
+ .resume = lis3lv02d_spi_resume,
};
static int __init lis302dl_init(void)
@@ -112,4 +139,4 @@ module_exit(lis302dl_exit);
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("lis3lv02d SPI glue layer");
MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index ae6204f3321..ab8a5d3c769 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -32,6 +32,7 @@
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
@@ -130,11 +131,20 @@ static DEVICE_ATTR(name, S_IRUGO, lm70_show_name, NULL);
/*----------------------------------------------------------------------*/
-static int __devinit common_probe(struct spi_device *spi, int chip)
+static int __devinit lm70_probe(struct spi_device *spi)
{
+ int chip = spi_get_device_id(spi)->driver_data;
struct lm70 *p_lm70;
int status;
+ /* signaling is SPI_MODE_0 for both LM70 and TMP121 */
+ if (spi->mode & (SPI_CPOL | SPI_CPHA))
+ return -EINVAL;
+
+ /* 3-wire link (shared SI/SO) for LM70 */
+ if (chip == LM70_CHIP_LM70 && !(spi->mode & SPI_3WIRE))
+ return -EINVAL;
+
/* NOTE: we assume 8-bit words, and convert to 16 bits manually */
p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
@@ -170,24 +180,6 @@ out_dev_reg_failed:
return status;
}
-static int __devinit lm70_probe(struct spi_device *spi)
-{
- /* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
- if ((spi->mode & (SPI_CPOL | SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
- return -EINVAL;
-
- return common_probe(spi, LM70_CHIP_LM70);
-}
-
-static int __devinit tmp121_probe(struct spi_device *spi)
-{
- /* signaling is SPI_MODE_0 with only MISO connected */
- if (spi->mode & (SPI_CPOL | SPI_CPHA))
- return -EINVAL;
-
- return common_probe(spi, LM70_CHIP_TMP121);
-}
-
static int __devexit lm70_remove(struct spi_device *spi)
{
struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev);
@@ -201,41 +193,32 @@ static int __devexit lm70_remove(struct spi_device *spi)
return 0;
}
-static struct spi_driver tmp121_driver = {
- .driver = {
- .name = "tmp121",
- .owner = THIS_MODULE,
- },
- .probe = tmp121_probe,
- .remove = __devexit_p(lm70_remove),
+
+static const struct spi_device_id lm70_ids[] = {
+ { "lm70", LM70_CHIP_LM70 },
+ { "tmp121", LM70_CHIP_TMP121 },
+ { },
};
+MODULE_DEVICE_TABLE(spi, lm70_ids);
static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.owner = THIS_MODULE,
},
+ .id_table = lm70_ids,
.probe = lm70_probe,
.remove = __devexit_p(lm70_remove),
};
static int __init init_lm70(void)
{
- int ret = spi_register_driver(&lm70_driver);
- if (ret)
- return ret;
-
- ret = spi_register_driver(&tmp121_driver);
- if (ret)
- spi_unregister_driver(&lm70_driver);
-
- return ret;
+ return spi_register_driver(&lm70_driver);
}
static void __exit cleanup_lm70(void)
{
spi_unregister_driver(&lm70_driver);
- spi_unregister_driver(&tmp121_driver);
}
module_init(init_lm70);
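With the two probe routines folded into lm70_probe(), the SPI mode checks are now per chip: both parts must run in SPI_MODE_0, and only the LM70 additionally requires SPI_3WIRE. A board declaring an LM70 would therefore pass the mode roughly as follows; bus and chip-select numbers are placeholders:

#include <linux/spi/spi.h>

/* Hypothetical LM70 declaration; a TMP121 entry would use "tmp121" and
 * omit SPI_3WIRE. */
static struct spi_board_info lm70_board_info[] __initdata = {
	{
		.modalias	= "lm70",	/* matches lm70_ids[] above */
		.mode		= SPI_3WIRE,	/* CPOL/CPHA stay 0, i.e. SPI_MODE_0 */
		.max_speed_hz	= 500000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};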
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 9386e2a3921..6c9a04136e0 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -259,7 +259,7 @@ static int ltc4215_probe(struct i2c_client *client,
mutex_init(&data->update_lock);
/* Initialize the LTC4215 chip */
- /* TODO */
+ i2c_smbus_write_byte_data(client, LTC4215_FAULT, 0x00);
/* Register sysfs hooks */
ret = sysfs_create_group(&client->dev.kobj, &ltc4215_group);
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 034b2c51584..e3896433361 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -382,7 +382,8 @@ static int ltc4245_probe(struct i2c_client *client,
mutex_init(&data->update_lock);
/* Initialize the LTC4245 chip */
- /* TODO */
+ i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
+ i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
/* Register sysfs hooks */
ret = sysfs_create_group(&client->dev.kobj, &ltc4245_group);
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index bfaa665ccf3..9ac497271ad 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -242,3 +242,4 @@ module_exit(max1111_exit);
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_DESCRIPTION("MAX1111 ADC Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:max1111");
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 6290a259456..303c02694c3 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -562,7 +562,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
if (ret) {
dev_err(&pdev->dev, "sysfs create failed");
- goto err_free_data;
+ goto err_release_gpio_data;
}
ret = request_irq(gpio_to_irq(data->pdata->gpio_data),
@@ -581,10 +581,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
data->hwmon_dev = hwmon_device_register(data->dev);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
- goto err_release_gpio_data;
+ goto err_release_irq;
}
return 0;
+err_release_irq:
+ free_irq(gpio_to_irq(data->pdata->gpio_data), data);
err_release_gpio_data:
gpio_free(data->pdata->gpio_data);
err_release_gpio_sck:
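The relabelled gotos above restore the usual probe convention of unwinding in reverse order of acquisition, so the interrupt requested before hwmon_device_register() is now freed when registration fails. The generic shape of that pattern, with invented helper names, is:

#include <linux/platform_device.h>

/* Generic sketch of a probe error path that releases resources in the
 * reverse order they were acquired; the example_* helpers are made up. */
static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = example_create_sysfs(pdev);	/* step 1 */
	if (ret)
		return ret;

	ret = example_request_irq(pdev);	/* step 2 */
	if (ret)
		goto err_remove_sysfs;

	ret = example_register_hwmon(pdev);	/* step 3 */
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	example_free_irq(pdev);			/* undo step 2 */
err_remove_sysfs:
	example_remove_sysfs(pdev);		/* undo step 1 */
	return ret;
}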
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 711ca08ab77..d7ece131b4f 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -27,6 +27,14 @@ config I2C_BOARDINFO
boolean
default y
+config I2C_COMPAT
+ boolean "Enable compatibility bits for old user-space"
+ default y
+ help
+ Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
+ other user-space package which expects i2c adapters to be class
+ devices. If you don't know, say Y.
+
config I2C_CHARDEV
tristate "I2C device interface"
help
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 8206442fbab..737335ff2b2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -113,7 +113,7 @@ config I2C_ISCH
will be called i2c-isch.
config I2C_PIIX4
- tristate "Intel PIIX4 and compatible (ATI/Serverworks/Broadcom/SMSC)"
+ tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
depends on PCI
help
If you say yes to this option, support will be included for the Intel
@@ -128,6 +128,7 @@ config I2C_PIIX4
ATI SB600
ATI SB700
ATI SB800
+ AMD SB900
Serverworks OSB4
Serverworks CSB5
Serverworks CSB6
@@ -231,6 +232,22 @@ config I2C_VIAPRO
This driver can also be built as a module. If so, the module
will be called i2c-viapro.
+if ACPI
+
+comment "ACPI drivers"
+
+config I2C_SCMI
+ tristate "SMBus Control Method Interface"
+ help
+ This driver supports the SMBus Control Method Interface. It needs the
+ BIOS to declare ACPI control methods as described in the SMBus Control
+ Method Interface specification.
+
+ To compile this driver as a module, choose M here:
+ the module will be called i2c-scmi.
+
+endif # ACPI
+
comment "Mac SMBus host controller drivers"
depends on PPC_CHRP || PPC_PMAC
@@ -460,8 +477,8 @@ config I2C_PNX
will be called i2c-pnx.
config I2C_PXA
- tristate "Intel PXA2XX I2C adapter (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ARCH_PXA
+ tristate "Intel PXA2XX I2C adapter"
+ depends on ARCH_PXA || ARCH_MMP
help
If you have devices in the PXA I2C bus, say yes to this option.
This driver can also be built as a module. If so, the module
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e654263bfc0..ff937ac69f5 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -2,6 +2,9 @@
# Makefile for the i2c bus drivers.
#
+# ACPI drivers
+obj-$(CONFIG_I2C_SCMI) += i2c-scmi.o
+
# PC SMBus host controller drivers
obj-$(CONFIG_I2C_ALI1535) += i2c-ali1535.o
obj-$(CONFIG_I2C_ALI1563) += i2c-ali1563.o
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 0249a7d762b..a782c7a08f9 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -22,6 +22,7 @@
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
+ AMD SB900
SMSC Victory66
Note: we assume there can only be one device, with one SMBus interface.
@@ -479,6 +480,7 @@ static struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
@@ -499,9 +501,10 @@ static int __devinit piix4_probe(struct pci_dev *dev,
{
int retval;
- if ((dev->vendor == PCI_VENDOR_ID_ATI) &&
- (dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) &&
- (dev->revision >= 0x40))
+ if ((dev->vendor == PCI_VENDOR_ID_ATI &&
+ dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+ dev->revision >= 0x40) ||
+ dev->vendor == PCI_VENDOR_ID_AMD)
/* base address location etc changed in SB800 */
retval = piix4_setup_sb800(dev, id);
else
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index ec15cff556b..6ff6c20f1e7 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -586,7 +586,8 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
alg_data->mif.timer.data = (unsigned long)i2c_pnx->adapter;
/* Register I/O resource */
- if (!request_region(alg_data->base, I2C_PNX_REGION_SIZE, pdev->name)) {
+ if (!request_mem_region(alg_data->base, I2C_PNX_REGION_SIZE,
+ pdev->name)) {
dev_err(&pdev->dev,
"I/O region 0x%08x for I2C already in use.\n",
alg_data->base);
@@ -650,7 +651,7 @@ out_clock:
out_unmap:
iounmap((void *)alg_data->ioaddr);
out_release:
- release_region(alg_data->base, I2C_PNX_REGION_SIZE);
+ release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
out_drvdata:
platform_set_drvdata(pdev, NULL);
out:
@@ -667,7 +668,7 @@ static int __devexit i2c_pnx_remove(struct platform_device *pdev)
i2c_del_adapter(adap);
i2c_pnx->set_clock_stop(pdev);
iounmap((void *)alg_data->ioaddr);
- release_region(alg_data->base, I2C_PNX_REGION_SIZE);
+ release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
new file mode 100644
index 00000000000..b4a55d407bf
--- /dev/null
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -0,0 +1,429 @@
+/*
+ * SMBus driver for ACPI SMBus CMI
+ *
+ * Copyright (C) 2009 Crane Cai <crane.cai@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+
+#define ACPI_SMBUS_HC_CLASS "smbus"
+#define ACPI_SMBUS_HC_DEVICE_NAME "cmi"
+
+ACPI_MODULE_NAME("smbus_cmi");
+
+struct smbus_methods_t {
+ char *mt_info;
+ char *mt_sbr;
+ char *mt_sbw;
+};
+
+struct acpi_smbus_cmi {
+ acpi_handle handle;
+ struct i2c_adapter adapter;
+ u8 cap_info:1;
+ u8 cap_read:1;
+ u8 cap_write:1;
+};
+
+static const struct smbus_methods_t smbus_methods = {
+ .mt_info = "_SBI",
+ .mt_sbr = "_SBR",
+ .mt_sbw = "_SBW",
+};
+
+static const struct acpi_device_id acpi_smbus_cmi_ids[] = {
+ {"SMBUS01", 0},
+ {"", 0}
+};
+
+#define ACPI_SMBUS_STATUS_OK 0x00
+#define ACPI_SMBUS_STATUS_FAIL 0x07
+#define ACPI_SMBUS_STATUS_DNAK 0x10
+#define ACPI_SMBUS_STATUS_DERR 0x11
+#define ACPI_SMBUS_STATUS_CMD_DENY 0x12
+#define ACPI_SMBUS_STATUS_UNKNOWN 0x13
+#define ACPI_SMBUS_STATUS_ACC_DENY 0x17
+#define ACPI_SMBUS_STATUS_TIMEOUT 0x18
+#define ACPI_SMBUS_STATUS_NOTSUP 0x19
+#define ACPI_SMBUS_STATUS_BUSY 0x1a
+#define ACPI_SMBUS_STATUS_PEC 0x1f
+
+#define ACPI_SMBUS_PRTCL_WRITE 0x00
+#define ACPI_SMBUS_PRTCL_READ 0x01
+#define ACPI_SMBUS_PRTCL_QUICK 0x02
+#define ACPI_SMBUS_PRTCL_BYTE 0x04
+#define ACPI_SMBUS_PRTCL_BYTE_DATA 0x06
+#define ACPI_SMBUS_PRTCL_WORD_DATA 0x08
+#define ACPI_SMBUS_PRTCL_BLOCK_DATA 0x0a
+
+
+static int
+acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ int result = 0;
+ struct acpi_smbus_cmi *smbus_cmi = adap->algo_data;
+ unsigned char protocol;
+ acpi_status status = 0;
+ struct acpi_object_list input;
+ union acpi_object mt_params[5];
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ union acpi_object *pkg;
+ char *method;
+ int len = 0;
+
+ dev_dbg(&adap->dev, "access size: %d %s\n", size,
+ (read_write) ? "READ" : "WRITE");
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ protocol = ACPI_SMBUS_PRTCL_QUICK;
+ command = 0;
+ if (read_write == I2C_SMBUS_WRITE) {
+ mt_params[3].type = ACPI_TYPE_INTEGER;
+ mt_params[3].integer.value = 0;
+ mt_params[4].type = ACPI_TYPE_INTEGER;
+ mt_params[4].integer.value = 0;
+ }
+ break;
+
+ case I2C_SMBUS_BYTE:
+ protocol = ACPI_SMBUS_PRTCL_BYTE;
+ if (read_write == I2C_SMBUS_WRITE) {
+ mt_params[3].type = ACPI_TYPE_INTEGER;
+ mt_params[3].integer.value = 0;
+ mt_params[4].type = ACPI_TYPE_INTEGER;
+ mt_params[4].integer.value = 0;
+ } else {
+ command = 0;
+ }
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ protocol = ACPI_SMBUS_PRTCL_BYTE_DATA;
+ if (read_write == I2C_SMBUS_WRITE) {
+ mt_params[3].type = ACPI_TYPE_INTEGER;
+ mt_params[3].integer.value = 1;
+ mt_params[4].type = ACPI_TYPE_INTEGER;
+ mt_params[4].integer.value = data->byte;
+ }
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ protocol = ACPI_SMBUS_PRTCL_WORD_DATA;
+ if (read_write == I2C_SMBUS_WRITE) {
+ mt_params[3].type = ACPI_TYPE_INTEGER;
+ mt_params[3].integer.value = 2;
+ mt_params[4].type = ACPI_TYPE_INTEGER;
+ mt_params[4].integer.value = data->word;
+ }
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ protocol = ACPI_SMBUS_PRTCL_BLOCK_DATA;
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+ mt_params[3].type = ACPI_TYPE_INTEGER;
+ mt_params[3].integer.value = len;
+ mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.pointer = data->block + 1;
+ }
+ break;
+
+ default:
+ dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
+ return -EOPNOTSUPP;
+ }
+
+ if (read_write == I2C_SMBUS_READ) {
+ protocol |= ACPI_SMBUS_PRTCL_READ;
+ method = smbus_methods.mt_sbr;
+ input.count = 3;
+ } else {
+ protocol |= ACPI_SMBUS_PRTCL_WRITE;
+ method = smbus_methods.mt_sbw;
+ input.count = 5;
+ }
+
+ input.pointer = mt_params;
+ mt_params[0].type = ACPI_TYPE_INTEGER;
+ mt_params[0].integer.value = protocol;
+ mt_params[1].type = ACPI_TYPE_INTEGER;
+ mt_params[1].integer.value = addr;
+ mt_params[2].type = ACPI_TYPE_INTEGER;
+ mt_params[2].integer.value = command;
+
+ status = acpi_evaluate_object(smbus_cmi->handle, method, &input,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Evaluating %s: %i", method, status));
+ return -EIO;
+ }
+
+ pkg = buffer.pointer;
+ if (pkg && pkg->type == ACPI_TYPE_PACKAGE)
+ obj = pkg->package.elements;
+ else {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ result = -EIO;
+ goto out;
+ }
+ if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ result = -EIO;
+ goto out;
+ }
+
+ result = obj->integer.value;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s return status: %i\n",
+ method, result));
+
+ switch (result) {
+ case ACPI_SMBUS_STATUS_OK:
+ result = 0;
+ break;
+ case ACPI_SMBUS_STATUS_BUSY:
+ result = -EBUSY;
+ goto out;
+ case ACPI_SMBUS_STATUS_TIMEOUT:
+ result = -ETIMEDOUT;
+ goto out;
+ case ACPI_SMBUS_STATUS_DNAK:
+ result = -ENXIO;
+ goto out;
+ default:
+ result = -EIO;
+ goto out;
+ }
+
+ if (read_write == I2C_SMBUS_WRITE || size == I2C_SMBUS_QUICK)
+ goto out;
+
+ obj = pkg->package.elements + 1;
+ if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ result = -EIO;
+ goto out;
+ }
+
+ len = obj->integer.value;
+ obj = pkg->package.elements + 2;
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ case I2C_SMBUS_WORD_DATA:
+ if (obj == NULL || obj->type != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ result = -EIO;
+ goto out;
+ }
+ if (len == 2)
+ data->word = obj->integer.value;
+ else
+ data->byte = obj->integer.value;
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (obj == NULL || obj->type != ACPI_TYPE_BUFFER) {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ result = -EIO;
+ goto out;
+ }
+ if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
+ return -EPROTO;
+ data->block[0] = len;
+ memcpy(data->block + 1, obj->buffer.pointer, len);
+ break;
+ }
+
+out:
+ kfree(buffer.pointer);
+ dev_dbg(&adap->dev, "Transaction status: %i\n", result);
+ return result;
+}
+
+static u32 acpi_smbus_cmi_func(struct i2c_adapter *adapter)
+{
+ struct acpi_smbus_cmi *smbus_cmi = adapter->algo_data;
+ u32 ret;
+
+ ret = smbus_cmi->cap_read | smbus_cmi->cap_write ?
+ I2C_FUNC_SMBUS_QUICK : 0;
+
+ ret |= smbus_cmi->cap_read ?
+ (I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA) : 0;
+
+ ret |= smbus_cmi->cap_write ?
+ (I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA |
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) : 0;
+
+ return ret;
+}
+
+static const struct i2c_algorithm acpi_smbus_cmi_algorithm = {
+ .smbus_xfer = acpi_smbus_cmi_access,
+ .functionality = acpi_smbus_cmi_func,
+};
+
+
+static int acpi_smbus_cmi_add_cap(struct acpi_smbus_cmi *smbus_cmi,
+ const char *name)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ if (!strcmp(name, smbus_methods.mt_info)) {
+ status = acpi_evaluate_object(smbus_cmi->handle,
+ smbus_methods.mt_info,
+ NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Evaluating %s: %i",
+ smbus_methods.mt_info, status));
+ return -EIO;
+ }
+
+ obj = buffer.pointer;
+ if (obj && obj->type == ACPI_TYPE_PACKAGE)
+ obj = obj->package.elements;
+ else {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ kfree(buffer.pointer);
+ return -EIO;
+ }
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ ACPI_ERROR((AE_INFO, "Invalid argument type"));
+ kfree(buffer.pointer);
+ return -EIO;
+ } else
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SMBus CMI Version %x"
+ "\n", (int)obj->integer.value));
+
+ kfree(buffer.pointer);
+ smbus_cmi->cap_info = 1;
+ } else if (!strcmp(name, smbus_methods.mt_sbr))
+ smbus_cmi->cap_read = 1;
+ else if (!strcmp(name, smbus_methods.mt_sbw))
+ smbus_cmi->cap_write = 1;
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported CMI method: %s\n",
+ name));
+
+ return 0;
+}
+
+static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ char node_name[5];
+ struct acpi_buffer buffer = { sizeof(node_name), node_name };
+ struct acpi_smbus_cmi *smbus_cmi = context;
+ acpi_status status;
+
+ status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
+
+ if (ACPI_SUCCESS(status))
+ acpi_smbus_cmi_add_cap(smbus_cmi, node_name);
+
+ return AE_OK;
+}
+
+static int acpi_smbus_cmi_add(struct acpi_device *device)
+{
+ struct acpi_smbus_cmi *smbus_cmi;
+
+ smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
+ if (!smbus_cmi)
+ return -ENOMEM;
+
+ smbus_cmi->handle = device->handle;
+ strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
+ device->driver_data = smbus_cmi;
+ smbus_cmi->cap_info = 0;
+ smbus_cmi->cap_read = 0;
+ smbus_cmi->cap_write = 0;
+
+ acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
+ acpi_smbus_cmi_query_methods, smbus_cmi, NULL);
+
+ if (smbus_cmi->cap_info == 0)
+ goto err;
+
+ snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
+ "SMBus CMI adapter %s",
+ acpi_device_name(device));
+ smbus_cmi->adapter.owner = THIS_MODULE;
+ smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
+ smbus_cmi->adapter.algo_data = smbus_cmi;
+ smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ smbus_cmi->adapter.dev.parent = &device->dev;
+
+ if (i2c_add_adapter(&smbus_cmi->adapter)) {
+ dev_err(&device->dev, "Couldn't register adapter!\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kfree(smbus_cmi);
+ device->driver_data = NULL;
+ return -EIO;
+}
+
+static int acpi_smbus_cmi_remove(struct acpi_device *device, int type)
+{
+ struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device);
+
+ i2c_del_adapter(&smbus_cmi->adapter);
+ kfree(smbus_cmi);
+ device->driver_data = NULL;
+
+ return 0;
+}
+
+static struct acpi_driver acpi_smbus_cmi_driver = {
+ .name = ACPI_SMBUS_HC_DEVICE_NAME,
+ .class = ACPI_SMBUS_HC_CLASS,
+ .ids = acpi_smbus_cmi_ids,
+ .ops = {
+ .add = acpi_smbus_cmi_add,
+ .remove = acpi_smbus_cmi_remove,
+ },
+};
+
+static int __init acpi_smbus_cmi_init(void)
+{
+ return acpi_bus_register_driver(&acpi_smbus_cmi_driver);
+}
+
+static void __exit acpi_smbus_cmi_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_smbus_cmi_driver);
+}
+
+module_init(acpi_smbus_cmi_init);
+module_exit(acpi_smbus_cmi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Crane Cai <crane.cai@amd.com>");
+MODULE_DESCRIPTION("ACPI SMBus CMI driver");
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 224aa12ee7c..dd39c1eb03e 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -32,10 +32,12 @@
#define TAOS_STATE_INIT 0
#define TAOS_STATE_IDLE 1
-#define TAOS_STATE_SEND 2
+#define TAOS_STATE_EOFF 2
#define TAOS_STATE_RECV 3
#define TAOS_CMD_RESET 0x12
+#define TAOS_CMD_ECHO_ON '+'
+#define TAOS_CMD_ECHO_OFF '-'
static DECLARE_WAIT_QUEUE_HEAD(wq);
@@ -102,17 +104,9 @@ static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Send the transaction to the TAOS EVM */
dev_dbg(&adapter->dev, "Command buffer: %s\n", taos->buffer);
- taos->pos = 0;
- taos->state = TAOS_STATE_SEND;
- serio_write(serio, taos->buffer[0]);
- wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
- msecs_to_jiffies(250));
- if (taos->state != TAOS_STATE_IDLE) {
- dev_err(&adapter->dev, "Transaction failed "
- "(state=%d, pos=%d)\n", taos->state, taos->pos);
- taos->addr = 0;
- return -EIO;
- }
+ for (p = taos->buffer; *p; p++)
+ serio_write(serio, *p);
+
taos->addr = addr;
/* Start the transaction and read the answer */
@@ -122,7 +116,7 @@ static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
msecs_to_jiffies(150));
if (taos->state != TAOS_STATE_IDLE
- || taos->pos != 6) {
+ || taos->pos != 5) {
dev_err(&adapter->dev, "Transaction timeout (pos=%d)\n",
taos->pos);
return -EIO;
@@ -130,7 +124,7 @@ static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
dev_dbg(&adapter->dev, "Answer buffer: %s\n", taos->buffer);
/* Interpret the returned string */
- p = taos->buffer + 2;
+ p = taos->buffer + 1;
p[3] = '\0';
if (!strcmp(p, "NAK"))
return -ENODEV;
@@ -173,13 +167,9 @@ static irqreturn_t taos_interrupt(struct serio *serio, unsigned char data,
wake_up_interruptible(&wq);
}
break;
- case TAOS_STATE_SEND:
- if (taos->buffer[++taos->pos])
- serio_write(serio, taos->buffer[taos->pos]);
- else {
- taos->state = TAOS_STATE_IDLE;
- wake_up_interruptible(&wq);
- }
+ case TAOS_STATE_EOFF:
+ taos->state = TAOS_STATE_IDLE;
+ wake_up_interruptible(&wq);
break;
case TAOS_STATE_RECV:
taos->buffer[taos->pos++] = data;
@@ -257,6 +247,19 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
}
strlcpy(adapter->name, name, sizeof(adapter->name));
+ /* Turn echo off for better performance */
+ taos->state = TAOS_STATE_EOFF;
+ serio_write(serio, TAOS_CMD_ECHO_OFF);
+
+ wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
+ msecs_to_jiffies(250));
+ if (taos->state != TAOS_STATE_IDLE) {
+ err = -ENODEV;
+ dev_err(&adapter->dev, "Echo off failed "
+ "(state=%d)\n", taos->state);
+ goto exit_close;
+ }
+
err = i2c_add_adapter(adapter);
if (err)
goto exit_close;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 648ecc6f60e..cf994bd01d9 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -217,8 +217,10 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status)
return;
error:
- dev_err(&iface->adapter.dev, "%s in state %s\n", errmsg,
- scx200_acb_state_name[iface->state]);
+ dev_err(&iface->adapter.dev,
+ "%s in state %s (addr=0x%02x, len=%d, status=0x%02x)\n", errmsg,
+ scx200_acb_state_name[iface->state], iface->address_byte,
+ iface->len, status);
iface->state = state_idle;
iface->result = -EIO;
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 02d746c9c47..f9618f4d4e4 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -16,54 +16,6 @@ config DS1682
This driver can also be built as a module. If so, the module
will be called ds1682.
-config SENSORS_PCF8574
- tristate "Philips PCF8574 and PCF8574A (DEPRECATED)"
- depends on EXPERIMENTAL && GPIO_PCF857X = "n"
- default n
- help
- If you say yes here you get support for Philips PCF8574 and
- PCF8574A chips. These chips are 8-bit I/O expanders for the I2C bus.
-
- This driver can also be built as a module. If so, the module
- will be called pcf8574.
-
- This driver is deprecated and will be dropped soon. Use
- drivers/gpio/pcf857x.c instead.
-
- These devices are hard to detect and rarely found on mainstream
- hardware. If unsure, say N.
-
-config PCF8575
- tristate "Philips PCF8575 (DEPRECATED)"
- default n
- depends on GPIO_PCF857X = "n"
- help
- If you say yes here you get support for Philips PCF8575 chip.
- This chip is a 16-bit I/O expander for the I2C bus. Several other
- chip manufacturers sell equivalent chips, e.g. Texas Instruments.
-
- This driver can also be built as a module. If so, the module
- will be called pcf8575.
-
- This driver is deprecated and will be dropped soon. Use
- drivers/gpio/pcf857x.c instead.
-
- This device is hard to detect and is rarely found on mainstream
- hardware. If unsure, say N.
-
-config SENSORS_PCA9539
- tristate "Philips PCA9539 16-bit I/O port (DEPRECATED)"
- depends on EXPERIMENTAL && GPIO_PCA953X = "n"
- help
- If you say yes here you get support for the Philips PCA9539
- 16-bit I/O port.
-
- This driver can also be built as a module. If so, the module
- will be called pca9539.
-
- This driver is deprecated and will be dropped soon. Use
- drivers/gpio/pca953x.c instead.
-
config SENSORS_TSL2550
tristate "Taos TSL2550 ambient light sensor"
depends on EXPERIMENTAL
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index f4680d16ee3..749cf360629 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -11,9 +11,6 @@
#
obj-$(CONFIG_DS1682) += ds1682.o
-obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
-obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
-obj-$(CONFIG_PCF8575) += pcf8575.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
diff --git a/drivers/i2c/chips/pca9539.c b/drivers/i2c/chips/pca9539.c
deleted file mode 100644
index 270de4e56a8..00000000000
--- a/drivers/i2c/chips/pca9539.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- pca9539.c - 16-bit I/O port with interrupt and reset
-
- Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/hwmon-sysfs.h>
-
-/* Addresses to scan: none, device is not autodetected */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(pca9539);
-
-enum pca9539_cmd
-{
- PCA9539_INPUT_0 = 0,
- PCA9539_INPUT_1 = 1,
- PCA9539_OUTPUT_0 = 2,
- PCA9539_OUTPUT_1 = 3,
- PCA9539_INVERT_0 = 4,
- PCA9539_INVERT_1 = 5,
- PCA9539_DIRECTION_0 = 6,
- PCA9539_DIRECTION_1 = 7,
-};
-
-/* following are the sysfs callback functions */
-static ssize_t pca9539_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *psa = to_sensor_dev_attr(attr);
- struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%d\n", i2c_smbus_read_byte_data(client,
- psa->index));
-}
-
-static ssize_t pca9539_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *psa = to_sensor_dev_attr(attr);
- struct i2c_client *client = to_i2c_client(dev);
- unsigned long val = simple_strtoul(buf, NULL, 0);
- if (val > 0xff)
- return -EINVAL;
- i2c_smbus_write_byte_data(client, psa->index, val);
- return count;
-}
-
-/* Define the device attributes */
-
-#define PCA9539_ENTRY_RO(name, cmd_idx) \
- static SENSOR_DEVICE_ATTR(name, S_IRUGO, pca9539_show, NULL, cmd_idx)
-
-#define PCA9539_ENTRY_RW(name, cmd_idx) \
- static SENSOR_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, pca9539_show, \
- pca9539_store, cmd_idx)
-
-PCA9539_ENTRY_RO(input0, PCA9539_INPUT_0);
-PCA9539_ENTRY_RO(input1, PCA9539_INPUT_1);
-PCA9539_ENTRY_RW(output0, PCA9539_OUTPUT_0);
-PCA9539_ENTRY_RW(output1, PCA9539_OUTPUT_1);
-PCA9539_ENTRY_RW(invert0, PCA9539_INVERT_0);
-PCA9539_ENTRY_RW(invert1, PCA9539_INVERT_1);
-PCA9539_ENTRY_RW(direction0, PCA9539_DIRECTION_0);
-PCA9539_ENTRY_RW(direction1, PCA9539_DIRECTION_1);
-
-static struct attribute *pca9539_attributes[] = {
- &sensor_dev_attr_input0.dev_attr.attr,
- &sensor_dev_attr_input1.dev_attr.attr,
- &sensor_dev_attr_output0.dev_attr.attr,
- &sensor_dev_attr_output1.dev_attr.attr,
- &sensor_dev_attr_invert0.dev_attr.attr,
- &sensor_dev_attr_invert1.dev_attr.attr,
- &sensor_dev_attr_direction0.dev_attr.attr,
- &sensor_dev_attr_direction1.dev_attr.attr,
- NULL
-};
-
-static struct attribute_group pca9539_defattr_group = {
- .attrs = pca9539_attributes,
-};
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int pca9539_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
-
- strlcpy(info->type, "pca9539", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int pca9539_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- /* Register sysfs hooks */
- return sysfs_create_group(&client->dev.kobj,
- &pca9539_defattr_group);
-}
-
-static int pca9539_remove(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &pca9539_defattr_group);
- return 0;
-}
-
-static const struct i2c_device_id pca9539_id[] = {
- { "pca9539", 0 },
- { }
-};
-
-static struct i2c_driver pca9539_driver = {
- .driver = {
- .name = "pca9539",
- },
- .probe = pca9539_probe,
- .remove = pca9539_remove,
- .id_table = pca9539_id,
-
- .detect = pca9539_detect,
- .address_data = &addr_data,
-};
-
-static int __init pca9539_init(void)
-{
- return i2c_add_driver(&pca9539_driver);
-}
-
-static void __exit pca9539_exit(void)
-{
- i2c_del_driver(&pca9539_driver);
-}
-
-MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
-MODULE_DESCRIPTION("PCA9539 driver");
-MODULE_LICENSE("GPL");
-
-module_init(pca9539_init);
-module_exit(pca9539_exit);
-
diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
deleted file mode 100644
index 6ec309894c8..00000000000
--- a/drivers/i2c/chips/pcf8574.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- Copyright (c) 2000 Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Dan Eaton <dan.eaton@rocketlogix.com>
- Ported to Linux 2.6 by Aurelien Jarno <aurel32@debian.org> with
- the help of Jean Delvare <khali@linux-fr.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/* A few notes about the PCF8574:
-
-* The PCF8574 is an 8-bit I/O expander for the I2C bus produced by
- Philips Semiconductors. It is designed to provide a byte I2C
- interface to up to 8 separate devices.
-
-* The PCF8574 appears as a very simple SMBus device which can be
- read from or written to with SMBUS byte read/write accesses.
-
- --Dan
-
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-
-/* Addresses to scan: none, device can't be detected */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(pcf8574, pcf8574a);
-
-/* Each client has this additional data */
-struct pcf8574_data {
- int write; /* Remember last written value */
-};
-
-static void pcf8574_init_client(struct i2c_client *client);
-
-/* following are the sysfs callback functions */
-static ssize_t show_read(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%u\n", i2c_smbus_read_byte(client));
-}
-
-static DEVICE_ATTR(read, S_IRUGO, show_read, NULL);
-
-static ssize_t show_write(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct pcf8574_data *data = i2c_get_clientdata(to_i2c_client(dev));
-
- if (data->write < 0)
- return data->write;
-
- return sprintf(buf, "%d\n", data->write);
-}
-
-static ssize_t set_write(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct pcf8574_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
-
- if (val > 0xff)
- return -EINVAL;
-
- data->write = val;
- i2c_smbus_write_byte(client, data->write);
- return count;
-}
-
-static DEVICE_ATTR(write, S_IWUSR | S_IRUGO, show_write, set_write);
-
-static struct attribute *pcf8574_attributes[] = {
- &dev_attr_read.attr,
- &dev_attr_write.attr,
- NULL
-};
-
-static const struct attribute_group pcf8574_attr_group = {
- .attrs = pcf8574_attributes,
-};
-
-/*
- * Real code
- */
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int pcf8574_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- const char *client_name;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
- return -ENODEV;
-
- /* Now, we would do the remaining detection. But the PCF8574 is plainly
- impossible to detect! Stupid chip. */
-
- /* Determine the chip type */
- if (kind <= 0) {
- if (client->addr >= 0x38 && client->addr <= 0x3f)
- kind = pcf8574a;
- else
- kind = pcf8574;
- }
-
- if (kind == pcf8574a)
- client_name = "pcf8574a";
- else
- client_name = "pcf8574";
- strlcpy(info->type, client_name, I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int pcf8574_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct pcf8574_data *data;
- int err;
-
- data = kzalloc(sizeof(struct pcf8574_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- i2c_set_clientdata(client, data);
-
- /* Initialize the PCF8574 chip */
- pcf8574_init_client(client);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &pcf8574_attr_group);
- if (err)
- goto exit_free;
- return 0;
-
- exit_free:
- kfree(data);
- exit:
- return err;
-}
-
-static int pcf8574_remove(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &pcf8574_attr_group);
- kfree(i2c_get_clientdata(client));
- return 0;
-}
-
-/* Called when we have found a new PCF8574. */
-static void pcf8574_init_client(struct i2c_client *client)
-{
- struct pcf8574_data *data = i2c_get_clientdata(client);
- data->write = -EAGAIN;
-}
-
-static const struct i2c_device_id pcf8574_id[] = {
- { "pcf8574", 0 },
- { "pcf8574a", 0 },
- { }
-};
-
-static struct i2c_driver pcf8574_driver = {
- .driver = {
- .name = "pcf8574",
- },
- .probe = pcf8574_probe,
- .remove = pcf8574_remove,
- .id_table = pcf8574_id,
-
- .detect = pcf8574_detect,
- .address_data = &addr_data,
-};
-
-static int __init pcf8574_init(void)
-{
- return i2c_add_driver(&pcf8574_driver);
-}
-
-static void __exit pcf8574_exit(void)
-{
- i2c_del_driver(&pcf8574_driver);
-}
-
-
-MODULE_AUTHOR
- ("Frodo Looijaard <frodol@dds.nl>, "
- "Philip Edelbrock <phil@netroedge.com>, "
- "Dan Eaton <dan.eaton@rocketlogix.com> "
- "and Aurelien Jarno <aurelien@aurel32.net>");
-MODULE_DESCRIPTION("PCF8574 driver");
-MODULE_LICENSE("GPL");
-
-module_init(pcf8574_init);
-module_exit(pcf8574_exit);
diff --git a/drivers/i2c/chips/pcf8575.c b/drivers/i2c/chips/pcf8575.c
deleted file mode 100644
index 07fd7cb3c57..00000000000
--- a/drivers/i2c/chips/pcf8575.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- pcf8575.c
-
- About the PCF8575 chip: the PCF8575 is a 16-bit I/O expander for the I2C bus
- produced by a.o. Philips Semiconductors.
-
- Copyright (C) 2006 Michael Hennerich, Analog Devices Inc.
- <hennerich@blackfin.uclinux.org>
- Based on pcf8574.c.
-
- Copyright (c) 2007 Bart Van Assche <bart.vanassche@gmail.com>.
- Ported this driver from ucLinux to the mainstream Linux kernel.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/slab.h> /* kzalloc() */
-#include <linux/sysfs.h> /* sysfs_create_group() */
-
-/* Addresses to scan: none, device can't be detected */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
-
-
-/* Each client has this additional data */
-struct pcf8575_data {
- int write; /* last written value, or error code */
-};
-
-/* following are the sysfs callback functions */
-static ssize_t show_read(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct i2c_client *client = to_i2c_client(dev);
- u16 val;
- u8 iopin_state[2];
-
- i2c_master_recv(client, iopin_state, 2);
-
- val = iopin_state[0];
- val |= iopin_state[1] << 8;
-
- return sprintf(buf, "%u\n", val);
-}
-
-static DEVICE_ATTR(read, S_IRUGO, show_read, NULL);
-
-static ssize_t show_write(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct pcf8575_data *data = dev_get_drvdata(dev);
- if (data->write < 0)
- return data->write;
- return sprintf(buf, "%d\n", data->write);
-}
-
-static ssize_t set_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct pcf8575_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
- u8 iopin_state[2];
-
- if (val > 0xffff)
- return -EINVAL;
-
- data->write = val;
-
- iopin_state[0] = val & 0xFF;
- iopin_state[1] = val >> 8;
-
- i2c_master_send(client, iopin_state, 2);
-
- return count;
-}
-
-static DEVICE_ATTR(write, S_IWUSR | S_IRUGO, show_write, set_write);
-
-static struct attribute *pcf8575_attributes[] = {
- &dev_attr_read.attr,
- &dev_attr_write.attr,
- NULL
-};
-
-static const struct attribute_group pcf8575_attr_group = {
- .attrs = pcf8575_attributes,
-};
-
-/*
- * Real code
- */
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int pcf8575_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- /* This is the place to detect whether the chip at the specified
- address really is a PCF8575 chip. However, there is no method known
- to detect whether an I2C chip is a PCF8575 or any other I2C chip. */
-
- strlcpy(info->type, "pcf8575", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int pcf8575_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct pcf8575_data *data;
- int err;
-
- data = kzalloc(sizeof(struct pcf8575_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- i2c_set_clientdata(client, data);
- data->write = -EAGAIN;
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&client->dev.kobj, &pcf8575_attr_group);
- if (err)
- goto exit_free;
-
- return 0;
-
-exit_free:
- kfree(data);
-exit:
- return err;
-}
-
-static int pcf8575_remove(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &pcf8575_attr_group);
- kfree(i2c_get_clientdata(client));
- return 0;
-}
-
-static const struct i2c_device_id pcf8575_id[] = {
- { "pcf8575", 0 },
- { }
-};
-
-static struct i2c_driver pcf8575_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "pcf8575",
- },
- .probe = pcf8575_probe,
- .remove = pcf8575_remove,
- .id_table = pcf8575_id,
-
- .detect = pcf8575_detect,
- .address_data = &addr_data,
-};
-
-static int __init pcf8575_init(void)
-{
- return i2c_add_driver(&pcf8575_driver);
-}
-
-static void __exit pcf8575_exit(void)
-{
- i2c_del_driver(&pcf8575_driver);
-}
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>, "
- "Bart Van Assche <bart.vanassche@gmail.com>");
-MODULE_DESCRIPTION("pcf8575 driver");
-MODULE_LICENSE("GPL");
-
-module_init(pcf8575_init);
-module_exit(pcf8575_exit);
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
index b96f3025e58..aa96bd2d27e 100644
--- a/drivers/i2c/chips/tsl2550.c
+++ b/drivers/i2c/chips/tsl2550.c
@@ -24,10 +24,9 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/delay.h>
#define TSL2550_DRV_NAME "tsl2550"
-#define DRIVER_VERSION "1.1.2"
+#define DRIVER_VERSION "1.2"
/*
* Defines
@@ -96,32 +95,13 @@ static int tsl2550_set_power_state(struct i2c_client *client, int state)
static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd)
{
- unsigned long end;
- int loop = 0, ret = 0;
+ int ret;
- /*
- * Read ADC channel waiting at most 400ms (see data sheet for further
- * info).
- * To avoid long busy wait we spin for few milliseconds then
- * start sleeping.
- */
- end = jiffies + msecs_to_jiffies(400);
- while (time_before(jiffies, end)) {
- i2c_smbus_write_byte(client, cmd);
-
- if (loop++ < 5)
- mdelay(1);
- else
- msleep(1);
-
- ret = i2c_smbus_read_byte(client);
- if (ret < 0)
- return ret;
- else if (ret & 0x0080)
- break;
- }
+ ret = i2c_smbus_read_byte_data(client, cmd);
+ if (ret < 0)
+ return ret;
if (!(ret & 0x80))
- return -EIO;
+ return -EAGAIN;
return ret & 0x7f; /* remove the "valid" bit */
}
@@ -285,8 +265,6 @@ static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
return ret;
ch0 = ret;
- mdelay(1);
-
ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1);
if (ret < 0)
return ret;
@@ -345,11 +323,10 @@ static int tsl2550_init_client(struct i2c_client *client)
* Probe the chip. To do so we try to power up the device and then to
* read back the 0x03 code
*/
- err = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
+ err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP);
if (err < 0)
return err;
- mdelay(1);
- if (i2c_smbus_read_byte(client) != TSL2550_POWER_UP)
+ if (err != TSL2550_POWER_UP)
return -ENODEV;
data->power_state = 1;
@@ -374,7 +351,8 @@ static int __devinit tsl2550_probe(struct i2c_client *client,
struct tsl2550_data *data;
int *opmode, err = 0;
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
err = -EIO;
goto exit;
}
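With the polling loop gone, tsl2550_get_adc_value() now reports an unfinished conversion as -EAGAIN instead of busy-waiting for up to 400ms. A caller that still wants the old blocking behaviour could retry with a bounded timeout; a minimal sketch, not part of this patch (it would also need <linux/delay.h>, which the patch removes), could look like:

static int tsl2550_get_adc_blocking(struct i2c_client *client, u8 cmd)
{
	unsigned long end = jiffies + msecs_to_jiffies(400);	/* data sheet limit */
	int ret;

	do {
		ret = tsl2550_get_adc_value(client, cmd);
		if (ret != -EAGAIN)	/* valid sample or hard error */
			return ret;
		msleep(5);		/* let the conversion finish */
	} while (time_before(jiffies, end));

	return -EIO;
}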
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0e45c296d3d..8d80fceca6a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -46,6 +46,7 @@ static DEFINE_MUTEX(core_lock);
static DEFINE_IDR(i2c_adapter_idr);
static LIST_HEAD(userspace_devices);
+static struct device_type i2c_client_type;
static int i2c_check_addr(struct i2c_adapter *adapter, int addr);
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
@@ -64,9 +65,13 @@ static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
static int i2c_device_match(struct device *dev, struct device_driver *drv)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_driver *driver = to_i2c_driver(drv);
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
+
+ if (!client)
+ return 0;
+ driver = to_i2c_driver(drv);
/* match on an id table if there is one */
if (driver->id_table)
return i2c_match_id(driver->id_table, client) != NULL;
@@ -94,10 +99,14 @@ static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
static int i2c_device_probe(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct i2c_driver *driver = to_i2c_driver(dev->driver);
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
int status;
+ if (!client)
+ return 0;
+
+ driver = to_i2c_driver(dev->driver);
if (!driver->probe || !driver->id_table)
return -ENODEV;
client->driver = driver;
@@ -114,11 +123,11 @@ static int i2c_device_probe(struct device *dev)
static int i2c_device_remove(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
int status;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
driver = to_i2c_driver(dev->driver);
@@ -136,37 +145,40 @@ static int i2c_device_remove(struct device *dev)
static void i2c_device_shutdown(struct device *dev)
{
+ struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return;
driver = to_i2c_driver(dev->driver);
if (driver->shutdown)
- driver->shutdown(to_i2c_client(dev));
+ driver->shutdown(client);
}
static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
{
+ struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
driver = to_i2c_driver(dev->driver);
if (!driver->suspend)
return 0;
- return driver->suspend(to_i2c_client(dev), mesg);
+ return driver->suspend(client, mesg);
}
static int i2c_device_resume(struct device *dev)
{
+ struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
driver = to_i2c_driver(dev->driver);
if (!driver->resume)
return 0;
- return driver->resume(to_i2c_client(dev));
+ return driver->resume(client);
}
static void i2c_client_dev_release(struct device *dev)
@@ -175,10 +187,10 @@ static void i2c_client_dev_release(struct device *dev)
}
static ssize_t
-show_client_name(struct device *dev, struct device_attribute *attr, char *buf)
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%s\n", client->name);
+ return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
+ to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
}
static ssize_t
@@ -188,18 +200,28 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
}
-static struct device_attribute i2c_dev_attrs[] = {
- __ATTR(name, S_IRUGO, show_client_name, NULL),
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+
+static struct attribute *i2c_dev_attrs[] = {
+ &dev_attr_name.attr,
/* modalias helps coldplug: modprobe $(cat .../modalias) */
- __ATTR(modalias, S_IRUGO, show_modalias, NULL),
- { },
+ &dev_attr_modalias.attr,
+ NULL
+};
+
+static struct attribute_group i2c_dev_attr_group = {
+ .attrs = i2c_dev_attrs,
+};
+
+static const struct attribute_group *i2c_dev_attr_groups[] = {
+ &i2c_dev_attr_group,
+ NULL
};
struct bus_type i2c_bus_type = {
.name = "i2c",
- .dev_attrs = i2c_dev_attrs,
.match = i2c_device_match,
- .uevent = i2c_device_uevent,
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
@@ -208,6 +230,12 @@ struct bus_type i2c_bus_type = {
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
+static struct device_type i2c_client_type = {
+ .groups = i2c_dev_attr_groups,
+ .uevent = i2c_device_uevent,
+ .release = i2c_client_dev_release,
+};
+
/**
* i2c_verify_client - return parameter as i2c_client, or NULL
@@ -220,7 +248,7 @@ EXPORT_SYMBOL_GPL(i2c_bus_type);
*/
struct i2c_client *i2c_verify_client(struct device *dev)
{
- return (dev->bus == &i2c_bus_type)
+ return (dev->type == &i2c_client_type)
? to_i2c_client(dev)
: NULL;
}
@@ -273,7 +301,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
- client->dev.release = i2c_client_dev_release;
+ client->dev.type = &i2c_client_type;
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
@@ -368,13 +396,6 @@ static void i2c_adapter_dev_release(struct device *dev)
complete(&adap->dev_released);
}
-static ssize_t
-show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct i2c_adapter *adap = to_i2c_adapter(dev);
- return sprintf(buf, "%s\n", adap->name);
-}
-
/*
* Let users instantiate I2C devices through sysfs. This can be used when
* platform initialization code doesn't contain the proper data for
@@ -493,19 +514,34 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
return res;
}
-static struct device_attribute i2c_adapter_attrs[] = {
- __ATTR(name, S_IRUGO, show_adapter_name, NULL),
- __ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device),
- __ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device),
- { },
+static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
+static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device);
+
+static struct attribute *i2c_adapter_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_new_device.attr,
+ &dev_attr_delete_device.attr,
+ NULL
};
-static struct class i2c_adapter_class = {
- .owner = THIS_MODULE,
- .name = "i2c-adapter",
- .dev_attrs = i2c_adapter_attrs,
+static struct attribute_group i2c_adapter_attr_group = {
+ .attrs = i2c_adapter_attrs,
};
+static const struct attribute_group *i2c_adapter_attr_groups[] = {
+ &i2c_adapter_attr_group,
+ NULL
+};
+
+static struct device_type i2c_adapter_type = {
+ .groups = i2c_adapter_attr_groups,
+ .release = i2c_adapter_dev_release,
+};
+
+#ifdef CONFIG_I2C_COMPAT
+static struct class_compat *i2c_adapter_compat_class;
+#endif
+
static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
{
struct i2c_devinfo *devinfo;
@@ -555,14 +591,22 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->timeout = HZ;
dev_set_name(&adap->dev, "i2c-%d", adap->nr);
- adap->dev.release = &i2c_adapter_dev_release;
- adap->dev.class = &i2c_adapter_class;
+ adap->dev.bus = &i2c_bus_type;
+ adap->dev.type = &i2c_adapter_type;
res = device_register(&adap->dev);
if (res)
goto out_list;
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+#ifdef CONFIG_I2C_COMPAT
+ res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
+ adap->dev.parent);
+ if (res)
+ dev_warn(&adap->dev,
+ "Failed to create compatibility class link\n");
+#endif
+
/* create pre-declared device nodes */
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -741,6 +785,11 @@ int i2c_del_adapter(struct i2c_adapter *adap)
checking the returned value. */
res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+#ifdef CONFIG_I2C_COMPAT
+ class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
+ adap->dev.parent);
+#endif
+
/* clean up the sysfs representation */
init_completion(&adap->dev_released);
device_unregister(&adap->dev);
@@ -768,9 +817,13 @@ EXPORT_SYMBOL(i2c_del_adapter);
static int __attach_adapter(struct device *dev, void *data)
{
- struct i2c_adapter *adapter = to_i2c_adapter(dev);
+ struct i2c_adapter *adapter;
struct i2c_driver *driver = data;
+ if (dev->type != &i2c_adapter_type)
+ return 0;
+ adapter = to_i2c_adapter(dev);
+
i2c_detect(adapter, driver);
/* Legacy drivers scan i2c busses directly */
@@ -809,8 +862,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
mutex_lock(&core_lock);
- class_for_each_device(&i2c_adapter_class, NULL, driver,
- __attach_adapter);
+ bus_for_each_dev(&i2c_bus_type, NULL, driver, __attach_adapter);
mutex_unlock(&core_lock);
return 0;
@@ -819,10 +871,14 @@ EXPORT_SYMBOL(i2c_register_driver);
static int __detach_adapter(struct device *dev, void *data)
{
- struct i2c_adapter *adapter = to_i2c_adapter(dev);
+ struct i2c_adapter *adapter;
struct i2c_driver *driver = data;
struct i2c_client *client, *_n;
+ if (dev->type != &i2c_adapter_type)
+ return 0;
+ adapter = to_i2c_adapter(dev);
+
/* Remove the devices we created ourselves as the result of hardware
* probing (using a driver's detect method) */
list_for_each_entry_safe(client, _n, &driver->clients, detected) {
@@ -850,8 +906,7 @@ static int __detach_adapter(struct device *dev, void *data)
void i2c_del_driver(struct i2c_driver *driver)
{
mutex_lock(&core_lock);
- class_for_each_device(&i2c_adapter_class, NULL, driver,
- __detach_adapter);
+ bus_for_each_dev(&i2c_bus_type, NULL, driver, __detach_adapter);
mutex_unlock(&core_lock);
driver_unregister(&driver->driver);
@@ -940,17 +995,23 @@ static int __init i2c_init(void)
retval = bus_register(&i2c_bus_type);
if (retval)
return retval;
- retval = class_register(&i2c_adapter_class);
- if (retval)
+#ifdef CONFIG_I2C_COMPAT
+ i2c_adapter_compat_class = class_compat_register("i2c-adapter");
+ if (!i2c_adapter_compat_class) {
+ retval = -ENOMEM;
goto bus_err;
+ }
+#endif
retval = i2c_add_driver(&dummy_driver);
if (retval)
goto class_err;
return 0;
class_err:
- class_unregister(&i2c_adapter_class);
+#ifdef CONFIG_I2C_COMPAT
+ class_compat_unregister(i2c_adapter_compat_class);
bus_err:
+#endif
bus_unregister(&i2c_bus_type);
return retval;
}
@@ -958,7 +1019,9 @@ bus_err:
static void __exit i2c_exit(void)
{
i2c_del_driver(&dummy_driver);
- class_unregister(&i2c_adapter_class);
+#ifdef CONFIG_I2C_COMPAT
+ class_compat_unregister(i2c_adapter_compat_class);
+#endif
bus_unregister(&i2c_bus_type);
}
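Since adapters are now registered on i2c_bus_type alongside clients, any walk over the bus must tell the two apart by device type, exactly as __attach_adapter() and __detach_adapter() do above. A minimal illustrative sketch of the client-side filter (example names only, not part of the patch):

static int example_count_clients(struct device *dev, void *data)
{
	struct i2c_client *client = i2c_verify_client(dev);	/* NULL for adapters */
	int *count = data;

	if (client)
		(*count)++;
	return 0;
}

/* usage: bus_for_each_dev(&i2c_bus_type, NULL, &count, example_count_clients); */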
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index c509c991646..c0cf45a11b9 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -114,8 +114,6 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
unsigned int bus, devnum, func;
acpi_integer addr;
acpi_handle dev_handle;
- struct acpi_buffer buffer = {.length = ACPI_ALLOCATE_BUFFER,
- .pointer = NULL};
acpi_status status;
struct acpi_device_info *dinfo = NULL;
int ret = -ENODEV;
@@ -134,12 +132,11 @@ static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
goto err;
}
- status = acpi_get_object_info(dev_handle, &buffer);
+ status = acpi_get_object_info(dev_handle, &dinfo);
if (ACPI_FAILURE(status)) {
DEBPRINT("get_object_info for device failed\n");
goto err;
}
- dinfo = buffer.pointer;
if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
dinfo->address == addr) {
*pcidevfn = addr;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b79ca419d8d..64207df8da8 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1686,7 +1686,7 @@ static int idecd_revalidate_disk(struct gendisk *disk)
return 0;
}
-static struct block_device_operations idecd_ops = {
+static const struct block_device_operations idecd_ops = {
.owner = THIS_MODULE,
.open = idecd_open,
.release = idecd_release,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 214119026b3..753241429c2 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -321,7 +321,7 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
}
-static struct block_device_operations ide_gd_ops = {
+static const struct block_device_operations ide_gd_ops = {
.owner = THIS_MODULE,
.open = ide_gd_open,
.release = ide_gd_release,
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 8de442cbee9..63c53d65e87 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1212,7 +1212,7 @@ static int ide_find_port_slot(const struct ide_port_info *d)
{
int idx = -ENOENT;
u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
- u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;;
+ u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
/*
* Claim an unassigned slot.
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 9d6f62baac2..58fc920d5c3 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1913,7 +1913,7 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
return err;
}
-static struct block_device_operations idetape_block_ops = {
+static const struct block_device_operations idetape_block_ops = {
.owner = THIS_MODULE,
.open = idetape_open,
.release = idetape_release,
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 0608d41fb6d..60f936e2319 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -170,9 +170,9 @@ static int __init umc8672_init(void)
goto out;
if (umc8672_probe() == 0)
- return 0;;
+ return 0;
out:
- return -ENODEV;;
+ return -ENODEV;
}
module_init(umc8672_init);
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 949c97ff57e..1f20a042a4f 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -29,8 +29,8 @@
#include <asm/idle.h>
-#include "../dma/ioatdma_hw.h"
-#include "../dma/ioatdma_registers.h"
+#include "../dma/ioat/hw.h"
+#include "../dma/ioat/registers.h"
#define I7300_IDLE_DRIVER_VERSION "1.55"
#define I7300_PRINT "i7300_idle:"
@@ -126,9 +126,9 @@ static void i7300_idle_ioat_stop(void)
udelay(10);
sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+ IOAT_CHANSTS_STATUS;
- if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
+ if (sts != IOAT_CHANSTS_ACTIVE)
break;
}
@@ -160,9 +160,9 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
udelay(1000);
chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+ IOAT_CHANSTS_STATUS;
- if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
+ if (chan_sts != IOAT_CHANSTS_DONE) {
/* Not complete, reset the channel */
writeb(IOAT_CHANCMD_RESET,
ioat_chanbase + IOAT1_CHANCMD_OFFSET);
@@ -288,9 +288,9 @@ static void __exit i7300_idle_ioat_exit(void)
ioat_chanbase + IOAT1_CHANCMD_OFFSET);
chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+ IOAT_CHANSTS_STATUS;
- if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+ if (chan_sts != IOAT_CHANSTS_ACTIVE) {
writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
break;
}
@@ -298,14 +298,14 @@ static void __exit i7300_idle_ioat_exit(void)
}
chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+ IOAT_CHANSTS_STATUS;
/*
* We tried to reset multiple times. If IO A/T channel is still active
* flag an error and return without cleanup. Memory leak is better
* than random corruption in that extreme error situation.
*/
- if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+ if (chan_sts == IOAT_CHANSTS_ACTIVE) {
printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
" Not freeing resources\n");
return;
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index da5f8829b50..0bc3d78ce7b 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2272,8 +2272,10 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
return -EFAULT;
}
- if (!mutex_trylock(&fi->state_mutex))
+ if (!mutex_trylock(&fi->state_mutex)) {
+ free_pending_request(req);
return -EAGAIN;
+ }
switch (fi->state) {
case opened:
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 52b25f8b111..f199896c411 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -372,8 +372,7 @@ static const struct {
/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
.firmware_revision = 0x002800,
.model = 0x000000,
- .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY |
- SBP2_WORKAROUND_POWER_CONDITION,
+ .workarounds = SBP2_WORKAROUND_POWER_CONDITION,
},
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 57a3c6f947b..4e0f2829e0e 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -37,7 +37,8 @@
enum rmpp_state {
RMPP_STATE_ACTIVE,
RMPP_STATE_TIMEOUT,
- RMPP_STATE_COMPLETE
+ RMPP_STATE_COMPLETE,
+ RMPP_STATE_CANCELING
};
struct mad_rmpp_recv {
@@ -87,18 +88,22 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
spin_lock_irqsave(&agent->lock, flags);
list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+ if (rmpp_recv->state != RMPP_STATE_COMPLETE)
+ ib_free_recv_mad(rmpp_recv->rmpp_wc);
+ rmpp_recv->state = RMPP_STATE_CANCELING;
+ }
+ spin_unlock_irqrestore(&agent->lock, flags);
+
+ list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
cancel_delayed_work(&rmpp_recv->timeout_work);
cancel_delayed_work(&rmpp_recv->cleanup_work);
}
- spin_unlock_irqrestore(&agent->lock, flags);
flush_workqueue(agent->qp_info->port_priv->wq);
list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
&agent->rmpp_list, list) {
list_del(&rmpp_recv->list);
- if (rmpp_recv->state != RMPP_STATE_COMPLETE)
- ib_free_recv_mad(rmpp_recv->rmpp_wc);
destroy_rmpp_recv(rmpp_recv);
}
}
@@ -260,6 +265,10 @@ static void recv_cleanup_handler(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
+ if (rmpp_recv->state == RMPP_STATE_CANCELING) {
+ spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
+ return;
+ }
list_del(&rmpp_recv->list);
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
destroy_rmpp_recv(rmpp_recv);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 7663a2a9f13..7550a534005 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -2463,7 +2463,7 @@ int ehca_create_busmap(void)
int ret;
ehca_mr_len = 0;
- ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+ ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
ehca_create_busmap_callback);
return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 02831ad070b..4bd39c8af80 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -809,7 +809,7 @@ static int ipath_setup_ht_reset(struct ipath_devdata *dd)
* errors. We only bother to do this at load time, because it's OK if
* it happened before we were loaded (first time after boot/reset),
* but any time after that, it's fatal anyway. Also need to not check
- * for for upper byte errors if we are in 8 bit mode, so figure out
+ * for upper byte errors if we are in 8 bit mode, so figure out
* our width. For now, at least, also complain if it's 8 bit.
*/
static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 056b2a4c697..0aa0110e4b6 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -68,11 +68,16 @@ static void catas_reset(struct work_struct *work)
spin_unlock_irq(&catas_lock);
list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
+ struct pci_dev *pdev = dev->pdev;
ret = __mthca_restart_one(dev->pdev);
+ /* 'dev' is no longer valid */
if (ret)
- mthca_err(dev, "Reset failed (%d)\n", ret);
- else
- mthca_dbg(dev, "Reset succeeded\n");
+ printk(KERN_ERR "mthca %s: Reset failed (%d)\n",
+ pci_name(pdev), ret);
+ else {
+ struct mthca_dev *d = pci_get_drvdata(pdev);
+ mthca_dbg(d, "Reset succeeded\n");
+ }
}
mutex_unlock(&mthca_device_mutex);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 538e409d451..e593af3354b 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1566,7 +1566,6 @@ static const struct net_device_ops nes_netdev_ops = {
.ndo_set_mac_address = nes_netdev_set_mac_address,
.ndo_set_multicast_list = nes_netdev_set_multicast_list,
.ndo_change_mtu = nes_netdev_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_register = nes_netdev_vlan_rx_register,
};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25874fc680c..8763c1ea5eb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -362,12 +362,19 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
+ struct ib_port_attr attr;
/*
* Take rtnl_lock to avoid racing with ipoib_stop() and
* turning the carrier back on while a device is being
* removed.
*/
+ if (ib_query_port(priv->ca, priv->port, &attr) ||
+ attr.state != IB_PORT_ACTIVE) {
+ ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
+ return;
+ }
+
rtnl_lock();
netif_carrier_on(priv->dev);
rtnl_unlock();
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 556539d617a..e828aab7dac 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
+#include <linux/types.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/random.h>
@@ -514,7 +515,7 @@ static void input_disconnect_device(struct input_dev *dev)
* that there are no threads in the middle of input_open_device()
*/
mutex_lock(&dev->mutex);
- dev->going_away = 1;
+ dev->going_away = true;
mutex_unlock(&dev->mutex);
spin_lock_irq(&dev->event_lock);
@@ -1259,10 +1260,71 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
return 0;
}
+#define INPUT_DO_TOGGLE(dev, type, bits, on) \
+ do { \
+ int i; \
+ if (!test_bit(EV_##type, dev->evbit)) \
+ break; \
+ for (i = 0; i < type##_MAX; i++) { \
+ if (!test_bit(i, dev->bits##bit) || \
+ !test_bit(i, dev->bits)) \
+ continue; \
+ dev->event(dev, EV_##type, i, on); \
+ } \
+ } while (0)
+
+static void input_dev_reset(struct input_dev *dev, bool activate)
+{
+ if (!dev->event)
+ return;
+
+ INPUT_DO_TOGGLE(dev, LED, led, activate);
+ INPUT_DO_TOGGLE(dev, SND, snd, activate);
+
+ if (activate && test_bit(EV_REP, dev->evbit)) {
+ dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
+ dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
+ }
+}
+
+#ifdef CONFIG_PM
+static int input_dev_suspend(struct device *dev)
+{
+ struct input_dev *input_dev = to_input_dev(dev);
+
+ mutex_lock(&input_dev->mutex);
+ input_dev_reset(input_dev, false);
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static int input_dev_resume(struct device *dev)
+{
+ struct input_dev *input_dev = to_input_dev(dev);
+
+ mutex_lock(&input_dev->mutex);
+ input_dev_reset(input_dev, true);
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops input_dev_pm_ops = {
+ .suspend = input_dev_suspend,
+ .resume = input_dev_resume,
+ .poweroff = input_dev_suspend,
+ .restore = input_dev_resume,
+};
+#endif /* CONFIG_PM */
+
static struct device_type input_dev_type = {
.groups = input_dev_attr_groups,
.release = input_dev_release,
.uevent = input_dev_uevent,
+#ifdef CONFIG_PM
+ .pm = &input_dev_pm_ops,
+#endif
};
static char *input_devnode(struct device *dev, mode_t *mode)
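For reference, INPUT_DO_TOGGLE(dev, LED, led, true) behaves roughly like the open-coded loop below (an illustrative expansion of the macro above, not code from the patch):

	if (test_bit(EV_LED, dev->evbit)) {
		int i;

		for (i = 0; i < LED_MAX; i++)
			if (test_bit(i, dev->ledbit) && test_bit(i, dev->led))
				dev->event(dev, EV_LED, i, true);
	}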
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 3525c19be42..ee98b1bc5d8 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -24,6 +24,16 @@ config KEYBOARD_AAED2000
To compile this driver as a module, choose M here: the
module will be called aaed2000_kbd.
+config KEYBOARD_ADP5588
+ tristate "ADP5588 I2C QWERTY Keypad and IO Expander"
+ depends on I2C
+ help
+ Say Y here if you want to use an ADP5588 attached to your
+ system I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adp5588-keys.
+
config KEYBOARD_AMIGA
tristate "Amiga keyboard"
depends on AMIGA
@@ -104,6 +114,16 @@ config KEYBOARD_ATKBD_RDI_KEYCODES
right-hand column will be interpreted as the key shown in the
left-hand column.
+config KEYBOARD_QT2160
+ tristate "Atmel AT42QT2160 Touch Sensor Chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Atmel AT42QT2160 Touch
+ Sensor chip as a keyboard input.
+
+ This driver can also be built as a module. If so, the module
+ will be called qt2160.
+
config KEYBOARD_BFIN
tristate "Blackfin BF54x keypad support"
depends on (BF54x && !BF544)
@@ -251,6 +271,17 @@ config KEYBOARD_MAPLE
To compile this driver as a module, choose M here: the
module will be called maple_keyb.
+config KEYBOARD_MAX7359
+ tristate "Maxim MAX7359 Key Switch Controller"
+ depends on I2C
+ help
+ If you say yes here you get support for the Maxim MAX7359 Key
+ Switch Controller chip. This provides microprocessors with
+ management of up to 64 key switches.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max7359_keypad.
+
config KEYBOARD_NEWTON
tristate "Newton keyboard"
select SERIO
@@ -260,6 +291,15 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
+config KEYBOARD_OPENCORES
+ tristate "OpenCores Keyboard Controller"
+ help
+ Say Y here if you want to use the OpenCores Keyboard Controller
+ http://www.opencores.org/project,keyboardcontroller
+
+ To compile this driver as a module, choose M here; the
+ module will be called opencores-kbd.
+
config KEYBOARD_PXA27x
tristate "PXA27x/PXA3xx keypad support"
depends on PXA27x || PXA3xx
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 8a7a22b3026..babad5e58b7 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
+obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
@@ -21,10 +22,13 @@ obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o
obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
+obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
+obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
+obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
new file mode 100644
index 00000000000..d48c808d592
--- /dev/null
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -0,0 +1,361 @@
+/*
+ * File: drivers/input/keyboard/adp5588_keys.c
+ * Description: keypad driver for ADP5588 I2C QWERTY Keypad and IO Expander
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+
+#include <linux/i2c/adp5588.h>
+
+ /* Configuration Register1 */
+#define AUTO_INC (1 << 7)
+#define GPIEM_CFG (1 << 6)
+#define OVR_FLOW_M (1 << 5)
+#define INT_CFG (1 << 4)
+#define OVR_FLOW_IEN (1 << 3)
+#define K_LCK_IM (1 << 2)
+#define GPI_IEN (1 << 1)
+#define KE_IEN (1 << 0)
+
+/* Interrupt Status Register */
+#define CMP2_INT (1 << 5)
+#define CMP1_INT (1 << 4)
+#define OVR_FLOW_INT (1 << 3)
+#define K_LCK_INT (1 << 2)
+#define GPI_INT (1 << 1)
+#define KE_INT (1 << 0)
+
+/* Key Lock and Event Counter Register */
+#define K_LCK_EN (1 << 6)
+#define LCK21 0x30
+#define KEC 0xF
+
+/* Key Event Register xy */
+#define KEY_EV_PRESSED (1 << 7)
+#define KEY_EV_MASK (0x7F)
+
+#define KP_SEL(x) (0xFFFF >> (16 - x)) /* 2^x-1 */
+
+#define KEYP_MAX_EVENT 10
+
+/*
+ * Early, pre-4.0 silicon requires delaying the readout by at least 25ms,
+ * since the Event Counter Register is only updated 25ms after the
+ * interrupt is asserted.
+ */
+#define WA_DELAYED_READOUT_REVID(rev) ((rev) < 4)
+
+struct adp5588_kpad {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct delayed_work work;
+ unsigned long delay;
+ unsigned short keycode[ADP5588_KEYMAPSIZE];
+};
+
+static int adp5588_read(struct i2c_client *client, u8 reg)
+{
+ int ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Read Error\n");
+
+ return ret;
+}
+
+static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static void adp5588_work(struct work_struct *work)
+{
+ struct adp5588_kpad *kpad = container_of(work,
+ struct adp5588_kpad, work.work);
+ struct i2c_client *client = kpad->client;
+ int i, key, status, ev_cnt;
+
+ status = adp5588_read(client, INT_STAT);
+
+ if (status & OVR_FLOW_INT) /* Unlikely and should never happen */
+ dev_err(&client->dev, "Event Overflow Error\n");
+
+ if (status & KE_INT) {
+ ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC;
+ if (ev_cnt) {
+ for (i = 0; i < ev_cnt; i++) {
+ key = adp5588_read(client, Key_EVENTA + i);
+ input_report_key(kpad->input,
+ kpad->keycode[(key & KEY_EV_MASK) - 1],
+ key & KEY_EV_PRESSED);
+ }
+ input_sync(kpad->input);
+ }
+ }
+ adp5588_write(client, INT_STAT, status); /* Status is W1C */
+}
+
+static irqreturn_t adp5588_irq(int irq, void *handle)
+{
+ struct adp5588_kpad *kpad = handle;
+
+ /*
+ * Use keventd context to read the event FIFO registers.
+ * Schedule the readout at least 25ms after notification for
+ * REVID < 4.
+ */
+
+ schedule_delayed_work(&kpad->work, kpad->delay);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit adp5588_setup(struct i2c_client *client)
+{
+ struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
+ int i, ret;
+
+ ret = adp5588_write(client, KP_GPIO1, KP_SEL(pdata->rows));
+ ret |= adp5588_write(client, KP_GPIO2, KP_SEL(pdata->cols) & 0xFF);
+ ret |= adp5588_write(client, KP_GPIO3, KP_SEL(pdata->cols) >> 8);
+
+ if (pdata->en_keylock) {
+ ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
+ ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
+ ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN);
+ }
+
+ for (i = 0; i < KEYP_MAX_EVENT; i++)
+ ret |= adp5588_read(client, Key_EVENTA);
+
+ ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT |
+ OVR_FLOW_INT | K_LCK_INT |
+ GPI_INT | KE_INT); /* Status is W1C */
+
+ ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Write Error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devinit adp5588_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5588_kpad *kpad;
+ struct adp5588_kpad_platform_data *pdata = client->dev.platform_data;
+ struct input_dev *input;
+ unsigned int revid;
+ int ret, i;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->rows || !pdata->cols || !pdata->keymap) {
+ dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
+ return -EINVAL;
+ }
+
+ if (pdata->keymapsize != ADP5588_KEYMAPSIZE) {
+ dev_err(&client->dev, "invalid keymapsize\n");
+ return -EINVAL;
+ }
+
+ if (!client->irq) {
+ dev_err(&client->dev, "no IRQ?\n");
+ return -EINVAL;
+ }
+
+ kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!kpad || !input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ kpad->client = client;
+ kpad->input = input;
+ INIT_DELAYED_WORK(&kpad->work, adp5588_work);
+
+ ret = adp5588_read(client, DEV_ID);
+ if (ret < 0) {
+ error = ret;
+ goto err_free_mem;
+ }
+
+ revid = (u8) ret & ADP5588_DEVICE_ID_MASK;
+ if (WA_DELAYED_READOUT_REVID(revid))
+ kpad->delay = msecs_to_jiffies(30);
+
+ input->name = client->name;
+ input->phys = "adp5588-keys/input0";
+ input->dev.parent = &client->dev;
+
+ input_set_drvdata(input, kpad);
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = revid;
+
+ input->keycodesize = sizeof(kpad->keycode[0]);
+ input->keycodemax = pdata->keymapsize;
+ input->keycode = kpad->keycode;
+
+ memcpy(kpad->keycode, pdata->keymap,
+ pdata->keymapsize * input->keycodesize);
+
+ /* setup input device */
+ __set_bit(EV_KEY, input->evbit);
+
+ if (pdata->repeat)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < input->keycodemax; i++)
+ __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit);
+ __clear_bit(KEY_RESERVED, input->keybit);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&client->dev, "unable to register input device\n");
+ goto err_free_mem;
+ }
+
+ error = request_irq(client->irq, adp5588_irq,
+ IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ client->dev.driver->name, kpad);
+ if (error) {
+ dev_err(&client->dev, "irq %d busy?\n", client->irq);
+ goto err_unreg_dev;
+ }
+
+ error = adp5588_setup(client);
+ if (error)
+ goto err_free_irq;
+
+ device_init_wakeup(&client->dev, 1);
+ i2c_set_clientdata(client, kpad);
+
+ dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
+ return 0;
+
+ err_free_irq:
+ free_irq(client->irq, kpad);
+ err_unreg_dev:
+ input_unregister_device(input);
+ input = NULL;
+ err_free_mem:
+ input_free_device(input);
+ kfree(kpad);
+
+ return error;
+}
+
+static int __devexit adp5588_remove(struct i2c_client *client)
+{
+ struct adp5588_kpad *kpad = i2c_get_clientdata(client);
+
+ adp5588_write(client, CFG, 0);
+ free_irq(client->irq, kpad);
+ cancel_delayed_work_sync(&kpad->work);
+ input_unregister_device(kpad->input);
+ i2c_set_clientdata(client, NULL);
+ kfree(kpad);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adp5588_suspend(struct device *dev)
+{
+ struct adp5588_kpad *kpad = dev_get_drvdata(dev);
+ struct i2c_client *client = kpad->client;
+
+ disable_irq(client->irq);
+ cancel_delayed_work_sync(&kpad->work);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int adp5588_resume(struct device *dev)
+{
+ struct adp5588_kpad *kpad = dev_get_drvdata(dev);
+ struct i2c_client *client = kpad->client;
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static struct dev_pm_ops adp5588_dev_pm_ops = {
+ .suspend = adp5588_suspend,
+ .resume = adp5588_resume,
+};
+#endif
+
+static const struct i2c_device_id adp5588_id[] = {
+ { KBUILD_MODNAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp5588_id);
+
+static struct i2c_driver adp5588_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+#ifdef CONFIG_PM
+ .pm = &adp5588_dev_pm_ops,
+#endif
+ },
+ .probe = adp5588_probe,
+ .remove = __devexit_p(adp5588_remove),
+ .id_table = adp5588_id,
+};
+
+static int __init adp5588_init(void)
+{
+ return i2c_add_driver(&adp5588_driver);
+}
+module_init(adp5588_init);
+
+static void __exit adp5588_exit(void)
+{
+ i2c_del_driver(&adp5588_driver);
+}
+module_exit(adp5588_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP5588 Keypad driver");
+MODULE_ALIAS("platform:adp5588-keys");
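The probe above expects board code to supply an adp5588_kpad_platform_data with at least rows, cols, keymap and keymapsize filled in (field names taken from the probe code; everything else below — keycodes, I2C address, IRQ number and the "adp5588-keys" device name — is a hypothetical illustration, not part of the patch):

static unsigned short example_adp5588_keymap[ADP5588_KEYMAPSIZE] = {
	[0] = KEY_A,	/* key event code 1 */
	[1] = KEY_B,	/* key event code 2 */
	/* remaining entries default to KEY_RESERVED */
};

static struct adp5588_kpad_platform_data example_adp5588_pdata = {
	.rows		= 4,
	.cols		= 4,
	.keymap		= example_adp5588_keymap,
	.keymapsize	= ARRAY_SIZE(example_adp5588_keymap),
	.repeat		= 0,
};

static struct i2c_board_info example_board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("adp5588-keys", 0x34),	/* name must match the driver's id table */
		.irq		= 39,
		.platform_data	= &example_adp5588_pdata,
	},
};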
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index c9523e48c6a..4709e15af60 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -229,7 +229,7 @@ struct atkbd {
};
/*
- * System-specific ketymap fixup routine
+ * System-specific keymap fixup routine
*/
static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
static void *atkbd_platform_fixup_data;
@@ -773,23 +773,6 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
static int atkbd_activate(struct atkbd *atkbd)
{
struct ps2dev *ps2dev = &atkbd->ps2dev;
- unsigned char param[1];
-
-/*
- * Set the LEDs to a defined state.
- */
-
- param[0] = 0;
- if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
- return -1;
-
-/*
- * Set autorepeat to fastest possible.
- */
-
- param[0] = 0;
- if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP))
- return -1;
/*
* Enable the keyboard to receive keystrokes.
@@ -1158,14 +1141,6 @@ static int atkbd_reconnect(struct serio *serio)
return -1;
atkbd_activate(atkbd);
-
-/*
- * Restore repeat rate and LEDs (that were reset by atkbd_activate)
- * to pre-resume state
- */
- if (!atkbd->softrepeat)
- atkbd_set_repeat_rate(atkbd);
- atkbd_set_leds(atkbd);
}
atkbd_enable(atkbd);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
new file mode 100644
index 00000000000..3b5b948eba3
--- /dev/null
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -0,0 +1,330 @@
+/*
+ * max7359_keypad.c - MAX7359 Key Switch Controller Driver
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Kim Kyuwon <q1.kim@samsung.com>
+ *
+ * Based on pxa27x_keypad.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheet: http://www.maxim-ic.com/quick_view2.cfm/qv_pk/5456
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+
+#define MAX7359_MAX_KEY_ROWS 8
+#define MAX7359_MAX_KEY_COLS 8
+#define MAX7359_MAX_KEY_NUM (MAX7359_MAX_KEY_ROWS * MAX7359_MAX_KEY_COLS)
+#define MAX7359_ROW_SHIFT 3
+
+/*
+ * MAX7359 registers
+ */
+#define MAX7359_REG_KEYFIFO 0x00
+#define MAX7359_REG_CONFIG 0x01
+#define MAX7359_REG_DEBOUNCE 0x02
+#define MAX7359_REG_INTERRUPT 0x03
+#define MAX7359_REG_PORTS 0x04
+#define MAX7359_REG_KEYREP 0x05
+#define MAX7359_REG_SLEEP 0x06
+
+/*
+ * Configuration register bits
+ */
+#define MAX7359_CFG_SLEEP (1 << 7)
+#define MAX7359_CFG_INTERRUPT (1 << 5)
+#define MAX7359_CFG_KEY_RELEASE (1 << 3)
+#define MAX7359_CFG_WAKEUP (1 << 1)
+#define MAX7359_CFG_TIMEOUT (1 << 0)
+
+/*
+ * Autosleep register values (ms)
+ */
+#define MAX7359_AUTOSLEEP_8192 0x01
+#define MAX7359_AUTOSLEEP_4096 0x02
+#define MAX7359_AUTOSLEEP_2048 0x03
+#define MAX7359_AUTOSLEEP_1024 0x04
+#define MAX7359_AUTOSLEEP_512 0x05
+#define MAX7359_AUTOSLEEP_256 0x06
+
+struct max7359_keypad {
+ /* matrix key code map */
+ unsigned short keycodes[MAX7359_MAX_KEY_NUM];
+
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+};
+
+static int max7359_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(client, reg, val);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
+ __func__, reg, val, ret);
+ return ret;
+}
+
+static int max7359_read_reg(struct i2c_client *client, int reg)
+{
+ int ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: reg 0x%x, err %d\n",
+ __func__, reg, ret);
+ return ret;
+}
+
+static void max7359_build_keycode(struct max7359_keypad *keypad,
+ const struct matrix_keymap_data *keymap_data)
+{
+ struct input_dev *input_dev = keypad->input_dev;
+ int i;
+
+ for (i = 0; i < keymap_data->keymap_size; i++) {
+ unsigned int key = keymap_data->keymap[i];
+ unsigned int row = KEY_ROW(key);
+ unsigned int col = KEY_COL(key);
+ unsigned int scancode = MATRIX_SCAN_CODE(row, col,
+ MAX7359_ROW_SHIFT);
+ unsigned short keycode = KEY_VAL(key);
+
+ keypad->keycodes[scancode] = keycode;
+ __set_bit(keycode, input_dev->keybit);
+ }
+ __clear_bit(KEY_RESERVED, input_dev->keybit);
+}
+
+/* runs in an IRQ thread -- can (and will!) sleep */
+static irqreturn_t max7359_interrupt(int irq, void *dev_id)
+{
+ struct max7359_keypad *keypad = dev_id;
+ struct input_dev *input_dev = keypad->input_dev;
+ int val, row, col, release, code;
+
+ val = max7359_read_reg(keypad->client, MAX7359_REG_KEYFIFO);
+ row = val & 0x7;
+ col = (val >> 3) & 0x7;
+ release = val & 0x40;
+
+ code = MATRIX_SCAN_CODE(row, col, MAX7359_ROW_SHIFT);
+
+ dev_dbg(&keypad->client->dev,
+ "key[%d:%d] %s\n", row, col, release ? "release" : "press");
+
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev, keypad->keycodes[code], !release);
+ input_sync(input_dev);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Let MAX7359 fall into a deep sleep:
+ * If no key is pressed for 8192 ms, the device enters sleep mode;
+ * any key press returns the MAX7359 to normal operating mode.
+ */
+static inline void max7359_fall_deepsleep(struct i2c_client *client)
+{
+ max7359_write_reg(client, MAX7359_REG_SLEEP, MAX7359_AUTOSLEEP_8192);
+}
+
+/*
+ * Let MAX7359 take a catnap:
+ * Autosleep just for 256 ms.
+ */
+static inline void max7359_take_catnap(struct i2c_client *client)
+{
+ max7359_write_reg(client, MAX7359_REG_SLEEP, MAX7359_AUTOSLEEP_256);
+}
+
+static int max7359_open(struct input_dev *dev)
+{
+ struct max7359_keypad *keypad = input_get_drvdata(dev);
+
+ max7359_take_catnap(keypad->client);
+
+ return 0;
+}
+
+static void max7359_close(struct input_dev *dev)
+{
+ struct max7359_keypad *keypad = input_get_drvdata(dev);
+
+ max7359_fall_deepsleep(keypad->client);
+}
+
+static void max7359_initialize(struct i2c_client *client)
+{
+ max7359_write_reg(client, MAX7359_REG_CONFIG,
+ MAX7359_CFG_INTERRUPT | /* Irq clears after host read */
+ MAX7359_CFG_KEY_RELEASE | /* Key release enable */
+ MAX7359_CFG_WAKEUP); /* Key press wakeup enable */
+
+ /* Full key-scan functionality */
+ max7359_write_reg(client, MAX7359_REG_DEBOUNCE, 0x1F);
+
+ /* nINT asserts once per debounce cycle */
+ max7359_write_reg(client, MAX7359_REG_INTERRUPT, 0x01);
+
+ max7359_fall_deepsleep(client);
+}
+
+static int __devinit max7359_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct matrix_keymap_data *keymap_data = client->dev.platform_data;
+ struct max7359_keypad *keypad;
+ struct input_dev *input_dev;
+ int ret;
+ int error;
+
+ if (!client->irq) {
+ dev_err(&client->dev, "The irq number should not be zero\n");
+ return -EINVAL;
+ }
+
+ /* Detect MAX7359: The initial Keys FIFO value is '0x3F' */
+ ret = max7359_read_reg(client, MAX7359_REG_KEYFIFO);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to detect device\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "keys FIFO is 0x%02x\n", ret);
+
+ keypad = kzalloc(sizeof(struct max7359_keypad), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!keypad || !input_dev) {
+ dev_err(&client->dev, "failed to allocate memory\n");
+ error = -ENOMEM;
+ goto failed_free_mem;
+ }
+
+ keypad->client = client;
+ keypad->input_dev = input_dev;
+
+ input_dev->name = client->name;
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->open = max7359_open;
+ input_dev->close = max7359_close;
+ input_dev->dev.parent = &client->dev;
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_dev->keycodesize = sizeof(keypad->keycodes[0]);
+ input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
+ input_dev->keycode = keypad->keycodes;
+
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, keypad);
+
+ max7359_build_keycode(keypad, keymap_data);
+
+ error = request_threaded_irq(client->irq, NULL, max7359_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, keypad);
+ if (error) {
+ dev_err(&client->dev, "failed to register interrupt\n");
+ goto failed_free_mem;
+ }
+
+ /* Register the input device */
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&client->dev, "failed to register input device\n");
+ goto failed_free_irq;
+ }
+
+ /* Initialize MAX7359 */
+ max7359_initialize(client);
+
+ i2c_set_clientdata(client, keypad);
+ device_init_wakeup(&client->dev, 1);
+
+ return 0;
+
+failed_free_irq:
+ free_irq(client->irq, keypad);
+failed_free_mem:
+ input_free_device(input_dev);
+ kfree(keypad);
+ return error;
+}
+
+static int __devexit max7359_remove(struct i2c_client *client)
+{
+ struct max7359_keypad *keypad = i2c_get_clientdata(client);
+
+ free_irq(client->irq, keypad);
+ input_unregister_device(keypad->input_dev);
+ i2c_set_clientdata(client, NULL);
+ kfree(keypad);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int max7359_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ max7359_fall_deepsleep(client);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int max7359_resume(struct i2c_client *client)
+{
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ /* Restore the default setting */
+ max7359_take_catnap(client);
+
+ return 0;
+}
+#else
+#define max7359_suspend NULL
+#define max7359_resume NULL
+#endif
+
+static const struct i2c_device_id max7359_ids[] = {
+ { "max7359", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max7359_ids);
+
+static struct i2c_driver max7359_i2c_driver = {
+ .driver = {
+ .name = "max7359",
+ },
+ .probe = max7359_probe,
+ .remove = __devexit_p(max7359_remove),
+ .suspend = max7359_suspend,
+ .resume = max7359_resume,
+ .id_table = max7359_ids,
+};
+
+static int __init max7359_init(void)
+{
+ return i2c_add_driver(&max7359_i2c_driver);
+}
+module_init(max7359_init);
+
+static void __exit max7359_exit(void)
+{
+ i2c_del_driver(&max7359_i2c_driver);
+}
+module_exit(max7359_exit);
+
+MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
+MODULE_DESCRIPTION("MAX7359 Key Switch Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
new file mode 100644
index 00000000000..78cccddbf55
--- /dev/null
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -0,0 +1,180 @@
+/*
+ * OpenCores Keyboard Controller Driver
+ * http://www.opencores.org/project,keyboardcontroller
+ *
+ * Copyright 2007-2009 HV Sistemas S.L.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct opencores_kbd {
+ struct input_dev *input;
+ struct resource *addr_res;
+ void __iomem *addr;
+ int irq;
+ unsigned short keycodes[128];
+};
+
+static irqreturn_t opencores_kbd_isr(int irq, void *dev_id)
+{
+ struct opencores_kbd *opencores_kbd = dev_id;
+ struct input_dev *input = opencores_kbd->input;
+ unsigned char c;
+
+ c = readb(opencores_kbd->addr);
+ input_report_key(input, c & 0x7f, c & 0x80 ? 0 : 1);
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit opencores_kbd_probe(struct platform_device *pdev)
+{
+ struct input_dev *input;
+ struct opencores_kbd *opencores_kbd;
+ struct resource *res;
+ int irq, i, error;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing board memory resource\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing board IRQ resource\n");
+ return -EINVAL;
+ }
+
+ opencores_kbd = kzalloc(sizeof(*opencores_kbd), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!opencores_kbd || !input) {
+ dev_err(&pdev->dev, "failed to allocate device structures\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ opencores_kbd->addr_res = res;
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ error = -EBUSY;
+ goto err_free_mem;
+ }
+
+ opencores_kbd->addr = ioremap(res->start, resource_size(res));
+ if (!opencores_kbd->addr) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ error = -ENXIO;
+ goto err_rel_mem;
+ }
+
+ opencores_kbd->input = input;
+ opencores_kbd->irq = irq;
+
+ input->name = pdev->name;
+ input->phys = "opencores-kbd/input0";
+ input->dev.parent = &pdev->dev;
+
+ input_set_drvdata(input, opencores_kbd);
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ input->keycode = opencores_kbd->keycodes;
+ input->keycodesize = sizeof(opencores_kbd->keycodes[0]);
+ input->keycodemax = ARRAY_SIZE(opencores_kbd->keycodes);
+
+ __set_bit(EV_KEY, input->evbit);
+
+ for (i = 0; i < ARRAY_SIZE(opencores_kbd->keycodes); i++) {
+ /*
+ * The OpenCores controller's scancodes happen to match
+ * the KEY_* definitions, so an identity mapping suffices.
+ */
+ opencores_kbd->keycodes[i] = i;
+ __set_bit(opencores_kbd->keycodes[i], input->keybit);
+ }
+ __clear_bit(KEY_RESERVED, input->keybit);
+
+ error = request_irq(irq, &opencores_kbd_isr,
+ IRQF_TRIGGER_RISING, pdev->name, opencores_kbd);
+ if (error) {
+ dev_err(&pdev->dev, "unable to claim irq %d\n", irq);
+ goto err_unmap_mem;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "unable to register input device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, opencores_kbd);
+
+ return 0;
+
+ err_free_irq:
+ free_irq(irq, opencores_kbd);
+ err_unmap_mem:
+ iounmap(opencores_kbd->addr);
+ err_rel_mem:
+ release_mem_region(res->start, resource_size(res));
+ err_free_mem:
+ input_free_device(input);
+ kfree(opencores_kbd);
+
+ return error;
+}
+
+static int __devexit opencores_kbd_remove(struct platform_device *pdev)
+{
+ struct opencores_kbd *opencores_kbd = platform_get_drvdata(pdev);
+
+ free_irq(opencores_kbd->irq, opencores_kbd);
+
+ iounmap(opencores_kbd->addr);
+ release_mem_region(opencores_kbd->addr_res->start,
+ resource_size(opencores_kbd->addr_res));
+ input_unregister_device(opencores_kbd->input);
+ kfree(opencores_kbd);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver opencores_kbd_device_driver = {
+ .probe = opencores_kbd_probe,
+ .remove = __devexit_p(opencores_kbd_remove),
+ .driver = {
+ .name = "opencores-kbd",
+ },
+};
+
+static int __init opencores_kbd_init(void)
+{
+ return platform_driver_register(&opencores_kbd_device_driver);
+}
+module_init(opencores_kbd_init);
+
+static void __exit opencores_kbd_exit(void)
+{
+ platform_driver_unregister(&opencores_kbd_device_driver);
+}
+module_exit(opencores_kbd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Herrero <jherrero@hvsistemas.es>");
+MODULE_DESCRIPTION("Keyboard driver for OpenCores Keyboard Controller");
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
new file mode 100644
index 00000000000..191cc51d6cf
--- /dev/null
+++ b/drivers/input/keyboard/qt2160.c
@@ -0,0 +1,397 @@
+/*
+ * qt2160.c - Atmel AT42QT2160 Touch Sense Controller
+ *
+ * Copyright (C) 2009 Raphael Derosso Pereira <raphaelpereira@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+
+#define QT2160_VALID_CHIPID 0x11
+
+#define QT2160_CMD_CHIPID 0
+#define QT2160_CMD_CODEVER 1
+#define QT2160_CMD_GSTAT 2
+#define QT2160_CMD_KEYS3 3
+#define QT2160_CMD_KEYS4 4
+#define QT2160_CMD_SLIDE 5
+#define QT2160_CMD_GPIOS 6
+#define QT2160_CMD_SUBVER 7
+#define QT2160_CMD_CALIBRATE 10
+
+#define QT2160_CYCLE_INTERVAL (2*HZ)
+
+static unsigned char qt2160_key2code[] = {
+ KEY_0, KEY_1, KEY_2, KEY_3,
+ KEY_4, KEY_5, KEY_6, KEY_7,
+ KEY_8, KEY_9, KEY_A, KEY_B,
+ KEY_C, KEY_D, KEY_E, KEY_F,
+};
+
+struct qt2160_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct delayed_work dwork;
+ spinlock_t lock; /* Protects canceling/rescheduling of dwork */
+ unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
+ u16 key_matrix;
+};
+
+static int qt2160_read_block(struct i2c_client *client,
+ u8 inireg, u8 *buffer, unsigned int count)
+{
+ int error, idx = 0;
+
+ /*
+ * We can't use an SMBus block data read here. Check for plain I2C
+ * functionality to speed things up whenever possible; otherwise we
+ * are forced to read the registers one byte at a time.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+
+ error = i2c_smbus_write_byte(client, inireg + idx);
+ if (error) {
+ dev_err(&client->dev,
+ "couldn't send request. Returned %d\n", error);
+ return error;
+ }
+
+ error = i2c_master_recv(client, buffer, count);
+ if (error != count) {
+ dev_err(&client->dev,
+ "couldn't read registers. Returned %d bytes\n", error);
+ return error;
+ }
+ } else {
+
+ while (count--) {
+ int data;
+
+ error = i2c_smbus_write_byte(client, inireg + idx);
+ if (error) {
+ dev_err(&client->dev,
+ "couldn't send request. Returned %d\n", error);
+ return error;
+ }
+
+ data = i2c_smbus_read_byte(client);
+ if (data < 0) {
+ dev_err(&client->dev,
+ "couldn't read register. Returned %d\n", data);
+ return data;
+ }
+
+ buffer[idx++] = data;
+ }
+ }
+
+ return 0;
+}
+
+static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
+{
+ struct i2c_client *client = qt2160->client;
+ struct input_dev *input = qt2160->input;
+ u8 regs[6];
+ u16 old_matrix, new_matrix;
+ int ret, i, mask;
+
+ dev_dbg(&client->dev, "requesting keys...\n");
+
+ /*
+ * Read all registers from General Status Register
+ * to GPIOs register
+ */
+ ret = qt2160_read_block(client, QT2160_CMD_GSTAT, regs, 6);
+ if (ret) {
+ dev_err(&client->dev,
+ "could not perform chip read.\n");
+ return ret;
+ }
+
+ old_matrix = qt2160->key_matrix;
+ qt2160->key_matrix = new_matrix = (regs[2] << 8) | regs[1];
+
+ mask = 0x01;
+ for (i = 0; i < 16; ++i, mask <<= 1) {
+ int keyval = new_matrix & mask;
+
+ if ((old_matrix & mask) != keyval) {
+ input_report_key(input, qt2160->keycodes[i], keyval);
+ dev_dbg(&client->dev, "key %d %s\n",
+ i, keyval ? "pressed" : "released");
+ }
+ }
+
+ input_sync(input);
+
+ return 0;
+}
+
+static irqreturn_t qt2160_irq(int irq, void *_qt2160)
+{
+ struct qt2160_data *qt2160 = _qt2160;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qt2160->lock, flags);
+
+ __cancel_delayed_work(&qt2160->dwork);
+ schedule_delayed_work(&qt2160->dwork, 0);
+
+ spin_unlock_irqrestore(&qt2160->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void qt2160_schedule_read(struct qt2160_data *qt2160)
+{
+ spin_lock_irq(&qt2160->lock);
+ schedule_delayed_work(&qt2160->dwork, QT2160_CYCLE_INTERVAL);
+ spin_unlock_irq(&qt2160->lock);
+}
+
+static void qt2160_worker(struct work_struct *work)
+{
+ struct qt2160_data *qt2160 =
+ container_of(work, struct qt2160_data, dwork.work);
+
+ dev_dbg(&qt2160->client->dev, "worker\n");
+
+ qt2160_get_key_matrix(qt2160);
+
+ /* Avoid a device lock-up by polling the key matrix every so often */
+ qt2160_schedule_read(qt2160);
+}
+
+static int __devinit qt2160_read(struct i2c_client *client, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte(client, reg);
+ if (ret) {
+ dev_err(&client->dev,
+ "couldn't send request. Returned %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "couldn't read register. Returned %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int __devinit qt2160_write(struct i2c_client *client, u8 reg, u8 data)
+{
+ int error;
+
+ error = i2c_smbus_write_byte(client, reg);
+ if (error) {
+ dev_err(&client->dev,
+ "couldn't send request. Returned %d\n", error);
+ return error;
+ }
+
+ error = i2c_smbus_write_byte(client, data);
+ if (error) {
+ dev_err(&client->dev,
+ "couldn't write data. Returned %d\n", error);
+ return error;
+ }
+
+ return error;
+}
+
+
+static bool __devinit qt2160_identify(struct i2c_client *client)
+{
+ int id, ver, rev;
+
+ /* Read Chip ID to check if chip is valid */
+ id = qt2160_read(client, QT2160_CMD_CHIPID);
+ if (id != QT2160_VALID_CHIPID) {
+ dev_err(&client->dev, "ID %d not supported\n", id);
+ return false;
+ }
+
+ /* Read chip firmware version */
+ ver = qt2160_read(client, QT2160_CMD_CODEVER);
+ if (ver < 0) {
+ dev_err(&client->dev, "could not get firmware version\n");
+ return false;
+ }
+
+ /* Read chip firmware revision */
+ rev = qt2160_read(client, QT2160_CMD_SUBVER);
+ if (rev < 0) {
+ dev_err(&client->dev, "could not get firmware revision\n");
+ return false;
+ }
+
+ dev_info(&client->dev, "AT42QT2160 firmware version %d.%d.%d\n",
+ ver >> 4, ver & 0xf, rev);
+
+ return true;
+}
+
+static int __devinit qt2160_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct qt2160_data *qt2160;
+ struct input_dev *input;
+ int i;
+ int error;
+
+ /* Check functionality */
+ error = i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE);
+ if (!error) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ if (!qt2160_identify(client))
+ return -ENODEV;
+
+ /* Chip is valid and active. Allocate structure */
+ qt2160 = kzalloc(sizeof(struct qt2160_data), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!qt2160 || !input) {
+ dev_err(&client->dev, "insufficient memory\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ qt2160->client = client;
+ qt2160->input = input;
+ INIT_DELAYED_WORK(&qt2160->dwork, qt2160_worker);
+ spin_lock_init(&qt2160->lock);
+
+ input->name = "AT42QT2160 Touch Sense Keyboard";
+ input->id.bustype = BUS_I2C;
+
+ input->keycode = qt2160->keycodes;
+ input->keycodesize = sizeof(qt2160->keycodes[0]);
+ input->keycodemax = ARRAY_SIZE(qt2160_key2code);
+
+ __set_bit(EV_KEY, input->evbit);
+ __clear_bit(EV_REP, input->evbit);
+ for (i = 0; i < ARRAY_SIZE(qt2160_key2code); i++) {
+ qt2160->keycodes[i] = qt2160_key2code[i];
+ __set_bit(qt2160_key2code[i], input->keybit);
+ }
+ __clear_bit(KEY_RESERVED, input->keybit);
+
+ /* Calibrate device */
+ error = qt2160_write(client, QT2160_CMD_CALIBRATE, 1);
+ if (error) {
+ dev_err(&client->dev, "failed to calibrate device\n");
+ goto err_free_mem;
+ }
+
+ if (client->irq) {
+ error = request_irq(client->irq, qt2160_irq,
+ IRQF_TRIGGER_FALLING, "qt2160", qt2160);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to allocate irq %d\n", client->irq);
+ goto err_free_mem;
+ }
+ }
+
+ error = input_register_device(qt2160->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device\n");
+ goto err_free_irq;
+ }
+
+ i2c_set_clientdata(client, qt2160);
+ qt2160_schedule_read(qt2160);
+
+ return 0;
+
+err_free_irq:
+ if (client->irq)
+ free_irq(client->irq, qt2160);
+err_free_mem:
+ input_free_device(input);
+ kfree(qt2160);
+ return error;
+}
+
+static int __devexit qt2160_remove(struct i2c_client *client)
+{
+ struct qt2160_data *qt2160 = i2c_get_clientdata(client);
+
+ /* Release IRQ so no queue will be scheduled */
+ if (client->irq)
+ free_irq(client->irq, qt2160);
+
+ cancel_delayed_work_sync(&qt2160->dwork);
+
+ input_unregister_device(qt2160->input);
+ kfree(qt2160);
+
+ i2c_set_clientdata(client, NULL);
+ return 0;
+}
+
+static struct i2c_device_id qt2160_idtable[] = {
+ { "qt2160", 0, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, qt2160_idtable);
+
+static struct i2c_driver qt2160_driver = {
+ .driver = {
+ .name = "qt2160",
+ .owner = THIS_MODULE,
+ },
+
+ .id_table = qt2160_idtable,
+ .probe = qt2160_probe,
+ .remove = __devexit_p(qt2160_remove),
+};
+
+static int __init qt2160_init(void)
+{
+ return i2c_add_driver(&qt2160_driver);
+}
+module_init(qt2160_init);
+
+static void __exit qt2160_cleanup(void)
+{
+ i2c_del_driver(&qt2160_driver);
+}
+module_exit(qt2160_cleanup);
+
+MODULE_AUTHOR("Raphael Derosso Pereira <raphaelpereira@gmail.com>");
+MODULE_DESCRIPTION("Driver for AT42QT2160 Touch Sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 1a50be379cb..02f4f8f1db6 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -222,6 +222,23 @@ config INPUT_SGI_BTNS
To compile this driver as a module, choose M here: the
module will be called sgi_btns.
+config INPUT_WINBOND_CIR
+ tristate "Winbond IR remote control"
+ depends on X86 && PNP
+ select NEW_LEDS
+ select LEDS_CLASS
+ select BITREVERSE
+ help
+ Say Y here if you want to use the IR remote functionality found
+ in some Winbond SuperI/O chips. Currently only the WPCD376I
+ chip is supported (included in some Intel Media series motherboards).
+
+ IR receive and wake-on-IR from suspend and power-off are
+ currently supported.
+
+ To compile this driver as a module, choose M here: the module will be
+ called winbond_cir.
+
config HP_SDC_RTC
tristate "HP SDC Real Time Clock"
depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index bf4db626c31..a8b84854fb7 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
+obj-$(CONFIG_INPUT_WINBOND_CIR) += winbond-cir.o
obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 0918acae584..f2b67dc81d8 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -96,7 +96,13 @@ static struct {
{ 0x3169, KEY_PAUSE, },
};
-/* runs in an IRQ thread -- can (and will!) sleep */
+/*
+ * Because we communicate with the MSP430 using I2C, and all I2C calls
+ * in Linux sleep, we use a threaded IRQ handler. The IRQ itself is
+ * active low, but we go through the GPIO controller so we can trigger
+ * on falling edges and not worry about enabling/disabling the IRQ in
+ * the keypress handling path.
+ */
static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
{
struct dm355evm_keys *keys = _keys;
@@ -171,18 +177,6 @@ static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
return IRQ_HANDLED;
}
-/*
- * Because we communicate with the MSP430 using I2C, and all I2C calls
- * in Linux sleep, we use a threaded IRQ handler. The IRQ itself is
- * active low, but we go through the GPIO controller so we can trigger
- * on falling edges and not worry about enabling/disabling the IRQ in
- * the keypress handling path.
- */
-static irqreturn_t dm355evm_keys_hardirq(int irq, void *_keys)
-{
- return IRQ_WAKE_THREAD;
-}
-
static int dm355evm_setkeycode(struct input_dev *dev, int index, int keycode)
{
u16 old_keycode;
@@ -257,10 +251,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
/* REVISIT: flush the event queue? */
- status = request_threaded_irq(keys->irq,
- dm355evm_keys_hardirq, dm355evm_keys_irq,
- IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), keys);
+ status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
+ IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
if (status < 0)
goto fail1;
diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
new file mode 100644
index 00000000000..33309fe44e2
--- /dev/null
+++ b/drivers/input/misc/winbond-cir.c
@@ -0,0 +1,1614 @@
+/*
+ * winbond-cir.c - Driver for the Consumer IR functionality of Winbond
+ * SuperI/O chips.
+ *
+ * Currently supports the Winbond WPCD376i chip (PNP id WEC1022), but
+ * could probably support others (Winbond WEC102X, NatSemi, etc)
+ * with minor modifications.
+ *
+ * Original Author: David Härdeman <david@hardeman.nu>
+ * Copyright (C) 2009 David Härdeman <david@hardeman.nu>
+ *
+ * Dedicated to Matilda, my newborn daughter, without whose loving attention
+ * this driver would have been finished in half the time and with a fraction
+ * of the bugs.
+ *
+ * Written using:
+ * o Winbond WPCD376I datasheet helpfully provided by Jesse Barnes at Intel
+ * o NatSemi PC87338/PC97338 datasheet (for the serial port stuff)
+ * o DSDT dumps
+ *
+ * Supported features:
+ * o RC6
+ * o Wake-On-CIR functionality
+ *
+ * To do:
+ * o Test NEC and RC5
+ *
+ * Left as an exercise for the reader:
+ * o Learning (I have neither the hardware, nor the need)
+ * o IR Transmit (ibid)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pci_ids.h>
+#include <linux/io.h>
+#include <linux/bitrev.h>
+#include <linux/bitops.h>
+
+#define DRVNAME "winbond-cir"
+
+/* CEIR Wake-Up Registers, relative to data->wbase */
+#define WBCIR_REG_WCEIR_CTL 0x03 /* CEIR Receiver Control */
+#define WBCIR_REG_WCEIR_STS 0x04 /* CEIR Receiver Status */
+#define WBCIR_REG_WCEIR_EV_EN 0x05 /* CEIR Receiver Event Enable */
+#define WBCIR_REG_WCEIR_CNTL 0x06 /* CEIR Receiver Counter Low */
+#define WBCIR_REG_WCEIR_CNTH 0x07 /* CEIR Receiver Counter High */
+#define WBCIR_REG_WCEIR_INDEX 0x08 /* CEIR Receiver Index */
+#define WBCIR_REG_WCEIR_DATA 0x09 /* CEIR Receiver Data */
+#define WBCIR_REG_WCEIR_CSL 0x0A /* CEIR Re. Compare Strlen */
+#define WBCIR_REG_WCEIR_CFG1 0x0B /* CEIR Re. Configuration 1 */
+#define WBCIR_REG_WCEIR_CFG2 0x0C /* CEIR Re. Configuration 2 */
+
+/* CEIR Enhanced Functionality Registers, relative to data->ebase */
+#define WBCIR_REG_ECEIR_CTS 0x00 /* Enhanced IR Control Status */
+#define WBCIR_REG_ECEIR_CCTL 0x01 /* Infrared Counter Control */
+#define WBCIR_REG_ECEIR_CNT_LO 0x02 /* Infrared Counter LSB */
+#define WBCIR_REG_ECEIR_CNT_HI 0x03 /* Infrared Counter MSB */
+#define WBCIR_REG_ECEIR_IREM 0x04 /* Infrared Emitter Status */
+
+/* SP3 Banked Registers, relative to data->sbase */
+#define WBCIR_REG_SP3_BSR 0x03 /* Bank Select, all banks */
+ /* Bank 0 */
+#define WBCIR_REG_SP3_RXDATA 0x00 /* FIFO RX data (r) */
+#define WBCIR_REG_SP3_TXDATA 0x00 /* FIFO TX data (w) */
+#define WBCIR_REG_SP3_IER 0x01 /* Interrupt Enable */
+#define WBCIR_REG_SP3_EIR 0x02 /* Event Identification (r) */
+#define WBCIR_REG_SP3_FCR 0x02 /* FIFO Control (w) */
+#define WBCIR_REG_SP3_MCR 0x04 /* Mode Control */
+#define WBCIR_REG_SP3_LSR 0x05 /* Link Status */
+#define WBCIR_REG_SP3_MSR 0x06 /* Modem Status */
+#define WBCIR_REG_SP3_ASCR 0x07 /* Aux Status and Control */
+ /* Bank 2 */
+#define WBCIR_REG_SP3_BGDL 0x00 /* Baud Divisor LSB */
+#define WBCIR_REG_SP3_BGDH 0x01 /* Baud Divisor MSB */
+#define WBCIR_REG_SP3_EXCR1 0x02 /* Extended Control 1 */
+#define WBCIR_REG_SP3_EXCR2 0x04 /* Extended Control 2 */
+#define WBCIR_REG_SP3_TXFLV 0x06 /* TX FIFO Level */
+#define WBCIR_REG_SP3_RXFLV 0x07 /* RX FIFO Level */
+ /* Bank 3 */
+#define WBCIR_REG_SP3_MRID 0x00 /* Module Identification */
+#define WBCIR_REG_SP3_SH_LCR 0x01 /* LCR Shadow */
+#define WBCIR_REG_SP3_SH_FCR 0x02 /* FCR Shadow */
+ /* Bank 4 */
+#define WBCIR_REG_SP3_IRCR1 0x02 /* Infrared Control 1 */
+ /* Bank 5 */
+#define WBCIR_REG_SP3_IRCR2 0x04 /* Infrared Control 2 */
+ /* Bank 6 */
+#define WBCIR_REG_SP3_IRCR3 0x00 /* Infrared Control 3 */
+#define WBCIR_REG_SP3_SIR_PW 0x02 /* SIR Pulse Width */
+ /* Bank 7 */
+#define WBCIR_REG_SP3_IRRXDC 0x00 /* IR RX Demod Control */
+#define WBCIR_REG_SP3_IRTXMC 0x01 /* IR TX Mod Control */
+#define WBCIR_REG_SP3_RCCFG 0x02 /* CEIR Config */
+#define WBCIR_REG_SP3_IRCFG1 0x04 /* Infrared Config 1 */
+#define WBCIR_REG_SP3_IRCFG4 0x07 /* Infrared Config 4 */
+
+/*
+ * Magic values follow
+ */
+
+/* No interrupts for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
+#define WBCIR_IRQ_NONE 0x00
+/* RX data bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
+#define WBCIR_IRQ_RX 0x01
+/* Over/Under-flow bit for WBCIR_REG_SP3_IER and WBCIR_REG_SP3_EIR */
+#define WBCIR_IRQ_ERR 0x04
+/* Led enable/disable bit for WBCIR_REG_ECEIR_CTS */
+#define WBCIR_LED_ENABLE 0x80
+/* RX data available bit for WBCIR_REG_SP3_LSR */
+#define WBCIR_RX_AVAIL 0x01
+/* RX disable bit for WBCIR_REG_SP3_ASCR */
+#define WBCIR_RX_DISABLE 0x20
+/* Extended mode enable bit for WBCIR_REG_SP3_EXCR1 */
+#define WBCIR_EXT_ENABLE 0x01
+/* Select compare register in WBCIR_REG_WCEIR_INDEX (bits 5 & 6) */
+#define WBCIR_REGSEL_COMPARE 0x10
+/* Select mask register in WBCIR_REG_WCEIR_INDEX (bits 5 & 6) */
+#define WBCIR_REGSEL_MASK 0x20
+/* Starting address of selected register in WBCIR_REG_WCEIR_INDEX */
+#define WBCIR_REG_ADDR0 0x00
+
+/* Valid banks for the SP3 UART */
+enum wbcir_bank {
+ WBCIR_BANK_0 = 0x00,
+ WBCIR_BANK_1 = 0x80,
+ WBCIR_BANK_2 = 0xE0,
+ WBCIR_BANK_3 = 0xE4,
+ WBCIR_BANK_4 = 0xE8,
+ WBCIR_BANK_5 = 0xEC,
+ WBCIR_BANK_6 = 0xF0,
+ WBCIR_BANK_7 = 0xF4,
+};
+
+/* Supported IR Protocols */
+enum wbcir_protocol {
+ IR_PROTOCOL_RC5 = 0x0,
+ IR_PROTOCOL_NEC = 0x1,
+ IR_PROTOCOL_RC6 = 0x2,
+};
+
+/* Misc */
+#define WBCIR_NAME "Winbond CIR"
+#define WBCIR_ID_FAMILY 0xF1 /* Family ID for the WPCD376I */
+#define WBCIR_ID_CHIP 0x04 /* Chip ID for the WPCD376I */
+#define IR_KEYPRESS_TIMEOUT 250 /* FIXME: should be per-protocol? */
+#define INVALID_SCANCODE 0x7FFFFFFF /* Invalid with all protos */
+#define WAKEUP_IOMEM_LEN 0x10 /* Wake-Up I/O Reg Len */
+#define EHFUNC_IOMEM_LEN 0x10 /* Enhanced Func I/O Reg Len */
+#define SP_IOMEM_LEN 0x08 /* Serial Port 3 (IR) Reg Len */
+#define WBCIR_MAX_IDLE_BYTES 10
+
+static DEFINE_SPINLOCK(wbcir_lock);
+static DEFINE_RWLOCK(keytable_lock);
+
+struct wbcir_key {
+ u32 scancode;
+ unsigned int keycode;
+};
+
+struct wbcir_keyentry {
+ struct wbcir_key key;
+ struct list_head list;
+};
+
+static struct wbcir_key rc6_def_keymap[] = {
+ { 0x800F0400, KEY_NUMERIC_0 },
+ { 0x800F0401, KEY_NUMERIC_1 },
+ { 0x800F0402, KEY_NUMERIC_2 },
+ { 0x800F0403, KEY_NUMERIC_3 },
+ { 0x800F0404, KEY_NUMERIC_4 },
+ { 0x800F0405, KEY_NUMERIC_5 },
+ { 0x800F0406, KEY_NUMERIC_6 },
+ { 0x800F0407, KEY_NUMERIC_7 },
+ { 0x800F0408, KEY_NUMERIC_8 },
+ { 0x800F0409, KEY_NUMERIC_9 },
+ { 0x800F041D, KEY_NUMERIC_STAR },
+ { 0x800F041C, KEY_NUMERIC_POUND },
+ { 0x800F0410, KEY_VOLUMEUP },
+ { 0x800F0411, KEY_VOLUMEDOWN },
+ { 0x800F0412, KEY_CHANNELUP },
+ { 0x800F0413, KEY_CHANNELDOWN },
+ { 0x800F040E, KEY_MUTE },
+ { 0x800F040D, KEY_VENDOR }, /* Vista Logo Key */
+ { 0x800F041E, KEY_UP },
+ { 0x800F041F, KEY_DOWN },
+ { 0x800F0420, KEY_LEFT },
+ { 0x800F0421, KEY_RIGHT },
+ { 0x800F0422, KEY_OK },
+ { 0x800F0423, KEY_ESC },
+ { 0x800F040F, KEY_INFO },
+ { 0x800F040A, KEY_CLEAR },
+ { 0x800F040B, KEY_ENTER },
+ { 0x800F045B, KEY_RED },
+ { 0x800F045C, KEY_GREEN },
+ { 0x800F045D, KEY_YELLOW },
+ { 0x800F045E, KEY_BLUE },
+ { 0x800F045A, KEY_TEXT },
+ { 0x800F0427, KEY_SWITCHVIDEOMODE },
+ { 0x800F040C, KEY_POWER },
+ { 0x800F0450, KEY_RADIO },
+ { 0x800F0448, KEY_PVR },
+ { 0x800F0447, KEY_AUDIO },
+ { 0x800F0426, KEY_EPG },
+ { 0x800F0449, KEY_CAMERA },
+ { 0x800F0425, KEY_TV },
+ { 0x800F044A, KEY_VIDEO },
+ { 0x800F0424, KEY_DVD },
+ { 0x800F0416, KEY_PLAY },
+ { 0x800F0418, KEY_PAUSE },
+ { 0x800F0419, KEY_STOP },
+ { 0x800F0414, KEY_FASTFORWARD },
+ { 0x800F041A, KEY_NEXT },
+ { 0x800F041B, KEY_PREVIOUS },
+ { 0x800F0415, KEY_REWIND },
+ { 0x800F0417, KEY_RECORD },
+};
+
+/* Registers and other state is protected by wbcir_lock */
+struct wbcir_data {
+ unsigned long wbase; /* Wake-Up Baseaddr */
+ unsigned long ebase; /* Enhanced Func. Baseaddr */
+ unsigned long sbase; /* Serial Port Baseaddr */
+ unsigned int irq; /* Serial Port IRQ */
+
+ struct input_dev *input_dev;
+ struct timer_list timer_keyup;
+ struct led_trigger *rxtrigger;
+ struct led_trigger *txtrigger;
+ struct led_classdev led;
+
+ u32 last_scancode;
+ unsigned int last_keycode;
+ u8 last_toggle;
+ u8 keypressed;
+ unsigned long keyup_jiffies;
+ unsigned int idle_count;
+
+ /* RX irdata and parsing state */
+ unsigned long irdata[30];
+ unsigned int irdata_count;
+ unsigned int irdata_idle;
+ unsigned int irdata_off;
+ unsigned int irdata_error;
+
+ /* Protected by keytable_lock */
+ struct list_head keytable;
+};
+
+static enum wbcir_protocol protocol = IR_PROTOCOL_RC6;
+module_param(protocol, uint, 0444);
+MODULE_PARM_DESC(protocol, "IR protocol to use "
+ "(0 = RC5, 1 = NEC, 2 = RC6A; default 2)");
+
+static int invert; /* default = 0 */
+module_param(invert, bool, 0444);
+MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
+
+static unsigned int wake_sc = 0x800F040C;
+module_param(wake_sc, uint, 0644);
+MODULE_PARM_DESC(wake_sc, "Scancode of the power-on IR command");
+
+static unsigned int wake_rc6mode = 6;
+module_param(wake_rc6mode, uint, 0644);
+MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command "
+ "(0 = mode 0, 6 = mode 6A; default 6)");
+
+
+
+/*****************************************************************************
+ *
+ * UTILITY FUNCTIONS
+ *
+ *****************************************************************************/
+
+/* Caller needs to hold wbcir_lock */
+static void
+wbcir_set_bits(unsigned long addr, u8 bits, u8 mask)
+{
+ u8 val;
+
+ val = inb(addr);
+ val = ((val & ~mask) | (bits & mask));
+ outb(val, addr);
+}
+
+/* Selects the register bank for the serial port */
+static inline void
+wbcir_select_bank(struct wbcir_data *data, enum wbcir_bank bank)
+{
+ outb(bank, data->sbase + WBCIR_REG_SP3_BSR);
+}
+
+static enum led_brightness
+wbcir_led_brightness_get(struct led_classdev *led_cdev)
+{
+ struct wbcir_data *data = container_of(led_cdev,
+ struct wbcir_data,
+ led);
+
+ if (inb(data->ebase + WBCIR_REG_ECEIR_CTS) & WBCIR_LED_ENABLE)
+ return LED_FULL;
+ else
+ return LED_OFF;
+}
+
+static void
+wbcir_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct wbcir_data *data = container_of(led_cdev,
+ struct wbcir_data,
+ led);
+
+ wbcir_set_bits(data->ebase + WBCIR_REG_ECEIR_CTS,
+ brightness == LED_OFF ? 0x00 : WBCIR_LED_ENABLE,
+ WBCIR_LED_ENABLE);
+}
+
+/* Manchester encodes bits to RC6 message cells (see wbcir_parse_rc6) */
+static u8
+wbcir_to_rc6cells(u8 val)
+{
+ u8 coded = 0x00;
+ int i;
+
+ val &= 0x0F;
+ for (i = 0; i < 4; i++) {
+ if (val & 0x01)
+ coded |= 0x02 << (i * 2);
+ else
+ coded |= 0x01 << (i * 2);
+ val >>= 1;
+ }
+
+ return coded;
+}
+
+
+
+/*****************************************************************************
+ *
+ * INPUT FUNCTIONS
+ *
+ *****************************************************************************/
+
+static unsigned int
+wbcir_do_getkeycode(struct wbcir_data *data, u32 scancode)
+{
+ struct wbcir_keyentry *keyentry;
+ unsigned int keycode = KEY_RESERVED;
+ unsigned long flags;
+
+ read_lock_irqsave(&keytable_lock, flags);
+
+ list_for_each_entry(keyentry, &data->keytable, list) {
+ if (keyentry->key.scancode == scancode) {
+ keycode = keyentry->key.keycode;
+ break;
+ }
+ }
+
+ read_unlock_irqrestore(&keytable_lock, flags);
+ return keycode;
+}
+
+static int
+wbcir_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+ struct wbcir_data *data = input_get_drvdata(dev);
+
+ *keycode = (int)wbcir_do_getkeycode(data, (u32)scancode);
+ return 0;
+}
+
+static int
+wbcir_setkeycode(struct input_dev *dev, int sscancode, int keycode)
+{
+ struct wbcir_data *data = input_get_drvdata(dev);
+ struct wbcir_keyentry *keyentry;
+ struct wbcir_keyentry *new_keyentry;
+ unsigned long flags;
+ unsigned int old_keycode = KEY_RESERVED;
+ u32 scancode = (u32)sscancode;
+
+ if (keycode < 0 || keycode > KEY_MAX)
+ return -EINVAL;
+
+ new_keyentry = kmalloc(sizeof(*new_keyentry), GFP_KERNEL);
+ if (!new_keyentry)
+ return -ENOMEM;
+
+ write_lock_irqsave(&keytable_lock, flags);
+
+ list_for_each_entry(keyentry, &data->keytable, list) {
+ if (keyentry->key.scancode != scancode)
+ continue;
+
+ old_keycode = keyentry->key.keycode;
+ keyentry->key.keycode = keycode;
+
+ if (keyentry->key.keycode == KEY_RESERVED) {
+ list_del(&keyentry->list);
+ kfree(keyentry);
+ }
+
+ break;
+ }
+
+ set_bit(keycode, dev->keybit);
+
+ if (old_keycode == KEY_RESERVED) {
+ new_keyentry->key.scancode = scancode;
+ new_keyentry->key.keycode = keycode;
+ list_add(&new_keyentry->list, &data->keytable);
+ } else {
+ kfree(new_keyentry);
+ clear_bit(old_keycode, dev->keybit);
+ list_for_each_entry(keyentry, &data->keytable, list) {
+ if (keyentry->key.keycode == old_keycode) {
+ set_bit(old_keycode, dev->keybit);
+ break;
+ }
+ }
+ }
+
+ write_unlock_irqrestore(&keytable_lock, flags);
+ return 0;
+}
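These hooks back the standard evdev keycode ioctls, so a scancode from the default RC6 table can be remapped from userspace without touching the driver. A hedged userspace sketch, assuming the receiver shows up as /dev/input/event5 (the path and the chosen mapping are illustrative); in this kernel generation EVIOCSKEYCODE takes an int[2] of scancode and keycode.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
        /* remap the RC6 "OK" scancode (0x800F0422 above) to KEY_ENTER */
        int codes[2] = { (int)0x800F0422, KEY_ENTER };
        int fd = open("/dev/input/event5", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, EVIOCSKEYCODE, codes))
                perror("EVIOCSKEYCODE");
        close(fd);
        return 0;
}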
+
+/*
+ * Timer function to report keyup event some time after keydown is
+ * reported by the ISR.
+ */
+static void
+wbcir_keyup(unsigned long cookie)
+{
+ struct wbcir_data *data = (struct wbcir_data *)cookie;
+ unsigned long flags;
+
+ /*
+ * data->keyup_jiffies is used to prevent a race condition if a
+ * hardware interrupt occurs at this point and the keyup timer
+ * event is moved further into the future as a result.
+ *
+ * The timer will then be reactivated and this function called
+ * again in the future. We need to exit gracefully in that case
+ * to allow the input subsystem to do its auto-repeat magic or
+ * a keyup event might follow immediately after the keydown.
+ */
+
+ spin_lock_irqsave(&wbcir_lock, flags);
+
+ if (time_is_after_eq_jiffies(data->keyup_jiffies) && data->keypressed) {
+ data->keypressed = 0;
+ led_trigger_event(data->rxtrigger, LED_OFF);
+ input_report_key(data->input_dev, data->last_keycode, 0);
+ input_sync(data->input_dev);
+ }
+
+ spin_unlock_irqrestore(&wbcir_lock, flags);
+}
+
+static void
+wbcir_keydown(struct wbcir_data *data, u32 scancode, u8 toggle)
+{
+ unsigned int keycode;
+
+ /* Repeat? */
+ if (data->last_scancode == scancode &&
+ data->last_toggle == toggle &&
+ data->keypressed)
+ goto set_timer;
+ data->last_scancode = scancode;
+
+ /* Do we need to release an old keypress? */
+ if (data->keypressed) {
+ input_report_key(data->input_dev, data->last_keycode, 0);
+ input_sync(data->input_dev);
+ data->keypressed = 0;
+ }
+
+ /* Report scancode */
+ input_event(data->input_dev, EV_MSC, MSC_SCAN, (int)scancode);
+
+ /* Do we know this scancode? */
+ keycode = wbcir_do_getkeycode(data, scancode);
+ if (keycode == KEY_RESERVED)
+ goto set_timer;
+
+ /* Register a keypress */
+ input_report_key(data->input_dev, keycode, 1);
+ data->keypressed = 1;
+ data->last_keycode = keycode;
+ data->last_toggle = toggle;
+
+set_timer:
+ input_sync(data->input_dev);
+ led_trigger_event(data->rxtrigger,
+ data->keypressed ? LED_FULL : LED_OFF);
+ data->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&data->timer_keyup, data->keyup_jiffies);
+}
+
+
+
+/*****************************************************************************
+ *
+ * IR PARSING FUNCTIONS
+ *
+ *****************************************************************************/
+
+/* Resets all irdata */
+static void
+wbcir_reset_irdata(struct wbcir_data *data)
+{
+ memset(data->irdata, 0, sizeof(data->irdata));
+ data->irdata_count = 0;
+ data->irdata_off = 0;
+ data->irdata_error = 0;
+}
+
+/* Adds one bit of irdata */
+static void
+add_irdata_bit(struct wbcir_data *data, int set)
+{
+ if (data->irdata_count >= sizeof(data->irdata) * 8) {
+ data->irdata_error = 1;
+ return;
+ }
+
+ if (set)
+ __set_bit(data->irdata_count, data->irdata);
+ data->irdata_count++;
+}
+
+/* Gets count bits of irdata */
+static u16
+get_bits(struct wbcir_data *data, int count)
+{
+ u16 val = 0x0;
+
+ if (data->irdata_count - data->irdata_off < count) {
+ data->irdata_error = 1;
+ return 0x0;
+ }
+
+ while (count > 0) {
+ val <<= 1;
+ if (test_bit(data->irdata_off, data->irdata))
+ val |= 0x1;
+ count--;
+ data->irdata_off++;
+ }
+
+ return val;
+}
+
+/* Reads 16 cells and converts them to a byte */
+static u8
+wbcir_rc6cells_to_byte(struct wbcir_data *data)
+{
+ u16 raw = get_bits(data, 16);
+ u8 val = 0x00;
+ int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ switch (raw & 0x03) {
+ case 0x01:
+ break;
+ case 0x02:
+ val |= (0x01 << bit);
+ break;
+ default:
+ data->irdata_error = 1;
+ break;
+ }
+ raw >>= 2;
+ }
+
+ return val;
+}
+
+/* Decodes a number of bits from raw RC5 data */
+static u8
+wbcir_get_rc5bits(struct wbcir_data *data, unsigned int count)
+{
+ u16 raw = get_bits(data, count * 2);
+ u8 val = 0x00;
+ int bit;
+
+ for (bit = 0; bit < count; bit++) {
+ switch (raw & 0x03) {
+ case 0x01:
+ val |= (0x01 << bit);
+ break;
+ case 0x02:
+ break;
+ default:
+ data->irdata_error = 1;
+ break;
+ }
+ raw >>= 2;
+ }
+
+ return val;
+}
+
+static void
+wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
+{
+ /*
+ * Normal bits are manchester coded as follows:
+ * cell0 + cell1 = logic "0"
+ * cell1 + cell0 = logic "1"
+ *
+ * The IR pulse has the following components:
+ *
+ * Leader - 6 * cell1 - discarded
+ * Gap - 2 * cell0 - discarded
+ * Start bit - Normal Coding - always "1"
+ * Mode Bit 2 - 0 - Normal Coding
+ * Toggle bit - Normal Coding with double bit time,
+ * e.g. cell0 + cell0 + cell1 + cell1
+ * means logic "0".
+ *
+ * The rest depends on the mode, the following modes are known:
+ *
+ * MODE 0:
+ * Address Bit 7 - 0 - Normal Coding
+ * Command Bit 7 - 0 - Normal Coding
+ *
+ * MODE 6:
+ * The above Toggle Bit is used as a submode bit, 0 = A, 1 = B.
+ * Submode B is for pointing devices, only remotes using submode A
+ * are supported.
+ *
+ * Customer range bit - 0 => Customer = 7 bits, 0...127
+ * 1 => Customer = 15 bits, 32768...65535
+ * Customer Bits - Normal Coding
+ *
+ * Customer codes are allocated by Philips. The rest of the bits
+ * are customer dependent. The following is commonly used (and the
+ * only supported config):
+ *
+ * Toggle Bit - Normal Coding
+ * Address Bit 6 - 0 - Normal Coding
+ * Command Bit 7 - 0 - Normal Coding
+ *
+ * All modes are followed by at least 6 * cell0.
+ *
+ * MODE 0 msglen:
+ * 1 * 2 (start bit) + 3 * 2 (mode) + 2 * 2 (toggle) +
+ * 8 * 2 (address) + 8 * 2 (command) =
+ * 44 cells
+ *
+ * MODE 6A msglen:
+ * 1 * 2 (start bit) + 3 * 2 (mode) + 2 * 2 (submode) +
+ * 1 * 2 (customer range bit) + 7/15 * 2 (customer bits) +
+ * 1 * 2 (toggle bit) + 7 * 2 (address) + 8 * 2 (command) =
+ * 60 - 76 cells
+ */
+ u8 mode;
+ u8 toggle;
+ u16 customer = 0x0;
+ u8 address;
+ u8 command;
+ u32 scancode;
+
+ /* Leader mark */
+ while (get_bits(data, 1) && !data->irdata_error)
+ /* Do nothing */;
+
+ /* Leader space */
+ if (get_bits(data, 1)) {
+ dev_dbg(dev, "RC6 - Invalid leader space\n");
+ return;
+ }
+
+ /* Start bit */
+ if (get_bits(data, 2) != 0x02) {
+ dev_dbg(dev, "RC6 - Invalid start bit\n");
+ return;
+ }
+
+ /* Mode */
+ mode = get_bits(data, 6);
+ switch (mode) {
+ case 0x15: /* 010101 = b000 */
+ mode = 0;
+ break;
+ case 0x29: /* 101001 = b110 */
+ mode = 6;
+ break;
+ default:
+ dev_dbg(dev, "RC6 - Invalid mode\n");
+ return;
+ }
+
+ /* Toggle bit / Submode bit */
+ toggle = get_bits(data, 4);
+ switch (toggle) {
+ case 0x03:
+ toggle = 0;
+ break;
+ case 0x0C:
+ toggle = 1;
+ break;
+ default:
+ dev_dbg(dev, "RC6 - Toggle bit error\n");
+ break;
+ }
+
+ /* Customer */
+ if (mode == 6) {
+ if (toggle != 0) {
+ dev_dbg(dev, "RC6B - Not Supported\n");
+ return;
+ }
+
+ customer = wbcir_rc6cells_to_byte(data);
+
+ if (customer & 0x80) {
+ /* 15 bit customer value */
+ customer <<= 8;
+ customer |= wbcir_rc6cells_to_byte(data);
+ }
+ }
+
+ /* Address */
+ address = wbcir_rc6cells_to_byte(data);
+ if (mode == 6) {
+ toggle = address >> 7;
+ address &= 0x7F;
+ }
+
+ /* Command */
+ command = wbcir_rc6cells_to_byte(data);
+
+ /* Create scancode */
+ scancode = command;
+ scancode |= address << 8;
+ scancode |= customer << 16;
+
+ /* Last sanity check */
+ if (data->irdata_error) {
+ dev_dbg(dev, "RC6 - Cell error(s)\n");
+ return;
+ }
+
+ dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
+ "toggle %u mode %u scan 0x%08X\n",
+ address,
+ command,
+ customer,
+ (unsigned int)toggle,
+ (unsigned int)mode,
+ scancode);
+
+ wbcir_keydown(data, scancode, toggle);
+}
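To make the resulting scancode layout concrete: the three decoded fields pack into one 32-bit value exactly as the tail of wbcir_parse_rc6() does, and that is also the format the wake_sc module parameter expects. A small sketch (the helper name is made up):

#include <linux/types.h>

/* Packs RC6 fields the same way wbcir_parse_rc6() does above. */
static inline u32 rc6_pack_scancode(u16 customer, u8 address, u8 command)
{
        return ((u32)customer << 16) | ((u32)address << 8) | command;
}

/*
 * rc6_pack_scancode(0x800F, 0x04, 0x0C) == 0x800F040C, i.e. KEY_POWER
 * in rc6_def_keymap and the default value of wake_sc.
 */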
+
+static void
+wbcir_parse_rc5(struct device *dev, struct wbcir_data *data)
+{
+ /*
+ * Bits are manchester coded as follows:
+ * cell1 + cell0 = logic "0"
+ * cell0 + cell1 = logic "1"
+ * (i.e. the reverse of RC6)
+ *
+ * Start bit 1 - "1" - discarded
+ * Start bit 2 - Must be inverted to get command bit 6
+ * Toggle bit
+ * Address Bit 4 - 0
+ * Command Bit 5 - 0
+ */
+ u8 toggle;
+ u8 address;
+ u8 command;
+ u32 scancode;
+
+ /* Start bit 1 */
+ if (!get_bits(data, 1)) {
+ dev_dbg(dev, "RC5 - Invalid start bit\n");
+ return;
+ }
+
+ /* Start bit 2 */
+ if (!wbcir_get_rc5bits(data, 1))
+ command = 0x40;
+ else
+ command = 0x00;
+
+ toggle = wbcir_get_rc5bits(data, 1);
+ address = wbcir_get_rc5bits(data, 5);
+ command |= wbcir_get_rc5bits(data, 6);
+ scancode = address << 7 | command;
+
+ /* Last sanity check */
+ if (data->irdata_error) {
+ dev_dbg(dev, "RC5 - Invalid message\n");
+ return;
+ }
+
+ dev_dbg(dev, "IR-RC5 ad %u cm %u t %u s %u\n",
+ (unsigned int)address,
+ (unsigned int)command,
+ (unsigned int)toggle,
+ (unsigned int)scancode);
+
+ wbcir_keydown(data, scancode, toggle);
+}
+
+static void
+wbcir_parse_nec(struct device *dev, struct wbcir_data *data)
+{
+ /*
+ * Each bit represents 560 us.
+ *
+ * Leader - 9 ms burst
+ * Gap - 4.5 ms silence
+ * Address1 bit 0 - 7 - Address 1
+ * Address2 bit 0 - 7 - Address 2
+ * Command1 bit 0 - 7 - Command 1
+ * Command2 bit 0 - 7 - Command 2
+ *
+ * Note the bit order!
+ *
+ * With the old NEC protocol, Address2 was the inverse of Address1
+ * and Command2 was the inverse of Command1 and were used as
+ * an error check.
+ *
+ * With NEC extended, Address1 is the LSB of the Address and
+ * Address2 is the MSB, Command parsing remains unchanged.
+ *
+ * A repeat message is coded as:
+ * Leader - 9 ms burst
+ * Gap - 2.25 ms silence
+ * Repeat - 560 us active
+ */
+ u8 address1;
+ u8 address2;
+ u8 command1;
+ u8 command2;
+ u16 address;
+ u32 scancode;
+
+ /* Leader mark */
+ while (get_bits(data, 1) && !data->irdata_error)
+ /* Do nothing */;
+
+ /* Leader space */
+ if (get_bits(data, 4)) {
+ dev_dbg(dev, "NEC - Invalid leader space\n");
+ return;
+ }
+
+ /* Repeat? */
+ if (get_bits(data, 1)) {
+ if (!data->keypressed) {
+ dev_dbg(dev, "NEC - Stray repeat message\n");
+ return;
+ }
+
+ dev_dbg(dev, "IR-NEC repeat s %u\n",
+ (unsigned int)data->last_scancode);
+
+ wbcir_keydown(data, data->last_scancode, data->last_toggle);
+ return;
+ }
+
+ /* Remaining leader space */
+ if (get_bits(data, 3)) {
+ dev_dbg(dev, "NEC - Invalid leader space\n");
+ return;
+ }
+
+ address1 = bitrev8(get_bits(data, 8));
+ address2 = bitrev8(get_bits(data, 8));
+ command1 = bitrev8(get_bits(data, 8));
+ command2 = bitrev8(get_bits(data, 8));
+
+ /* Sanity check */
+ if (data->irdata_error) {
+ dev_dbg(dev, "NEC - Invalid message\n");
+ return;
+ }
+
+ /* Check command validity (cast needed: ~ promotes u8 to int) */
+ if (command1 != (u8)~command2) {
+ dev_dbg(dev, "NEC - Command bytes mismatch\n");
+ return;
+ }
+
+ /* Check for extended NEC protocol */
+ address = address1;
+ if (address1 != (u8)~address2)
+ address |= address2 << 8;
+
+ scancode = address << 8 | command1;
+
+ dev_dbg(dev, "IR-NEC ad %u cm %u s %u\n",
+ (unsigned int)address,
+ (unsigned int)command1,
+ (unsigned int)scancode);
+
+ wbcir_keydown(data, scancode, !data->last_toggle);
+}
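The complement checks above are easy to get wrong: with u8 operands the ~ operator promotes to int, so the result has to be truncated back to u8 before comparing (hence the casts). A minimal sketch of the idiom, with a hypothetical helper name:

#include <linux/types.h>

/* True if an NEC data byte and its transmitted complement agree. */
static inline bool nec_bytes_agree(u8 byte, u8 inverted_byte)
{
        return byte == (u8)~inverted_byte;      /* cast undoes int promotion */
}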
+
+
+
+/*****************************************************************************
+ *
+ * INTERRUPT FUNCTIONS
+ *
+ *****************************************************************************/
+
+static irqreturn_t
+wbcir_irq_handler(int irqno, void *cookie)
+{
+ struct pnp_dev *device = cookie;
+ struct wbcir_data *data = pnp_get_drvdata(device);
+ struct device *dev = &device->dev;
+ u8 status;
+ unsigned long flags;
+ u8 irdata[8];
+ int i;
+ unsigned int hw;
+
+ spin_lock_irqsave(&wbcir_lock, flags);
+
+ wbcir_select_bank(data, WBCIR_BANK_0);
+
+ status = inb(data->sbase + WBCIR_REG_SP3_EIR);
+
+ if (!(status & (WBCIR_IRQ_RX | WBCIR_IRQ_ERR))) {
+ spin_unlock_irqrestore(&wbcir_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (status & WBCIR_IRQ_ERR)
+ data->irdata_error = 1;
+
+ if (!(status & WBCIR_IRQ_RX))
+ goto out;
+
+ /* Since RXHDLEV is set, at least 8 bytes are in the FIFO */
+ insb(data->sbase + WBCIR_REG_SP3_RXDATA, &irdata[0], 8);
+
+ for (i = 0; i < sizeof(irdata); i++) {
+ hw = hweight8(irdata[i]);
+ if (hw > 4)
+ add_irdata_bit(data, 0);
+ else
+ add_irdata_bit(data, 1);
+
+ if (hw == 8)
+ data->idle_count++;
+ else
+ data->idle_count = 0;
+ }
+
+ if (data->idle_count > WBCIR_MAX_IDLE_BYTES) {
+ /* Set RXINACTIVE... */
+ outb(WBCIR_RX_DISABLE, data->sbase + WBCIR_REG_SP3_ASCR);
+
+ /* ...and drain the FIFO */
+ while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL)
+ inb(data->sbase + WBCIR_REG_SP3_RXDATA);
+
+ dev_dbg(dev, "IRDATA:\n");
+ for (i = 0; i < data->irdata_count; i += BITS_PER_LONG)
+ dev_dbg(dev, "0x%08lX\n", data->irdata[i/BITS_PER_LONG]);
+
+ switch (protocol) {
+ case IR_PROTOCOL_RC5:
+ wbcir_parse_rc5(dev, data);
+ break;
+ case IR_PROTOCOL_RC6:
+ wbcir_parse_rc6(dev, data);
+ break;
+ case IR_PROTOCOL_NEC:
+ wbcir_parse_nec(dev, data);
+ break;
+ }
+
+ wbcir_reset_irdata(data);
+ data->idle_count = 0;
+ }
+
+out:
+ spin_unlock_irqrestore(&wbcir_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+
+/*****************************************************************************
+ *
+ * SUSPEND/RESUME FUNCTIONS
+ *
+ *****************************************************************************/
+
+static void
+wbcir_shutdown(struct pnp_dev *device)
+{
+ struct device *dev = &device->dev;
+ struct wbcir_data *data = pnp_get_drvdata(device);
+ int do_wake = 1;
+ u8 match[11];
+ u8 mask[11];
+ u8 rc6_csl = 0;
+ int i;
+
+ memset(match, 0, sizeof(match));
+ memset(mask, 0, sizeof(mask));
+
+ if (wake_sc == INVALID_SCANCODE || !device_may_wakeup(dev)) {
+ do_wake = 0;
+ goto finish;
+ }
+
+ switch (protocol) {
+ case IR_PROTOCOL_RC5:
+ if (wake_sc > 0xFFF) {
+ do_wake = 0;
+ dev_err(dev, "RC5 - Invalid wake scancode\n");
+ break;
+ }
+
+ /* Mask = 13 bits, excluding the toggle bit */
+ mask[0] = 0xFF;
+ mask[1] = 0x17;
+
+ match[0] = (wake_sc & 0x003F); /* 6 command bits */
+ match[0] |= (wake_sc & 0x0180) >> 1; /* 2 address bits */
+ match[1] = (wake_sc & 0x0E00) >> 9; /* 3 address bits */
+ if (!(wake_sc & 0x0040)) /* 2nd start bit */
+ match[1] |= 0x10;
+
+ break;
+
+ case IR_PROTOCOL_NEC:
+ if (wake_sc > 0xFFFFFF) {
+ do_wake = 0;
+ dev_err(dev, "NEC - Invalid wake scancode\n");
+ break;
+ }
+
+ mask[0] = mask[1] = mask[2] = mask[3] = 0xFF;
+
+ match[1] = bitrev8((wake_sc & 0xFF));
+ match[0] = ~match[1];
+
+ match[3] = bitrev8((wake_sc & 0xFF00) >> 8);
+ if (wake_sc > 0xFFFF)
+ match[2] = bitrev8((wake_sc & 0xFF0000) >> 16);
+ else
+ match[2] = ~match[3];
+
+ break;
+
+ case IR_PROTOCOL_RC6:
+
+ if (wake_rc6mode == 0) {
+ if (wake_sc > 0xFFFF) {
+ do_wake = 0;
+ dev_err(dev, "RC6 - Invalid wake scancode\n");
+ break;
+ }
+
+ /* Command */
+ match[0] = wbcir_to_rc6cells(wake_sc >> 0);
+ mask[0] = 0xFF;
+ match[1] = wbcir_to_rc6cells(wake_sc >> 4);
+ mask[1] = 0xFF;
+
+ /* Address */
+ match[2] = wbcir_to_rc6cells(wake_sc >> 8);
+ mask[2] = 0xFF;
+ match[3] = wbcir_to_rc6cells(wake_sc >> 12);
+ mask[3] = 0xFF;
+
+ /* Header */
+ match[4] = 0x50; /* mode1 = mode0 = 0, ignore toggle */
+ mask[4] = 0xF0;
+ match[5] = 0x09; /* start bit = 1, mode2 = 0 */
+ mask[5] = 0x0F;
+
+ rc6_csl = 44;
+
+ } else if (wake_rc6mode == 6) {
+ i = 0;
+
+ /* Command */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 0);
+ mask[i++] = 0xFF;
+ match[i] = wbcir_to_rc6cells(wake_sc >> 4);
+ mask[i++] = 0xFF;
+
+ /* Address + Toggle */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 8);
+ mask[i++] = 0xFF;
+ match[i] = wbcir_to_rc6cells(wake_sc >> 12);
+ mask[i++] = 0x3F;
+
+ /* Customer bits 7 - 0 */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 16);
+ mask[i++] = 0xFF;
+ match[i] = wbcir_to_rc6cells(wake_sc >> 20);
+ mask[i++] = 0xFF;
+
+ if (wake_sc & 0x80000000) {
+ /* Customer range bit and bits 15 - 8 */
+ match[i] = wbcir_to_rc6cells(wake_sc >> 24);
+ mask[i++] = 0xFF;
+ match[i] = wbcir_to_rc6cells(wake_sc >> 28);
+ mask[i++] = 0xFF;
+ rc6_csl = 76;
+ } else if (wake_sc <= 0x007FFFFF) {
+ rc6_csl = 60;
+ } else {
+ do_wake = 0;
+ dev_err(dev, "RC6 - Invalid wake scancode\n");
+ break;
+ }
+
+ /* Header */
+ match[i] = 0x93; /* mode1 = mode0 = 1, submode = 0 */
+ mask[i++] = 0xFF;
+ match[i] = 0x0A; /* start bit = 1, mode2 = 1 */
+ mask[i++] = 0x0F;
+
+ } else {
+ do_wake = 0;
+ dev_err(dev, "RC6 - Invalid wake mode\n");
+ }
+
+ break;
+
+ default:
+ do_wake = 0;
+ break;
+ }
+
+finish:
+ if (do_wake) {
+ /* Set compare and compare mask */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_INDEX,
+ WBCIR_REGSEL_COMPARE | WBCIR_REG_ADDR0,
+ 0x3F);
+ outsb(data->wbase + WBCIR_REG_WCEIR_DATA, match, 11);
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_INDEX,
+ WBCIR_REGSEL_MASK | WBCIR_REG_ADDR0,
+ 0x3F);
+ outsb(data->wbase + WBCIR_REG_WCEIR_DATA, mask, 11);
+
+ /* RC6 Compare String Len */
+ outb(rc6_csl, data->wbase + WBCIR_REG_WCEIR_CSL);
+
+ /* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
+
+ /* Clear BUFF_EN, Clear END_EN, Set MATCH_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x01, 0x07);
+
+ /* Set CEIR_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x01, 0x01);
+
+ } else {
+ /* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
+
+ /* Clear CEIR_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01);
+ }
+
+ /* Disable interrupts */
+ outb(WBCIR_IRQ_NONE, data->sbase + WBCIR_REG_SP3_IER);
+}
+
+static int
+wbcir_suspend(struct pnp_dev *device, pm_message_t state)
+{
+ wbcir_shutdown(device);
+ return 0;
+}
+
+static int
+wbcir_resume(struct pnp_dev *device)
+{
+ struct wbcir_data *data = pnp_get_drvdata(device);
+
+ /* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
+
+ /* Clear CEIR_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01);
+
+ /* Enable interrupts */
+ wbcir_reset_irdata(data);
+ outb(WBCIR_IRQ_RX | WBCIR_IRQ_ERR, data->sbase + WBCIR_REG_SP3_IER);
+
+ return 0;
+}
+
+
+
+/*****************************************************************************
+ *
+ * SETUP/INIT FUNCTIONS
+ *
+ *****************************************************************************/
+
+static void
+wbcir_cfg_ceir(struct wbcir_data *data)
+{
+ u8 tmp;
+
+ /* Set PROT_SEL, RX_INV, Clear CEIR_EN (needed for the led) */
+ tmp = protocol << 4;
+ if (invert)
+ tmp |= 0x08;
+ outb(tmp, data->wbase + WBCIR_REG_WCEIR_CTL);
+
+ /* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
+
+ /* Clear BUFF_EN, Clear END_EN, Clear MATCH_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
+
+ /* Set RC5 cell time to correspond to 36 kHz */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CFG1, 0x4A, 0x7F);
+
+ /* Set IRTX_INV */
+ if (invert)
+ outb(0x04, data->ebase + WBCIR_REG_ECEIR_CCTL);
+ else
+ outb(0x00, data->ebase + WBCIR_REG_ECEIR_CCTL);
+
+ /*
+	 * Clear IR LED, set SP3 clock to 24 MHz,
+ * set SP3_IRRX_SW to binary 01, helpfully not documented
+ */
+ outb(0x10, data->ebase + WBCIR_REG_ECEIR_CTS);
+}
+
+static int __devinit
+wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
+{
+ struct device *dev = &device->dev;
+ struct wbcir_data *data;
+ int err;
+
+ if (!(pnp_port_len(device, 0) == EHFUNC_IOMEM_LEN &&
+ pnp_port_len(device, 1) == WAKEUP_IOMEM_LEN &&
+ pnp_port_len(device, 2) == SP_IOMEM_LEN)) {
+ dev_err(dev, "Invalid resources\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pnp_set_drvdata(device, data);
+
+ data->ebase = pnp_port_start(device, 0);
+ data->wbase = pnp_port_start(device, 1);
+ data->sbase = pnp_port_start(device, 2);
+ data->irq = pnp_irq(device, 0);
+
+ if (data->wbase == 0 || data->ebase == 0 ||
+ data->sbase == 0 || data->irq == 0) {
+ err = -ENODEV;
+ dev_err(dev, "Invalid resources\n");
+ goto exit_free_data;
+ }
+
+ dev_dbg(&device->dev, "Found device "
+ "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
+ data->wbase, data->ebase, data->sbase, data->irq);
+
+ if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
+ dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+ data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
+ err = -EBUSY;
+ goto exit_free_data;
+ }
+
+ if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
+ dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+ data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
+ err = -EBUSY;
+ goto exit_release_wbase;
+ }
+
+ if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
+ dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+ data->sbase, data->sbase + SP_IOMEM_LEN - 1);
+ err = -EBUSY;
+ goto exit_release_ebase;
+ }
+
+ err = request_irq(data->irq, wbcir_irq_handler,
+ IRQF_DISABLED, DRVNAME, device);
+ if (err) {
+ dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
+ err = -EBUSY;
+ goto exit_release_sbase;
+ }
+
+ led_trigger_register_simple("cir-tx", &data->txtrigger);
+ if (!data->txtrigger) {
+ err = -ENOMEM;
+ goto exit_free_irq;
+ }
+
+ led_trigger_register_simple("cir-rx", &data->rxtrigger);
+ if (!data->rxtrigger) {
+ err = -ENOMEM;
+ goto exit_unregister_txtrigger;
+ }
+
+ data->led.name = "cir::activity";
+ data->led.default_trigger = "cir-rx";
+ data->led.brightness_set = wbcir_led_brightness_set;
+ data->led.brightness_get = wbcir_led_brightness_get;
+ err = led_classdev_register(&device->dev, &data->led);
+ if (err)
+ goto exit_unregister_rxtrigger;
+
+ data->input_dev = input_allocate_device();
+ if (!data->input_dev) {
+ err = -ENOMEM;
+ goto exit_unregister_led;
+ }
+
+ data->input_dev->evbit[0] = BIT(EV_KEY);
+ data->input_dev->name = WBCIR_NAME;
+ data->input_dev->phys = "wbcir/cir0";
+ data->input_dev->id.bustype = BUS_HOST;
+ data->input_dev->id.vendor = PCI_VENDOR_ID_WINBOND;
+ data->input_dev->id.product = WBCIR_ID_FAMILY;
+ data->input_dev->id.version = WBCIR_ID_CHIP;
+ data->input_dev->getkeycode = wbcir_getkeycode;
+ data->input_dev->setkeycode = wbcir_setkeycode;
+ input_set_capability(data->input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(data->input_dev, data);
+
+ err = input_register_device(data->input_dev);
+ if (err)
+ goto exit_free_input;
+
+ data->last_scancode = INVALID_SCANCODE;
+ INIT_LIST_HEAD(&data->keytable);
+ setup_timer(&data->timer_keyup, wbcir_keyup, (unsigned long)data);
+
+ /* Load default keymaps */
+ if (protocol == IR_PROTOCOL_RC6) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(rc6_def_keymap); i++) {
+ err = wbcir_setkeycode(data->input_dev,
+ (int)rc6_def_keymap[i].scancode,
+ (int)rc6_def_keymap[i].keycode);
+ if (err)
+ goto exit_unregister_keys;
+ }
+ }
+
+ device_init_wakeup(&device->dev, 1);
+
+ wbcir_cfg_ceir(data);
+
+ /* Disable interrupts */
+ wbcir_select_bank(data, WBCIR_BANK_0);
+ outb(WBCIR_IRQ_NONE, data->sbase + WBCIR_REG_SP3_IER);
+
+ /* Enable extended mode */
+ wbcir_select_bank(data, WBCIR_BANK_2);
+ outb(WBCIR_EXT_ENABLE, data->sbase + WBCIR_REG_SP3_EXCR1);
+
+ /*
+	 * Configure the baud generator; IR data will be sampled at
+	 * a bitrate of (24 MHz * prescaler) / (divisor * 16).
+	 *
+	 * The ECIR registers include a flag to change the
+	 * 24 MHz clock frequency to 48 MHz.
+	 *
+	 * It's not documented in the specs, but FIFO levels
+	 * other than 16 seem to be unsupported.
+ */
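+	/*
+	 * Back-of-the-envelope check of the divisors chosen below: for RC6,
+	 * 0x53 = 83 gives 24 MHz / (83 * 16), roughly 18.07 kHz, so the
+	 * eight data bits of one sampled byte span about 443 us, i.e. one
+	 * RC6 cell.  The RC5 (0xA7) and NEC (0x69) divisors work out to
+	 * about 889 us and 560 us per byte in the same way.
+	 */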
+
+ /* prescaler 1.0, tx/rx fifo lvl 16 */
+ outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
+
+ /* Set baud divisor to generate one byte per bit/cell */
+ switch (protocol) {
+ case IR_PROTOCOL_RC5:
+ outb(0xA7, data->sbase + WBCIR_REG_SP3_BGDL);
+ break;
+ case IR_PROTOCOL_RC6:
+ outb(0x53, data->sbase + WBCIR_REG_SP3_BGDL);
+ break;
+ case IR_PROTOCOL_NEC:
+ outb(0x69, data->sbase + WBCIR_REG_SP3_BGDL);
+ break;
+ }
+ outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
+
+ /* Set CEIR mode */
+ wbcir_select_bank(data, WBCIR_BANK_0);
+ outb(0xC0, data->sbase + WBCIR_REG_SP3_MCR);
+ inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
+ inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
+
+ /* Disable RX demod, run-length encoding/decoding, set freq span */
+ wbcir_select_bank(data, WBCIR_BANK_7);
+ outb(0x10, data->sbase + WBCIR_REG_SP3_RCCFG);
+
+ /* Disable timer */
+ wbcir_select_bank(data, WBCIR_BANK_4);
+ outb(0x00, data->sbase + WBCIR_REG_SP3_IRCR1);
+
+ /* Enable MSR interrupt, Clear AUX_IRX */
+ wbcir_select_bank(data, WBCIR_BANK_5);
+ outb(0x00, data->sbase + WBCIR_REG_SP3_IRCR2);
+
+ /* Disable CRC */
+ wbcir_select_bank(data, WBCIR_BANK_6);
+ outb(0x20, data->sbase + WBCIR_REG_SP3_IRCR3);
+
+ /* Set RX/TX (de)modulation freq, not really used */
+ wbcir_select_bank(data, WBCIR_BANK_7);
+ outb(0xF2, data->sbase + WBCIR_REG_SP3_IRRXDC);
+ outb(0x69, data->sbase + WBCIR_REG_SP3_IRTXMC);
+
+ /* Set invert and pin direction */
+ if (invert)
+ outb(0x10, data->sbase + WBCIR_REG_SP3_IRCFG4);
+ else
+ outb(0x00, data->sbase + WBCIR_REG_SP3_IRCFG4);
+
+ /* Set FIFO thresholds (RX = 8, TX = 3), reset RX/TX */
+ wbcir_select_bank(data, WBCIR_BANK_0);
+ outb(0x97, data->sbase + WBCIR_REG_SP3_FCR);
+
+ /* Clear AUX status bits */
+ outb(0xE0, data->sbase + WBCIR_REG_SP3_ASCR);
+
+ /* Enable interrupts */
+ outb(WBCIR_IRQ_RX | WBCIR_IRQ_ERR, data->sbase + WBCIR_REG_SP3_IER);
+
+ return 0;
+
+exit_unregister_keys:
+ if (!list_empty(&data->keytable)) {
+ struct wbcir_keyentry *key;
+ struct wbcir_keyentry *keytmp;
+
+ list_for_each_entry_safe(key, keytmp, &data->keytable, list) {
+ list_del(&key->list);
+ kfree(key);
+ }
+ }
+ input_unregister_device(data->input_dev);
+ /* Can't call input_free_device on an unregistered device */
+ data->input_dev = NULL;
+exit_free_input:
+ input_free_device(data->input_dev);
+exit_unregister_led:
+ led_classdev_unregister(&data->led);
+exit_unregister_rxtrigger:
+ led_trigger_unregister_simple(data->rxtrigger);
+exit_unregister_txtrigger:
+ led_trigger_unregister_simple(data->txtrigger);
+exit_free_irq:
+ free_irq(data->irq, device);
+exit_release_sbase:
+ release_region(data->sbase, SP_IOMEM_LEN);
+exit_release_ebase:
+ release_region(data->ebase, EHFUNC_IOMEM_LEN);
+exit_release_wbase:
+ release_region(data->wbase, WAKEUP_IOMEM_LEN);
+exit_free_data:
+ kfree(data);
+ pnp_set_drvdata(device, NULL);
+exit:
+ return err;
+}
+
+static void __devexit
+wbcir_remove(struct pnp_dev *device)
+{
+ struct wbcir_data *data = pnp_get_drvdata(device);
+ struct wbcir_keyentry *key;
+ struct wbcir_keyentry *keytmp;
+
+ /* Disable interrupts */
+ wbcir_select_bank(data, WBCIR_BANK_0);
+ outb(WBCIR_IRQ_NONE, data->sbase + WBCIR_REG_SP3_IER);
+
+ del_timer_sync(&data->timer_keyup);
+
+ free_irq(data->irq, device);
+
+ /* Clear status bits NEC_REP, BUFF, MSG_END, MATCH */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_STS, 0x17, 0x17);
+
+ /* Clear CEIR_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_CTL, 0x00, 0x01);
+
+ /* Clear BUFF_EN, END_EN, MATCH_EN */
+ wbcir_set_bits(data->wbase + WBCIR_REG_WCEIR_EV_EN, 0x00, 0x07);
+
+ /* This will generate a keyup event if necessary */
+ input_unregister_device(data->input_dev);
+
+ led_trigger_unregister_simple(data->rxtrigger);
+ led_trigger_unregister_simple(data->txtrigger);
+ led_classdev_unregister(&data->led);
+
+ /* This is ok since &data->led isn't actually used */
+ wbcir_led_brightness_set(&data->led, LED_OFF);
+
+ release_region(data->wbase, WAKEUP_IOMEM_LEN);
+ release_region(data->ebase, EHFUNC_IOMEM_LEN);
+ release_region(data->sbase, SP_IOMEM_LEN);
+
+ list_for_each_entry_safe(key, keytmp, &data->keytable, list) {
+ list_del(&key->list);
+ kfree(key);
+ }
+
+ kfree(data);
+
+ pnp_set_drvdata(device, NULL);
+}
+
+static const struct pnp_device_id wbcir_ids[] = {
+ { "WEC1022", 0 },
+ { "", 0 }
+};
+MODULE_DEVICE_TABLE(pnp, wbcir_ids);
+
+static struct pnp_driver wbcir_driver = {
+ .name = WBCIR_NAME,
+ .id_table = wbcir_ids,
+ .probe = wbcir_probe,
+ .remove = __devexit_p(wbcir_remove),
+ .suspend = wbcir_suspend,
+ .resume = wbcir_resume,
+ .shutdown = wbcir_shutdown
+};
+
+static int __init
+wbcir_init(void)
+{
+ int ret;
+
+ switch (protocol) {
+ case IR_PROTOCOL_RC5:
+ case IR_PROTOCOL_NEC:
+ case IR_PROTOCOL_RC6:
+ break;
+ default:
+ printk(KERN_ERR DRVNAME ": Invalid protocol argument\n");
+ return -EINVAL;
+ }
+
+ ret = pnp_register_driver(&wbcir_driver);
+ if (ret)
+ printk(KERN_ERR DRVNAME ": Unable to register driver\n");
+
+ return ret;
+}
+
+static void __exit
+wbcir_exit(void)
+{
+ pnp_unregister_driver(&wbcir_driver);
+}
+
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver");
+MODULE_LICENSE("GPL");
+
+module_init(wbcir_init);
+module_exit(wbcir_exit);
+
+
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 84e2fc04d11..f84cbd97c88 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -92,7 +92,8 @@ static int fsp_reg_read(struct psmouse *psmouse, int reg_addr, int *reg_val)
*/
ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
- mutex_lock(&ps2dev->cmd_mutex);
+
+ ps2_begin_command(ps2dev);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
goto out;
@@ -126,7 +127,7 @@ static int fsp_reg_read(struct psmouse *psmouse, int reg_addr, int *reg_val)
rc = 0;
out:
- mutex_unlock(&ps2dev->cmd_mutex);
+ ps2_end_command(ps2dev);
ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
dev_dbg(&ps2dev->serio->dev, "READ REG: 0x%02x is 0x%02x (rc = %d)\n",
@@ -140,7 +141,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
unsigned char v;
int rc = -1;
- mutex_lock(&ps2dev->cmd_mutex);
+ ps2_begin_command(ps2dev);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
goto out;
@@ -179,7 +180,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
rc = 0;
out:
- mutex_unlock(&ps2dev->cmd_mutex);
+ ps2_end_command(ps2dev);
dev_dbg(&ps2dev->serio->dev, "WRITE REG: 0x%02x to 0x%02x (rc = %d)\n",
reg_addr, reg_val, rc);
return rc;
@@ -214,7 +215,8 @@ static int fsp_page_reg_read(struct psmouse *psmouse, int *reg_val)
ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
- mutex_lock(&ps2dev->cmd_mutex);
+
+ ps2_begin_command(ps2dev);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
goto out;
@@ -236,7 +238,7 @@ static int fsp_page_reg_read(struct psmouse *psmouse, int *reg_val)
rc = 0;
out:
- mutex_unlock(&ps2dev->cmd_mutex);
+ ps2_end_command(ps2dev);
ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
dev_dbg(&ps2dev->serio->dev, "READ PAGE REG: 0x%02x (rc = %d)\n",
@@ -250,7 +252,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
unsigned char v;
int rc = -1;
- mutex_lock(&ps2dev->cmd_mutex);
+ ps2_begin_command(ps2dev);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
goto out;
@@ -275,7 +277,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
rc = 0;
out:
- mutex_unlock(&ps2dev->cmd_mutex);
+ ps2_end_command(ps2dev);
dev_dbg(&ps2dev->serio->dev, "WRITE PAGE REG: to 0x%02x (rc = %d)\n",
reg_val, rc);
return rc;
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index eac9fdde7ee..7283c78044a 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -203,7 +203,7 @@ MODULE_PARM_DESC(no_filter, "No Filter. Default = 0 (off)");
* and the irq configuration should be set to Falling Edge Trigger
*/
/* Control IRQ / Polling option */
-static int polling_req;
+static bool polling_req;
module_param(polling_req, bool, 0444);
MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
@@ -217,6 +217,7 @@ struct synaptics_i2c {
struct i2c_client *client;
struct input_dev *input;
struct delayed_work dwork;
+ spinlock_t lock;
int no_data_count;
int no_decel_param;
int reduce_report_param;
@@ -366,17 +367,28 @@ static bool synaptics_i2c_get_input(struct synaptics_i2c *touch)
return xy_delta || gesture;
}
-static irqreturn_t synaptics_i2c_irq(int irq, void *dev_id)
+static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
+ unsigned long delay)
{
- struct synaptics_i2c *touch = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&touch->lock, flags);
/*
- * We want to have the work run immediately but it might have
- * already been scheduled with a delay, that's why we have to
- * cancel it first.
+ * If work is already scheduled then subsequent schedules will not
+ * change the scheduled time that's why we have to cancel it first.
*/
- cancel_delayed_work(&touch->dwork);
- schedule_delayed_work(&touch->dwork, 0);
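+	/*
+	 * __cancel_delayed_work() is the non-sleeping variant, which is
+	 * presumably what makes this safe under the spinlock and from hard
+	 * interrupt context.
+	 */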
+ __cancel_delayed_work(&touch->dwork);
+ schedule_delayed_work(&touch->dwork, delay);
+
+ spin_unlock_irqrestore(&touch->lock, flags);
+}
+
+static irqreturn_t synaptics_i2c_irq(int irq, void *dev_id)
+{
+ struct synaptics_i2c *touch = dev_id;
+
+ synaptics_i2c_reschedule_work(touch, 0);
return IRQ_HANDLED;
}
@@ -452,7 +464,7 @@ static void synaptics_i2c_work_handler(struct work_struct *work)
* We poll the device once in THREAD_IRQ_SLEEP_SECS and
* if error is detected, we try to reset and reconfigure the touchpad.
*/
- schedule_delayed_work(&touch->dwork, delay);
+ synaptics_i2c_reschedule_work(touch, delay);
}
static int synaptics_i2c_open(struct input_dev *input)
@@ -465,8 +477,8 @@ static int synaptics_i2c_open(struct input_dev *input)
return ret;
if (polling_req)
- schedule_delayed_work(&touch->dwork,
- msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+ synaptics_i2c_reschedule_work(touch,
+ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
return 0;
}
@@ -521,6 +533,7 @@ struct synaptics_i2c *synaptics_i2c_touch_create(struct i2c_client *client)
touch->scan_rate_param = scan_rate;
set_scan_rate(touch, scan_rate);
INIT_DELAYED_WORK(&touch->dwork, synaptics_i2c_work_handler);
+ spin_lock_init(&touch->lock);
return touch;
}
@@ -535,14 +548,12 @@ static int __devinit synaptics_i2c_probe(struct i2c_client *client,
if (!touch)
return -ENOMEM;
- i2c_set_clientdata(client, touch);
-
ret = synaptics_i2c_reset_config(client);
if (ret)
goto err_mem_free;
if (client->irq < 1)
- polling_req = 1;
+ polling_req = true;
touch->input = input_allocate_device();
if (!touch->input) {
@@ -563,7 +574,7 @@ static int __devinit synaptics_i2c_probe(struct i2c_client *client,
dev_warn(&touch->client->dev,
"IRQ request failed: %d, "
"falling back to polling\n", ret);
- polling_req = 1;
+ polling_req = true;
synaptics_i2c_reg_set(touch->client,
INTERRUPT_EN_REG, 0);
}
@@ -580,12 +591,14 @@ static int __devinit synaptics_i2c_probe(struct i2c_client *client,
"Input device register failed: %d\n", ret);
goto err_input_free;
}
+
+ i2c_set_clientdata(client, touch);
+
return 0;
err_input_free:
input_free_device(touch->input);
err_mem_free:
- i2c_set_clientdata(client, NULL);
kfree(touch);
return ret;
@@ -596,7 +609,7 @@ static int __devexit synaptics_i2c_remove(struct i2c_client *client)
struct synaptics_i2c *touch = i2c_get_clientdata(client);
if (!polling_req)
- free_irq(touch->client->irq, touch);
+ free_irq(client->irq, touch);
input_unregister_device(touch->input);
i2c_set_clientdata(client, NULL);
@@ -627,8 +640,8 @@ static int synaptics_i2c_resume(struct i2c_client *client)
if (ret)
return ret;
- schedule_delayed_work(&touch->dwork,
- msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+ synaptics_i2c_reschedule_work(touch,
+ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
return 0;
}
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index eb3ff94af58..bc56e52b945 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -87,8 +87,22 @@ static bool i8042_bypass_aux_irq_test;
#include "i8042.h"
+/*
+ * i8042_lock serializes i8042_command() against the interrupt handler.
+ */
static DEFINE_SPINLOCK(i8042_lock);
+/*
+ * Writers to the AUX and KBD ports, as well as users issuing i8042_command
+ * directly, should acquire i8042_mutex (by calling the i8042_lock_chip()
+ * and i8042_unlock_chip() helpers) to ensure that they do not disturb each
+ * other (unfortunately, on many i8042 implementations a write to one of the
+ * ports will immediately abort a command that is being processed on the
+ * other port).
+ */
+static DEFINE_MUTEX(i8042_mutex);
+
struct i8042_port {
struct serio *serio;
int irq;
@@ -113,6 +127,18 @@ static struct platform_device *i8042_platform_device;
static irqreturn_t i8042_interrupt(int irq, void *dev_id);
+void i8042_lock_chip(void)
+{
+ mutex_lock(&i8042_mutex);
+}
+EXPORT_SYMBOL(i8042_lock_chip);
+
+void i8042_unlock_chip(void)
+{
+ mutex_unlock(&i8042_mutex);
+}
+EXPORT_SYMBOL(i8042_unlock_chip);
+
/*
* The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to
* be ready for reading values from it / writing values to it.
@@ -1161,6 +1187,21 @@ static void __devexit i8042_unregister_ports(void)
}
}
+/*
+ * Checks whether port belongs to i8042 controller.
+ */
+bool i8042_check_port_owner(const struct serio *port)
+{
+ int i;
+
+ for (i = 0; i < I8042_NUM_PORTS; i++)
+ if (i8042_ports[i].serio == port)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(i8042_check_port_owner);
+
static void i8042_free_irqs(void)
{
if (i8042_aux_irq_registered)
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 3a95b508bf2..769ba65a585 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
+#include <linux/i8042.h>
#include <linux/init.h>
#include <linux/libps2.h>
@@ -54,6 +55,24 @@ int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout)
}
EXPORT_SYMBOL(ps2_sendbyte);
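+/*
+ * Serialize a command sequence on this device; when the port belongs to the
+ * i8042 controller, also take the chip-wide lock so that traffic on the
+ * other port cannot abort the command in flight.  ps2_end_command() drops
+ * both locks in reverse order.
+ */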
+void ps2_begin_command(struct ps2dev *ps2dev)
+{
+ mutex_lock(&ps2dev->cmd_mutex);
+
+ if (i8042_check_port_owner(ps2dev->serio))
+ i8042_lock_chip();
+}
+EXPORT_SYMBOL(ps2_begin_command);
+
+void ps2_end_command(struct ps2dev *ps2dev)
+{
+ if (i8042_check_port_owner(ps2dev->serio))
+ i8042_unlock_chip();
+
+ mutex_unlock(&ps2dev->cmd_mutex);
+}
+EXPORT_SYMBOL(ps2_end_command);
+
/*
* ps2_drain() waits for device to transmit requested number of bytes
* and discards them.
@@ -66,7 +85,7 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
maxbytes = sizeof(ps2dev->cmdbuf);
}
- mutex_lock(&ps2dev->cmd_mutex);
+ ps2_begin_command(ps2dev);
serio_pause_rx(ps2dev->serio);
ps2dev->flags = PS2_FLAG_CMD;
@@ -76,7 +95,8 @@ void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout)
wait_event_timeout(ps2dev->wait,
!(ps2dev->flags & PS2_FLAG_CMD),
msecs_to_jiffies(timeout));
- mutex_unlock(&ps2dev->cmd_mutex);
+
+ ps2_end_command(ps2dev);
}
EXPORT_SYMBOL(ps2_drain);
@@ -237,9 +257,9 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
{
int rc;
- mutex_lock(&ps2dev->cmd_mutex);
+ ps2_begin_command(ps2dev);
rc = __ps2_command(ps2dev, param, command);
- mutex_unlock(&ps2dev->cmd_mutex);
+ ps2_end_command(ps2dev);
return rc;
}
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index ab02d72afbf..8cc453c85ea 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -48,8 +48,8 @@ config TOUCHSCREEN_AD7879_I2C
select TOUCHSCREEN_AD7879
help
Say Y here if you have a touchscreen interface using the
- AD7879-1 controller, and your board-specific initialization
- code includes that in its table of I2C devices.
+ AD7879-1/AD7889-1 controller, and your board-specific
+ initialization code includes that in its table of I2C devices.
If unsure, say N (but it's safe to say "Y").
@@ -62,7 +62,7 @@ config TOUCHSCREEN_AD7879_SPI
select TOUCHSCREEN_AD7879
help
Say Y here if you have a touchscreen interface using the
- AD7879 controller, and your board-specific initialization
+ AD7879/AD7889 controller, and your board-specific initialization
code includes that in its table of SPI devices.
If unsure, say N (but it's safe to say "Y").
@@ -169,6 +169,17 @@ config TOUCHSCREEN_WACOM_W8001
To compile this driver as a module, choose M here: the
module will be called wacom_w8001.
+config TOUCHSCREEN_MCS5000
+ tristate "MELFAS MCS-5000 touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have the MELFAS MCS-5000 touchscreen controller
+ chip in your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mcs5000_ts.
config TOUCHSCREEN_MTOUCH
tristate "MicroTouch serial touchscreens"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 4599bf7ad81..15fa62cffc7 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
+obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index ecaeb7e8e75..eb83939c705 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -842,3 +842,4 @@ module_exit(ad7877_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("AD7877 touchscreen Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad7877");
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 5d8a7039880..f06332c9e21 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -1,7 +1,8 @@
/*
- * Copyright (C) 2008 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2008-2009 Michael Hennerich, Analog Devices Inc.
*
- * Description: AD7879 based touchscreen, and GPIO driver (I2C/SPI Interface)
+ * Description: AD7879/AD7889 based touchscreen, and GPIO driver
+ * (I2C/SPI Interface)
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
@@ -747,6 +748,7 @@ static int __devexit ad7879_remove(struct i2c_client *client)
static const struct i2c_device_id ad7879_id[] = {
{ "ad7879", 0 },
+ { "ad7889", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad7879_id);
@@ -779,3 +781,4 @@ module_exit(ad7879_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("AD7879(-1) touchscreen Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad7879");
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index ba9d38c3f41..09c810999b9 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1256,3 +1256,4 @@ module_exit(ads7846_exit);
MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ads7846");
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
new file mode 100644
index 00000000000..4c28b89757f
--- /dev/null
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -0,0 +1,318 @@
+/*
+ * mcs5000_ts.c - Touchscreen driver for MELFAS MCS-5000 controller
+ *
+ * Copyright (C) 2009 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on wm97xx-core.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/i2c/mcs5000_ts.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+
+/* Registers */
+#define MCS5000_TS_STATUS 0x00
+#define STATUS_OFFSET 0
+#define STATUS_NO (0 << STATUS_OFFSET)
+#define STATUS_INIT (1 << STATUS_OFFSET)
+#define STATUS_SENSING (2 << STATUS_OFFSET)
+#define STATUS_COORD (3 << STATUS_OFFSET)
+#define STATUS_GESTURE (4 << STATUS_OFFSET)
+#define ERROR_OFFSET 4
+#define ERROR_NO (0 << ERROR_OFFSET)
+#define ERROR_POWER_ON_RESET (1 << ERROR_OFFSET)
+#define ERROR_INT_RESET (2 << ERROR_OFFSET)
+#define ERROR_EXT_RESET (3 << ERROR_OFFSET)
+#define ERROR_INVALID_REG_ADDRESS (8 << ERROR_OFFSET)
+#define ERROR_INVALID_REG_VALUE (9 << ERROR_OFFSET)
+
+#define MCS5000_TS_OP_MODE 0x01
+#define RESET_OFFSET 0
+#define RESET_NO (0 << RESET_OFFSET)
+#define RESET_EXT_SOFT (1 << RESET_OFFSET)
+#define OP_MODE_OFFSET 1
+#define OP_MODE_SLEEP (0 << OP_MODE_OFFSET)
+#define OP_MODE_ACTIVE (1 << OP_MODE_OFFSET)
+#define GESTURE_OFFSET 4
+#define GESTURE_DISABLE (0 << GESTURE_OFFSET)
+#define GESTURE_ENABLE (1 << GESTURE_OFFSET)
+#define PROXIMITY_OFFSET 5
+#define PROXIMITY_DISABLE (0 << PROXIMITY_OFFSET)
+#define PROXIMITY_ENABLE (1 << PROXIMITY_OFFSET)
+#define SCAN_MODE_OFFSET 6
+#define SCAN_MODE_INTERRUPT (0 << SCAN_MODE_OFFSET)
+#define SCAN_MODE_POLLING (1 << SCAN_MODE_OFFSET)
+#define REPORT_RATE_OFFSET 7
+#define REPORT_RATE_40 (0 << REPORT_RATE_OFFSET)
+#define REPORT_RATE_80 (1 << REPORT_RATE_OFFSET)
+
+#define MCS5000_TS_SENS_CTL 0x02
+#define MCS5000_TS_FILTER_CTL 0x03
+#define PRI_FILTER_OFFSET 0
+#define SEC_FILTER_OFFSET 4
+
+#define MCS5000_TS_X_SIZE_UPPER 0x08
+#define MCS5000_TS_X_SIZE_LOWER 0x09
+#define MCS5000_TS_Y_SIZE_UPPER 0x0A
+#define MCS5000_TS_Y_SIZE_LOWER 0x0B
+
+#define MCS5000_TS_INPUT_INFO 0x10
+#define INPUT_TYPE_OFFSET 0
+#define INPUT_TYPE_NONTOUCH (0 << INPUT_TYPE_OFFSET)
+#define INPUT_TYPE_SINGLE (1 << INPUT_TYPE_OFFSET)
+#define INPUT_TYPE_DUAL (2 << INPUT_TYPE_OFFSET)
+#define INPUT_TYPE_PALM (3 << INPUT_TYPE_OFFSET)
+#define INPUT_TYPE_PROXIMITY (7 << INPUT_TYPE_OFFSET)
+#define GESTURE_CODE_OFFSET 3
+#define GESTURE_CODE_NO (0 << GESTURE_CODE_OFFSET)
+
+#define MCS5000_TS_X_POS_UPPER 0x11
+#define MCS5000_TS_X_POS_LOWER 0x12
+#define MCS5000_TS_Y_POS_UPPER 0x13
+#define MCS5000_TS_Y_POS_LOWER 0x14
+#define MCS5000_TS_Z_POS 0x15
+#define MCS5000_TS_WIDTH 0x16
+#define MCS5000_TS_GESTURE_VAL 0x17
+#define MCS5000_TS_MODULE_REV 0x20
+#define MCS5000_TS_FIRMWARE_VER 0x21
+
+/* Touchscreen absolute values */
+#define MCS5000_MAX_XC 0x3ff
+#define MCS5000_MAX_YC 0x3ff
+
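+/*
+ * Byte offsets into the block read that starts at MCS5000_TS_INPUT_INFO;
+ * the final enumerator doubles as the number of bytes to fetch.
+ */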
+enum mcs5000_ts_read_offset {
+ READ_INPUT_INFO,
+ READ_X_POS_UPPER,
+ READ_X_POS_LOWER,
+ READ_Y_POS_UPPER,
+ READ_Y_POS_LOWER,
+ READ_BLOCK_SIZE,
+};
+
+/* Each client has this additional data */
+struct mcs5000_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ const struct mcs5000_ts_platform_data *platform_data;
+};
+
+static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id)
+{
+ struct mcs5000_ts_data *data = dev_id;
+ struct i2c_client *client = data->client;
+ u8 buffer[READ_BLOCK_SIZE];
+ int err;
+ int x;
+ int y;
+
+ err = i2c_smbus_read_i2c_block_data(client, MCS5000_TS_INPUT_INFO,
+ READ_BLOCK_SIZE, buffer);
+ if (err < 0) {
+ dev_err(&client->dev, "%s, err[%d]\n", __func__, err);
+ goto out;
+ }
+
+ switch (buffer[READ_INPUT_INFO]) {
+ case INPUT_TYPE_NONTOUCH:
+ input_report_key(data->input_dev, BTN_TOUCH, 0);
+ input_sync(data->input_dev);
+ break;
+
+ case INPUT_TYPE_SINGLE:
+ x = (buffer[READ_X_POS_UPPER] << 8) | buffer[READ_X_POS_LOWER];
+ y = (buffer[READ_Y_POS_UPPER] << 8) | buffer[READ_Y_POS_LOWER];
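+		/* Positions are 10-bit values split across two registers */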
+
+ input_report_key(data->input_dev, BTN_TOUCH, 1);
+ input_report_abs(data->input_dev, ABS_X, x);
+ input_report_abs(data->input_dev, ABS_Y, y);
+ input_sync(data->input_dev);
+ break;
+
+ case INPUT_TYPE_DUAL:
+ /* TODO */
+ break;
+
+ case INPUT_TYPE_PALM:
+ /* TODO */
+ break;
+
+ case INPUT_TYPE_PROXIMITY:
+ /* TODO */
+ break;
+
+ default:
+ dev_err(&client->dev, "Unknown ts input type %d\n",
+ buffer[READ_INPUT_INFO]);
+ break;
+ }
+
+ out:
+ return IRQ_HANDLED;
+}
+
+static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data)
+{
+ const struct mcs5000_ts_platform_data *platform_data =
+ data->platform_data;
+ struct i2c_client *client = data->client;
+
+ /* Touch reset & sleep mode */
+ i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE,
+ RESET_EXT_SOFT | OP_MODE_SLEEP);
+
+ /* Touch size */
+ i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_UPPER,
+ platform_data->x_size >> 8);
+ i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_LOWER,
+ platform_data->x_size & 0xff);
+ i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_UPPER,
+ platform_data->y_size >> 8);
+ i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_LOWER,
+ platform_data->y_size & 0xff);
+
+ /* Touch active mode & 80 report rate */
+ i2c_smbus_write_byte_data(data->client, MCS5000_TS_OP_MODE,
+ OP_MODE_ACTIVE | REPORT_RATE_80);
+}
+
+static int __devinit mcs5000_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mcs5000_ts_data *data;
+ struct input_dev *input_dev;
+ int ret;
+
+ if (!client->dev.platform_data)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(struct mcs5000_ts_data), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!data || !input_dev) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ data->client = client;
+ data->input_dev = input_dev;
+ data->platform_data = client->dev.platform_data;
+
+	input_dev->name = "MELFAS MCS-5000 Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_X, 0, MCS5000_MAX_XC, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0);
+
+ input_set_drvdata(input_dev, data);
+
+ if (data->platform_data->cfg_pin)
+ data->platform_data->cfg_pin();
+
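+	/*
+	 * The handler performs sleeping SMBus transfers, so it runs as a
+	 * threaded interrupt with no hard-IRQ part.
+	 */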
+ ret = request_threaded_irq(client->irq, NULL, mcs5000_ts_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT, "mcs5000_ts", data);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_free_mem;
+ }
+
+ ret = input_register_device(data->input_dev);
+ if (ret < 0)
+ goto err_free_irq;
+
+ mcs5000_ts_phys_init(data);
+ i2c_set_clientdata(client, data);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, data);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(data);
+ return ret;
+}
+
+static int __devexit mcs5000_ts_remove(struct i2c_client *client)
+{
+ struct mcs5000_ts_data *data = i2c_get_clientdata(client);
+
+ free_irq(client->irq, data);
+ input_unregister_device(data->input_dev);
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ /* Touch sleep mode */
+ i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
+
+ return 0;
+}
+
+static int mcs5000_ts_resume(struct i2c_client *client)
+{
+ struct mcs5000_ts_data *data = i2c_get_clientdata(client);
+
+ mcs5000_ts_phys_init(data);
+
+ return 0;
+}
+#else
+#define mcs5000_ts_suspend NULL
+#define mcs5000_ts_resume NULL
+#endif
+
+static const struct i2c_device_id mcs5000_ts_id[] = {
+ { "mcs5000_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id);
+
+static struct i2c_driver mcs5000_ts_driver = {
+ .probe = mcs5000_ts_probe,
+ .remove = __devexit_p(mcs5000_ts_remove),
+ .suspend = mcs5000_ts_suspend,
+ .resume = mcs5000_ts_resume,
+ .driver = {
+ .name = "mcs5000_ts",
+ },
+ .id_table = mcs5000_ts_id,
+};
+
+static int __init mcs5000_ts_init(void)
+{
+ return i2c_add_driver(&mcs5000_ts_driver);
+}
+
+static void __exit mcs5000_ts_exit(void)
+{
+ i2c_del_driver(&mcs5000_ts_driver);
+}
+
+module_init(mcs5000_ts_init);
+module_exit(mcs5000_ts_exit);
+
+/* Module information */
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Touchscreen driver for MELFAS MCS-5000 controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 252eb11fe9d..f944918466e 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -561,6 +561,7 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
static int wm97xx_probe(struct device *dev)
{
struct wm97xx *wm;
+ struct wm97xx_pdata *pdata = dev->platform_data;
int ret = 0, id = 0;
wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL);
@@ -658,6 +659,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
+ wm->battery_dev->dev.platform_data = pdata;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
@@ -671,6 +673,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->touch_dev, wm);
wm->touch_dev->dev.parent = dev;
+ wm->touch_dev->dev.platform_data = pdata;
ret = platform_device_add(wm->touch_dev);
if (ret < 0)
goto touch_reg_err;
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index bff72d81f26..9f8f67b6c07 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -89,7 +89,7 @@ static int capifs_remount(struct super_block *s, int *flags, char *data)
return 0;
}
-static struct super_operations capifs_sops =
+static const struct super_operations capifs_sops =
{
.statfs = simple_statfs,
.remount_fs = capifs_remount,
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index 16f2e465e5f..26626eead82 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -1019,7 +1019,7 @@ int __init cdebug_init(void)
if (!g_debbuf->buf) {
kfree(g_cmsg);
kfree(g_debbuf);
- return -ENOMEM;;
+ return -ENOMEM;
}
g_debbuf->size = CDEBUG_GSIZE;
g_debbuf->buf[0] = 0;
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 50ed778f63f..09d4db764d2 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -89,14 +89,14 @@ static int contrstats_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations seq_controller_ops = {
+static const struct seq_operations seq_controller_ops = {
.start = controller_start,
.next = controller_next,
.stop = controller_stop,
.show = controller_show,
};
-static struct seq_operations seq_contrstats_ops = {
+static const struct seq_operations seq_contrstats_ops = {
.start = controller_start,
.next = controller_next,
.stop = controller_stop,
@@ -194,14 +194,14 @@ applstats_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations seq_applications_ops = {
+static const struct seq_operations seq_applications_ops = {
.start = applications_start,
.next = applications_next,
.stop = applications_stop,
.show = applications_show,
};
-static struct seq_operations seq_applstats_ops = {
+static const struct seq_operations seq_applstats_ops = {
.start = applications_start,
.next = applications_next,
.stop = applications_stop,
@@ -264,7 +264,7 @@ static int capi_driver_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations seq_capi_driver_ops = {
+static const struct seq_operations seq_capi_driver_ops = {
.start = capi_driver_start,
.next = capi_driver_next,
.stop = capi_driver_stop,
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 7188c59a76f..adb1e8c36b4 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -761,7 +761,7 @@ isdn_getnum(char **p)
* Be aware that this is not an atomic operation when sleep != 0, even though
* interrupts are turned off! Well, like that we are currently only called
* on behalf of a read system call on raw device files (which are documented
- * to be dangerous and for for debugging purpose only). The inode semaphore
+ * to be dangerous and for debugging purpose only). The inode semaphore
* takes care that this is not called for the same minor device number while
* we are sleeping, but access is not serialized against simultaneous read()
* from the corresponding ttyI device. Can other ugly events, like changes
@@ -873,7 +873,7 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que
* Be aware that this is not an atomic operation when sleep != 0, even though
* interrupts are turned off! Well, like that we are currently only called
* on behalf of a read system call on raw device files (which are documented
- * to be dangerous and for for debugging purpose only). The inode semaphore
+ * to be dangerous and for debugging purpose only). The inode semaphore
* takes care that this is not called for the same minor device number while
* we are sleeping, but access is not serialized against simultaneous read()
* from the corresponding ttyI device. Can other ugly events, like changes
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index 1813c84ea5f..f2242db5401 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -93,6 +93,8 @@ static struct dmi_system_id __initdata mail_led_whitelist[] = {
static void clevo_mail_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
+ i8042_lock_chip();
+
if (value == LED_OFF)
i8042_command(NULL, CLEVO_MAIL_LED_OFF);
else if (value <= LED_HALF)
@@ -100,6 +102,8 @@ static void clevo_mail_led_set(struct led_classdev *led_cdev,
else
i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ);
+ i8042_unlock_chip();
+
}
static int clevo_mail_led_blink(struct led_classdev *led_cdev,
@@ -108,6 +112,8 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev,
{
int status = -EINVAL;
+ i8042_lock_chip();
+
if (*delay_on == 0 /* ms */ && *delay_off == 0 /* ms */) {
/* Special case: the leds subsystem requested us to
* chose one user friendly blinking of the LED, and
@@ -135,6 +141,8 @@ static int clevo_mail_led_blink(struct led_classdev *led_cdev,
*delay_on, *delay_off);
}
+ i8042_unlock_chip();
+
return status;
}
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 098d9aae725..2913d76ad3d 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -148,3 +148,4 @@ module_exit(dac124s085_leds_exit);
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_DESCRIPTION("DAC124S085 LED driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:dac124s085");
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 1e2cb846b3c..8744d24ac6e 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -67,12 +67,11 @@ static __init int map_switcher(void)
* so we make sure they're zeroed.
*/
for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
- unsigned long addr = get_zeroed_page(GFP_KERNEL);
- if (!addr) {
+ switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!switcher_page[i]) {
err = -ENOMEM;
goto free_some_pages;
}
- switcher_page[i] = virt_to_page(addr);
}
/*
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a8d0aee3bc0..cf94326f1b5 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -380,7 +380,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* And we copy the flags to the shadow PMD entry. The page
* number in the shadow PMD is the page we just allocated.
*/
- native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
+ set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
}
/*
@@ -447,7 +447,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* we will come back here when a write does actually occur, so
* we can update the Guest's _PAGE_DIRTY flag.
*/
- native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
+ set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
/*
* Finally, we write the Guest PTE entry back: we've set the
@@ -528,7 +528,7 @@ static void release_pmd(pmd_t *spmd)
/* Now we can free the page of PTEs */
free_page((long)ptepage);
/* And zero out the PMD entry so we never release it twice. */
- native_set_pmd(spmd, __pmd(0));
+ set_pmd(spmd, __pmd(0));
}
}
@@ -833,15 +833,15 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
*/
if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
check_gpte(cpu, gpte);
- native_set_pte(spte,
- gpte_to_spte(cpu, gpte,
+ set_pte(spte,
+ gpte_to_spte(cpu, gpte,
pte_flags(gpte) & _PAGE_DIRTY));
} else {
/*
* Otherwise kill it and we can demand_page()
* it in later.
*/
- native_set_pte(spte, __pte(0));
+ set_pte(spte, __pte(0));
}
#ifdef CONFIG_X86_PAE
}
@@ -894,7 +894,7 @@ void guest_set_pte(struct lg_cpu *cpu,
* tells us they've changed. When the Guest tries to use the new entry it will
* fault and demand_page() will fix it up.
*
- * So with that in mind here's our code to to update a (top-level) PGD entry:
+ * So with that in mind here's our code to update a (top-level) PGD entry:
*/
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
@@ -983,25 +983,22 @@ static unsigned long setup_pagetables(struct lguest *lg,
*/
for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
i += PTRS_PER_PTE, j++) {
- /* FIXME: native_set_pmd is overkill here. */
- native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
- - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
+ pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE,
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
return -EFAULT;
}
/* One PGD entry, pointing to that PMD page. */
- set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
+ pgd = __pgd(((unsigned long)pmds - mem_base) | _PAGE_PRESENT);
/* Copy it in as the first PGD entry (ie. addresses 0-1G). */
if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
return -EFAULT;
/*
- * And the third PGD entry (ie. addresses 3G-4G).
- *
- * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000.
+ * And the other PGD entry to make the linear mapping at PAGE_OFFSET
*/
- if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
+ if (copy_to_user(&pgdir[KERNEL_PGD_BOUNDARY], &pgd, sizeof(pgd)))
return -EFAULT;
#else
/*
@@ -1141,15 +1138,13 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
pte_t regs_pte;
- unsigned long pfn;
#ifdef CONFIG_X86_PAE
pmd_t switcher_pmd;
pmd_t *pmd_table;
- /* FIXME: native_set_pmd is overkill here. */
- native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
- PAGE_SHIFT, PAGE_KERNEL_EXEC));
+ switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
+ PAGE_KERNEL_EXEC);
/* Figure out where the pmd page is, by reading the PGD, and converting
* it to a virtual address. */
@@ -1157,7 +1152,7 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
<< PAGE_SHIFT);
/* Now write it into the shadow page table. */
- native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
+ set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
#else
pgd_t switcher_pgd;
@@ -1179,10 +1174,8 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
* page is already mapped there, we don't have to copy them out
* again.
*/
- pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
- native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
- native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
- regs_pte);
+ regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
+ set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
}
/*:*/
@@ -1209,7 +1202,7 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
/* The first entries are easy: they map the Switcher code. */
for (i = 0; i < pages; i++) {
- native_set_pte(&pte[i], mk_pte(switcher_page[i],
+ set_pte(&pte[i], mk_pte(switcher_page[i],
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
@@ -1217,14 +1210,14 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
i = pages + cpu*2;
/* First page (Guest registers) is writable from the Guest */
- native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
+ set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
/*
* The second page contains the "struct lguest_ro_state", and is
* read-only.
*/
- native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
+ set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index a98ab72adf9..93fb32038b1 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -274,7 +274,7 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
if (cpu > 1)
continue;
- rcpu = &rm->cpu[cpu];;
+ rcpu = &rm->cpu[cpu];
rcpu->prev_idle = get_cpu_idle_time(cpu);
rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 020f9573fd8..2158377a135 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -124,6 +124,8 @@ config MD_RAID456
select MD_RAID6_PQ
select ASYNC_MEMCPY
select ASYNC_XOR
+ select ASYNC_PQ
+ select ASYNC_RAID6_RECOV
---help---
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
@@ -152,9 +154,33 @@ config MD_RAID456
If unsure, say Y.
+config MULTICORE_RAID456
+ bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
+ depends on MD_RAID456
+ depends on SMP
+ depends on EXPERIMENTAL
+ ---help---
+ Enable the raid456 module to dispatch per-stripe raid operations to a
+ thread pool.
+
+ If unsure, say N.
+
config MD_RAID6_PQ
tristate
+config ASYNC_RAID6_TEST
+ tristate "Self test for hardware accelerated raid6 recovery"
+ depends on MD_RAID6_PQ
+ select ASYNC_RAID6_RECOV
+ ---help---
+ This is a one-shot self test that permutes through the
+ recovery of all the possible two disk failure scenarios for a
+ N-disk array. Recovery is performed with the asynchronous
+ raid6 recovery routines, and will optionally use an offload
+ engine if one is available.
+
+ If unsure, say N.
+
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3319c2fec28..6986b0059d2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -108,6 +108,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
* allocated while we're using it
*/
static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
+__releases(bitmap->lock)
+__acquires(bitmap->lock)
{
unsigned char *mappage;
@@ -325,7 +327,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
return 0;
bad_alignment:
- rcu_read_unlock();
return -EINVAL;
}
@@ -1207,6 +1208,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
sector_t offset, int *blocks,
int create)
+__releases(bitmap->lock)
+__acquires(bitmap->lock)
{
/* If 'create', we might release the lock and reclaim it.
* The lock must have been taken with interrupts enabled.
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eee28fac210..376f1ab48a2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1716,7 +1716,7 @@ out:
return r;
}
-static struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
@@ -2663,7 +2663,7 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
kfree(pools);
}
-static struct block_device_operations dm_blk_dops = {
+static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index ea484290544..1ceceb334d5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -108,6 +108,9 @@ static int linear_congested(void *data, int bits)
linear_conf_t *conf;
int i, ret = 0;
+ if (mddev_congested(mddev, bits))
+ return 1;
+
rcu_read_lock();
conf = rcu_dereference(mddev->private);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9dd872000ce..26ba42a7912 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -138,7 +138,7 @@ static ctl_table raid_root_table[] = {
{ .ctl_name = 0 }
};
-static struct block_device_operations md_fops;
+static const struct block_device_operations md_fops;
static int start_readonly;
@@ -262,6 +262,12 @@ static void mddev_resume(mddev_t *mddev)
mddev->pers->quiesce(mddev, 0);
}
+int mddev_congested(mddev_t *mddev, int bits)
+{
+ return mddev->suspended;
+}
+EXPORT_SYMBOL(mddev_congested);
+
static inline mddev_t *mddev_get(mddev_t *mddev)
{
@@ -4218,7 +4224,7 @@ static int do_md_run(mddev_t * mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
- "%s_resync");
+ "resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
@@ -4575,10 +4581,10 @@ static int get_version(void __user * arg)
static int get_array_info(mddev_t * mddev, void __user * arg)
{
mdu_array_info_t info;
- int nr,working,active,failed,spare;
+ int nr,working,insync,failed,spare;
mdk_rdev_t *rdev;
- nr=working=active=failed=spare=0;
+ nr=working=insync=failed=spare=0;
list_for_each_entry(rdev, &mddev->disks, same_set) {
nr++;
if (test_bit(Faulty, &rdev->flags))
@@ -4586,7 +4592,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
else {
working++;
if (test_bit(In_sync, &rdev->flags))
- active++;
+ insync++;
else
spare++;
}
@@ -4611,7 +4617,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_offset)
info.state = (1<<MD_SB_BITMAP_PRESENT);
- info.active_disks = active;
+ info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
@@ -4721,7 +4727,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (!list_empty(&mddev->disks)) {
mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
mdk_rdev_t, same_set);
- int err = super_types[mddev->major_version]
+ err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
@@ -5556,7 +5562,7 @@ static int md_revalidate(struct gendisk *disk)
mddev->changed = 0;
return 0;
}
-static struct block_device_operations md_fops =
+static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
@@ -5631,7 +5637,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
- thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
+ thread->tsk = kthread_run(md_thread, thread,
+ "%s_%s",
+ mdname(thread->mddev),
+ name ?: mddev->pers->name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
@@ -6745,7 +6754,7 @@ void md_check_recovery(mddev_t *mddev)
}
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
- "%s_resync");
+ "resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
diff --git a/drivers/md/md.h b/drivers/md/md.h
index f8fc188bc76..f184b69ef33 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -201,7 +201,7 @@ struct mddev_s
* INTR: resync needs to be aborted for some reason
* DONE: thread is done and is waiting to be reaped
* REQUEST: user-space has requested a sync (used with SYNC)
- * CHECK: user-space request for for check-only, no repair
+ * CHECK: user-space request for check-only, no repair
* RESHAPE: A reshape is happening
*
* If neither SYNC or RESHAPE are set, then it is a recovery.
@@ -430,6 +430,7 @@ extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
+extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 89e76819f61..ee7646f974a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -198,6 +198,9 @@ static int multipath_congested(void *data, int bits)
multipath_conf_t *conf = mddev->private;
int i, ret = 0;
+ if (mddev_congested(mddev, bits))
+ return 1;
+
rcu_read_lock();
for (i = 0; i < mddev->raid_disks ; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
@@ -493,7 +496,7 @@ static int multipath_run (mddev_t *mddev)
}
mddev->degraded = conf->raid_disks - conf->working_disks;
- conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS,
+ conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
if (conf->pool == NULL) {
printk(KERN_ERR
@@ -503,7 +506,7 @@ static int multipath_run (mddev_t *mddev)
}
{
- mddev->thread = md_register_thread(multipathd, mddev, "%s_multipath");
+ mddev->thread = md_register_thread(multipathd, mddev, NULL);
if (!mddev->thread) {
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f845ed98fec..d3a4ce06015 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -44,6 +44,9 @@ static int raid0_congested(void *data, int bits)
mdk_rdev_t **devlist = conf->devlist;
int i, ret = 0;
+ if (mddev_congested(mddev, bits))
+ return 1;
+
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
@@ -86,7 +89,7 @@ static void dump_zones(mddev_t *mddev)
static int create_strip_zones(mddev_t *mddev)
{
- int i, c, j, err;
+ int i, c, err;
sector_t curr_zone_end, sectors;
mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
struct strip_zone *zone;
@@ -198,6 +201,8 @@ static int create_strip_zones(mddev_t *mddev)
/* now do the other zones */
for (i = 1; i < conf->nr_strip_zones; i++)
{
+ int j;
+
zone = conf->strip_zone + i;
dev = conf->devlist + i * mddev->raid_disks;
@@ -207,7 +212,6 @@ static int create_strip_zones(mddev_t *mddev)
c = 0;
for (j=0; j<cnt; j++) {
- char b[BDEVNAME_SIZE];
rdev = conf->devlist[j];
printk(KERN_INFO "raid0: checking %s ...",
bdevname(rdev->bdev, b));
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ff7ed333599..d1b9bd5fd4f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -576,6 +576,9 @@ static int raid1_congested(void *data, int bits)
conf_t *conf = mddev->private;
int i, ret = 0;
+ if (mddev_congested(mddev, bits))
+ return 1;
+
rcu_read_lock();
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -851,7 +854,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | do_sync;
+ read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
read_bio->bi_private = r1_bio;
generic_make_request(read_bio);
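This and the matching raid1/raid10 hunks below OR in do_sync as a 0/1 value shifted onto the BIO_RW_SYNCIO bit rather than as a raw value. A tiny arithmetic sketch, using an assumed bit number since the real constant is not reproduced here:

#include <stdio.h>

#define EX_SYNCIO_BIT 2	/* assumed bit position, for illustration only */

int main(void)
{
	unsigned long rw;
	int do_sync = 1;	/* 0 or 1, as returned by a flag test */

	rw = (unsigned long)do_sync;			/* lands on bit 0 */
	printf("unshifted: %#lx\n", rw);

	rw = (unsigned long)do_sync << EX_SYNCIO_BIT;	/* lands on the intended bit */
	printf("shifted  : %#lx\n", rw);
	return 0;
}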
@@ -943,7 +946,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_barriers | do_sync;
+ mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
+ (do_sync << BIO_RW_SYNCIO);
mbio->bi_private = r1_bio;
if (behind_pages) {
@@ -1623,7 +1627,8 @@ static void raid1d(mddev_t *mddev)
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
bio->bi_end_io = raid1_end_write_request;
- bio->bi_rw = WRITE | do_sync;
+ bio->bi_rw = WRITE |
+ (do_sync << BIO_RW_SYNCIO);
bio->bi_private = r1_bio;
r1_bio->bios[i] = bio;
generic_make_request(bio);
@@ -1672,7 +1677,7 @@ static void raid1d(mddev_t *mddev)
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | do_sync;
+ bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
bio->bi_private = r1_bio;
unplug = 1;
generic_make_request(bio);
@@ -2047,7 +2052,7 @@ static int run(mddev_t *mddev)
conf->last_used = j;
- mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
+ mddev->thread = md_register_thread(raid1d, mddev, NULL);
if (!mddev->thread) {
printk(KERN_ERR
"raid1: couldn't allocate thread for %s\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d0a2152e064..51c4c5c4d87 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -631,6 +631,8 @@ static int raid10_congested(void *data, int bits)
conf_t *conf = mddev->private;
int i, ret = 0;
+ if (mddev_congested(mddev, bits))
+ return 1;
rcu_read_lock();
for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -882,7 +884,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ | do_sync;
+ read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
read_bio->bi_private = r10_bio;
generic_make_request(read_bio);
@@ -950,7 +952,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
conf->mirrors[d].rdev->data_offset;
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync;
+ mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1623,7 +1625,7 @@ static void raid10d(mddev_t *mddev)
bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | do_sync;
+ bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
unplug = 1;
@@ -1773,7 +1775,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* recovery... the complicated one */
- int i, j, k;
+ int j, k;
r10_bio = NULL;
for (i=0 ; i<conf->raid_disks; i++)
@@ -2188,7 +2190,7 @@ static int run(mddev_t *mddev)
}
- mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
+ mddev->thread = md_register_thread(raid10d, mddev, NULL);
if (!mddev->thread) {
printk(KERN_ERR
"raid10: couldn't allocate thread for %s\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 826eb346735..94829804ab7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -47,7 +47,9 @@
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
+#include <linux/async.h>
#include <linux/seq_file.h>
+#include <linux/cpu.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"
@@ -499,11 +501,18 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
struct page *bio_page;
int i;
int page_offset;
+ struct async_submit_ctl submit;
+ enum async_tx_flags flags = 0;
if (bio->bi_sector >= sector)
page_offset = (signed)(bio->bi_sector - sector) * 512;
else
page_offset = (signed)(sector - bio->bi_sector) * -512;
+
+ if (frombio)
+ flags |= ASYNC_TX_FENCE;
+ init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
+
bio_for_each_segment(bvl, bio, i) {
int len = bio_iovec_idx(bio, i)->bv_len;
int clen;
@@ -525,15 +534,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
bio_page = bio_iovec_idx(bio, i)->bv_page;
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
- b_offset, clen,
- ASYNC_TX_DEP_ACK,
- tx, NULL, NULL);
+ b_offset, clen, &submit);
else
tx = async_memcpy(bio_page, page, b_offset,
- page_offset, clen,
- ASYNC_TX_DEP_ACK,
- tx, NULL, NULL);
+ page_offset, clen, &submit);
}
+ /* chain the operations */
+ submit.depend_tx = tx;
+
if (clen < len) /* hit end of page */
break;
page_offset += len;
@@ -592,6 +600,7 @@ static void ops_run_biofill(struct stripe_head *sh)
{
struct dma_async_tx_descriptor *tx = NULL;
raid5_conf_t *conf = sh->raid_conf;
+ struct async_submit_ctl submit;
int i;
pr_debug("%s: stripe %llu\n", __func__,
@@ -615,22 +624,34 @@ static void ops_run_biofill(struct stripe_head *sh)
}
atomic_inc(&sh->count);
- async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
- ops_complete_biofill, sh);
+ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
+ async_trigger_callback(&submit);
}
-static void ops_complete_compute5(void *stripe_head_ref)
+static void mark_target_uptodate(struct stripe_head *sh, int target)
{
- struct stripe_head *sh = stripe_head_ref;
- int target = sh->ops.target;
- struct r5dev *tgt = &sh->dev[target];
+ struct r5dev *tgt;
- pr_debug("%s: stripe %llu\n", __func__,
- (unsigned long long)sh->sector);
+ if (target < 0)
+ return;
+ tgt = &sh->dev[target];
set_bit(R5_UPTODATE, &tgt->flags);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
clear_bit(R5_Wantcompute, &tgt->flags);
+}
+
+static void ops_complete_compute(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+
+ pr_debug("%s: stripe %llu\n", __func__,
+ (unsigned long long)sh->sector);
+
+ /* mark the computed target(s) as uptodate */
+ mark_target_uptodate(sh, sh->ops.target);
+ mark_target_uptodate(sh, sh->ops.target2);
+
clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
if (sh->check_state == check_state_compute_run)
sh->check_state = check_state_compute_result;
@@ -638,16 +659,24 @@ static void ops_complete_compute5(void *stripe_head_ref)
release_stripe(sh);
}
-static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
+/* return a pointer to the address conversion region of the scribble buffer */
+static addr_conv_t *to_addr_conv(struct stripe_head *sh,
+ struct raid5_percpu *percpu)
+{
+ return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
- /* kernel stack size limits the total number of disks */
int disks = sh->disks;
- struct page *xor_srcs[disks];
+ struct page **xor_srcs = percpu->scribble;
int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target];
struct page *xor_dest = tgt->page;
int count = 0;
struct dma_async_tx_descriptor *tx;
+ struct async_submit_ctl submit;
int i;
pr_debug("%s: stripe %llu block: %d\n",
@@ -660,17 +689,215 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
atomic_inc(&sh->count);
+ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
+ ops_complete_compute, sh, to_addr_conv(sh, percpu));
if (unlikely(count == 1))
- tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
- 0, NULL, ops_complete_compute5, sh);
+ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
- tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
- ASYNC_TX_XOR_ZERO_DST, NULL,
- ops_complete_compute5, sh);
+ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
return tx;
}
+/* set_syndrome_sources - populate source buffers for gen_syndrome
+ * @srcs - (struct page *) array of size sh->disks
+ * @sh - stripe_head to parse
+ *
+ * Populates srcs in proper layout order for the stripe and returns the
+ * 'count' of sources to be used in a call to async_gen_syndrome. The P
+ * destination buffer is recorded in srcs[count] and the Q destination
+ * is recorded in srcs[count+1].
+ */
+static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
+{
+ int disks = sh->disks;
+ int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
+ int d0_idx = raid6_d0(sh);
+ int count;
+ int i;
+
+ for (i = 0; i < disks; i++)
+ srcs[i] = (void *)raid6_empty_zero_page;
+
+ count = 0;
+ i = d0_idx;
+ do {
+ int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+
+ srcs[slot] = sh->dev[i].page;
+ i = raid6_next_disk(i, disks);
+ } while (i != d0_idx);
+ BUG_ON(count != syndrome_disks);
+
+ return count;
+}
+
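To make the documented layout concrete, here is a self-contained plain-C sketch (no kernel API; the block size, data values and the gf_mul2 helper are illustrative only): the data blocks occupy slots 0..count-1, the P destination sits at srcs[count], the Q destination at srcs[count+1], and a gen_syndrome-style pass fills P with the XOR and Q with the GF(2^8) weighted sum.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NDATA 4
#define BLK   8	/* tiny block size for the example */

/* multiply by x in GF(2^8) with the RAID-6 polynomial 0x11d */
static uint8_t gf_mul2(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

int main(void)
{
	uint8_t data[NDATA][BLK], p[BLK], q[BLK];
	const uint8_t *srcs[NDATA + 2];
	int d, i, count = 0;

	for (d = 0; d < NDATA; d++)
		for (i = 0; i < BLK; i++)
			data[d][i] = (uint8_t)(16 * d + i + 1);

	for (d = 0; d < NDATA; d++)
		srcs[count++] = data[d];	/* data sources: slots 0..count-1 */
	srcs[count] = p;			/* P destination at srcs[count] */
	srcs[count + 1] = q;			/* Q destination at srcs[count+1] */

	/* syndrome generation: Horner evaluation from the highest data slot down */
	memset(p, 0, BLK);
	memset(q, 0, BLK);
	for (d = count - 1; d >= 0; d--)
		for (i = 0; i < BLK; i++) {
			p[i] ^= srcs[d][i];
			q[i] = (uint8_t)(gf_mul2(q[i]) ^ srcs[d][i]);
		}

	printf("P[0..3] = %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	printf("Q[0..3] = %02x %02x %02x %02x\n", q[0], q[1], q[2], q[3]);
	return 0;
}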
+static struct dma_async_tx_descriptor *
+ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
+{
+ int disks = sh->disks;
+ struct page **blocks = percpu->scribble;
+ int target;
+ int qd_idx = sh->qd_idx;
+ struct dma_async_tx_descriptor *tx;
+ struct async_submit_ctl submit;
+ struct r5dev *tgt;
+ struct page *dest;
+ int i;
+ int count;
+
+ if (sh->ops.target < 0)
+ target = sh->ops.target2;
+ else if (sh->ops.target2 < 0)
+ target = sh->ops.target;
+ else
+ /* we should only have one valid target */
+ BUG();
+ BUG_ON(target < 0);
+ pr_debug("%s: stripe %llu block: %d\n",
+ __func__, (unsigned long long)sh->sector, target);
+
+ tgt = &sh->dev[target];
+ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+ dest = tgt->page;
+
+ atomic_inc(&sh->count);
+
+ if (target == qd_idx) {
+ count = set_syndrome_sources(blocks, sh);
+ blocks[count] = NULL; /* regenerating p is not necessary */
+ BUG_ON(blocks[count+1] != dest); /* q should already be set */
+ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
+ ops_complete_compute, sh,
+ to_addr_conv(sh, percpu));
+ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ } else {
+ /* Compute any data- or p-drive using XOR */
+ count = 0;
+ for (i = disks; i-- ; ) {
+ if (i == target || i == qd_idx)
+ continue;
+ blocks[count++] = sh->dev[i].page;
+ }
+
+ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
+ NULL, ops_complete_compute, sh,
+ to_addr_conv(sh, percpu));
+ tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
+ }
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
+{
+ int i, count, disks = sh->disks;
+ int syndrome_disks = sh->ddf_layout ? disks : disks-2;
+ int d0_idx = raid6_d0(sh);
+ int faila = -1, failb = -1;
+ int target = sh->ops.target;
+ int target2 = sh->ops.target2;
+ struct r5dev *tgt = &sh->dev[target];
+ struct r5dev *tgt2 = &sh->dev[target2];
+ struct dma_async_tx_descriptor *tx;
+ struct page **blocks = percpu->scribble;
+ struct async_submit_ctl submit;
+
+ pr_debug("%s: stripe %llu block1: %d block2: %d\n",
+ __func__, (unsigned long long)sh->sector, target, target2);
+ BUG_ON(target < 0 || target2 < 0);
+ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+ BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
+
+ /* we need to open-code set_syndrome_sources to handle the
+ * slot number conversion for 'faila' and 'failb'
+ */
+ for (i = 0; i < disks ; i++)
+ blocks[i] = (void *)raid6_empty_zero_page;
+ count = 0;
+ i = d0_idx;
+ do {
+ int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+
+ blocks[slot] = sh->dev[i].page;
+
+ if (i == target)
+ faila = slot;
+ if (i == target2)
+ failb = slot;
+ i = raid6_next_disk(i, disks);
+ } while (i != d0_idx);
+ BUG_ON(count != syndrome_disks);
+
+ BUG_ON(faila == failb);
+ if (failb < faila)
+ swap(faila, failb);
+ pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
+ __func__, (unsigned long long)sh->sector, faila, failb);
+
+ atomic_inc(&sh->count);
+
+ if (failb == syndrome_disks+1) {
+ /* Q disk is one of the missing disks */
+ if (faila == syndrome_disks) {
+ /* Missing P+Q, just recompute */
+ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
+ ops_complete_compute, sh,
+ to_addr_conv(sh, percpu));
+ return async_gen_syndrome(blocks, 0, count+2,
+ STRIPE_SIZE, &submit);
+ } else {
+ struct page *dest;
+ int data_target;
+ int qd_idx = sh->qd_idx;
+
+ /* Missing D+Q: recompute D from P, then recompute Q */
+ if (target == qd_idx)
+ data_target = target2;
+ else
+ data_target = target;
+
+ count = 0;
+ for (i = disks; i-- ; ) {
+ if (i == data_target || i == qd_idx)
+ continue;
+ blocks[count++] = sh->dev[i].page;
+ }
+ dest = sh->dev[data_target].page;
+ init_async_submit(&submit,
+ ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
+ NULL, NULL, NULL,
+ to_addr_conv(sh, percpu));
+ tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
+ &submit);
+
+ count = set_syndrome_sources(blocks, sh);
+ init_async_submit(&submit, ASYNC_TX_FENCE, tx,
+ ops_complete_compute, sh,
+ to_addr_conv(sh, percpu));
+ return async_gen_syndrome(blocks, 0, count+2,
+ STRIPE_SIZE, &submit);
+ }
+ } else {
+ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
+ ops_complete_compute, sh,
+ to_addr_conv(sh, percpu));
+ if (failb == syndrome_disks) {
+ /* We're missing D+P. */
+ return async_raid6_datap_recov(syndrome_disks+2,
+ STRIPE_SIZE, faila,
+ blocks, &submit);
+ } else {
+ /* We're missing D+D. */
+ return async_raid6_2data_recov(syndrome_disks+2,
+ STRIPE_SIZE, faila, failb,
+ blocks, &submit);
+ }
+ }
+}
+
+
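The branches above amount to a small case analysis on which syndrome slots failed. A standalone sketch of just that decision (illustrative slot numbers; the recovery work itself is not modelled):

#include <stdio.h>

enum recovery { RECOVER_PQ, RECOVER_DQ, RECOVER_DP, RECOVER_DD };

/* faila < failb, as guaranteed by the swap() above */
static enum recovery classify(int faila, int failb, int syndrome_disks)
{
	if (failb == syndrome_disks + 1)	/* Q is one of the failures */
		return (faila == syndrome_disks) ? RECOVER_PQ : RECOVER_DQ;
	if (failb == syndrome_disks)		/* P is one of the failures */
		return RECOVER_DP;
	return RECOVER_DD;			/* two data blocks failed */
}

int main(void)
{
	static const char * const names[] = { "P+Q", "D+Q", "D+P", "D+D" };
	int sd = 4;	/* e.g. four data disks */

	printf("%s\n", names[classify(4, 5, sd)]);	/* P+Q */
	printf("%s\n", names[classify(1, 5, sd)]);	/* D+Q */
	printf("%s\n", names[classify(2, 4, sd)]);	/* D+P */
	printf("%s\n", names[classify(0, 2, sd)]);	/* D+D */
	return 0;
}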
static void ops_complete_prexor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
@@ -680,12 +907,13 @@ static void ops_complete_prexor(void *stripe_head_ref)
}
static struct dma_async_tx_descriptor *
-ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
{
- /* kernel stack size limits the total number of disks */
int disks = sh->disks;
- struct page *xor_srcs[disks];
+ struct page **xor_srcs = percpu->scribble;
int count = 0, pd_idx = sh->pd_idx, i;
+ struct async_submit_ctl submit;
/* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -700,9 +928,9 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
xor_srcs[count++] = dev->page;
}
- tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
- ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
- ops_complete_prexor, sh);
+ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu));
+ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
return tx;
}
@@ -742,17 +970,21 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
return tx;
}
-static void ops_complete_postxor(void *stripe_head_ref)
+static void ops_complete_reconstruct(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
- int disks = sh->disks, i, pd_idx = sh->pd_idx;
+ int disks = sh->disks;
+ int pd_idx = sh->pd_idx;
+ int qd_idx = sh->qd_idx;
+ int i;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->written || i == pd_idx)
+
+ if (dev->written || i == pd_idx || i == qd_idx)
set_bit(R5_UPTODATE, &dev->flags);
}
@@ -770,12 +1002,12 @@ static void ops_complete_postxor(void *stripe_head_ref)
}
static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
{
- /* kernel stack size limits the total number of disks */
int disks = sh->disks;
- struct page *xor_srcs[disks];
-
+ struct page **xor_srcs = percpu->scribble;
+ struct async_submit_ctl submit;
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
int prexor = 0;
@@ -809,18 +1041,36 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
- flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
+ flags = ASYNC_TX_ACK |
(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
atomic_inc(&sh->count);
- if (unlikely(count == 1)) {
- flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
- tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
- flags, tx, ops_complete_postxor, sh);
- } else
- tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
- flags, tx, ops_complete_postxor, sh);
+ init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
+ to_addr_conv(sh, percpu));
+ if (unlikely(count == 1))
+ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
+ else
+ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+}
+
+static void
+ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
+{
+ struct async_submit_ctl submit;
+ struct page **blocks = percpu->scribble;
+ int count;
+
+ pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
+
+ count = set_syndrome_sources(blocks, sh);
+
+ atomic_inc(&sh->count);
+
+ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
+ sh, to_addr_conv(sh, percpu));
+ async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
@@ -835,63 +1085,115 @@ static void ops_complete_check(void *stripe_head_ref)
release_stripe(sh);
}
-static void ops_run_check(struct stripe_head *sh)
+static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
- /* kernel stack size limits the total number of disks */
int disks = sh->disks;
- struct page *xor_srcs[disks];
+ int pd_idx = sh->pd_idx;
+ int qd_idx = sh->qd_idx;
+ struct page *xor_dest;
+ struct page **xor_srcs = percpu->scribble;
struct dma_async_tx_descriptor *tx;
-
- int count = 0, pd_idx = sh->pd_idx, i;
- struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+ struct async_submit_ctl submit;
+ int count;
+ int i;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+ count = 0;
+ xor_dest = sh->dev[pd_idx].page;
+ xor_srcs[count++] = xor_dest;
for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
- if (i != pd_idx)
- xor_srcs[count++] = dev->page;
+ if (i == pd_idx || i == qd_idx)
+ continue;
+ xor_srcs[count++] = sh->dev[i].page;
}
- tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
- &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
+ init_async_submit(&submit, 0, NULL, NULL, NULL,
+ to_addr_conv(sh, percpu));
+ tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+ &sh->ops.zero_sum_result, &submit);
+
+ atomic_inc(&sh->count);
+ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
+ tx = async_trigger_callback(&submit);
+}
+
+static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
+{
+ struct page **srcs = percpu->scribble;
+ struct async_submit_ctl submit;
+ int count;
+
+ pr_debug("%s: stripe %llu checkp: %d\n", __func__,
+ (unsigned long long)sh->sector, checkp);
+
+ count = set_syndrome_sources(srcs, sh);
+ if (!checkp)
+ srcs[count] = NULL;
atomic_inc(&sh->count);
- tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
- ops_complete_check, sh);
+ init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
+ sh, to_addr_conv(sh, percpu));
+ async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
+ &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
-static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
int overlap_clear = 0, i, disks = sh->disks;
struct dma_async_tx_descriptor *tx = NULL;
+ raid5_conf_t *conf = sh->raid_conf;
+ int level = conf->level;
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+ cpu = get_cpu();
+ percpu = per_cpu_ptr(conf->percpu, cpu);
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
}
if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
- tx = ops_run_compute5(sh);
- /* terminate the chain if postxor is not set to be run */
- if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
+ if (level < 6)
+ tx = ops_run_compute5(sh, percpu);
+ else {
+ if (sh->ops.target2 < 0 || sh->ops.target < 0)
+ tx = ops_run_compute6_1(sh, percpu);
+ else
+ tx = ops_run_compute6_2(sh, percpu);
+ }
+ /* terminate the chain if reconstruct is not set to be run */
+ if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
async_tx_ack(tx);
}
if (test_bit(STRIPE_OP_PREXOR, &ops_request))
- tx = ops_run_prexor(sh, tx);
+ tx = ops_run_prexor(sh, percpu, tx);
if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
tx = ops_run_biodrain(sh, tx);
overlap_clear++;
}
- if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
- ops_run_postxor(sh, tx);
+ if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
+ if (level < 6)
+ ops_run_reconstruct5(sh, percpu, tx);
+ else
+ ops_run_reconstruct6(sh, percpu, tx);
+ }
- if (test_bit(STRIPE_OP_CHECK, &ops_request))
- ops_run_check(sh);
+ if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
+ if (sh->check_state == check_state_run)
+ ops_run_check_p(sh, percpu);
+ else if (sh->check_state == check_state_run_q)
+ ops_run_check_pq(sh, percpu, 0);
+ else if (sh->check_state == check_state_run_pq)
+ ops_run_check_pq(sh, percpu, 1);
+ else
+ BUG();
+ }
if (overlap_clear)
for (i = disks; i--; ) {
@@ -899,6 +1201,7 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
+ put_cpu();
}
static int grow_one_stripe(raid5_conf_t *conf)
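raid_run_ops now picks the compute routine from the RAID level and from how many of the two target slots are valid. A compact sketch of that selection alone (the enum names are illustrative):

#include <stdio.h>

enum compute_op { COMPUTE5, COMPUTE6_1, COMPUTE6_2 };

/* level < 6: single-target xor path; RAID-6: one or two missing blocks */
static enum compute_op pick_compute(int level, int target, int target2)
{
	if (level < 6)
		return COMPUTE5;
	return (target2 < 0 || target < 0) ? COMPUTE6_1 : COMPUTE6_2;
}

int main(void)
{
	printf("%d\n", pick_compute(5, 3, -1));	/* COMPUTE5   */
	printf("%d\n", pick_compute(6, 3, -1));	/* COMPUTE6_1 */
	printf("%d\n", pick_compute(6, 1, 4));	/* COMPUTE6_2 */
	return 0;
}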
@@ -948,6 +1251,28 @@ static int grow_stripes(raid5_conf_t *conf, int num)
return 0;
}
+/**
+ * scribble_len - return the required size of the scribble region
+ * @num - total number of disks in the array
+ *
+ * The size must be enough to contain:
+ * 1/ a struct page pointer for each device in the array +2
+ * 2/ room to convert each entry in (1) to its corresponding dma
+ * (dma_map_page()) or page (page_address()) address.
+ *
+ * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
+ * calculate over all devices (not just the data blocks), using zeros in place
+ * of the P and Q blocks.
+ */
+static size_t scribble_len(int num)
+{
+ size_t len;
+
+ len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
+
+ return len;
+}
+
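A quick standalone check of the sizing rule documented above. addr_conv_t is treated here as a pointer-sized placeholder, which is an assumption, and the helper mirrors to_addr_conv() by placing the conversion region directly after the page-pointer array:

#include <stdio.h>
#include <stddef.h>

typedef void *addr_conv_sketch_t;	/* assumed stand-in for addr_conv_t */

static size_t scribble_len_sketch(int num)
{
	/* (num + 2) page pointers followed by (num + 2) conversion entries */
	return sizeof(void *) * (num + 2) + sizeof(addr_conv_sketch_t) * (num + 2);
}

static addr_conv_sketch_t *to_addr_conv_sketch(void *scribble, int disks)
{
	/* the conversion region starts right after the page-pointer array */
	return (addr_conv_sketch_t *)((char *)scribble +
				      sizeof(void *) * (disks + 2));
}

int main(void)
{
	char buf[512];
	int disks = 8;

	printf("%d disks need %zu bytes of scribble\n",
	       disks, scribble_len_sketch(disks));
	printf("conversion region begins at offset %zu\n",
	       (size_t)((char *)to_addr_conv_sketch(buf, disks) - buf));
	return 0;
}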
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
/* Make all the stripes able to hold 'newsize' devices.
@@ -976,6 +1301,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
struct stripe_head *osh, *nsh;
LIST_HEAD(newstripes);
struct disk_info *ndisks;
+ unsigned long cpu;
int err;
struct kmem_cache *sc;
int i;
@@ -1041,7 +1367,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
/* Step 3.
* At this point, we are holding all the stripes so the array
* is completely stalled, so now is a good time to resize
- * conf->disks.
+ * conf->disks and the scribble region
*/
ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
if (ndisks) {
@@ -1052,10 +1378,30 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
} else
err = -ENOMEM;
+ get_online_cpus();
+ conf->scribble_len = scribble_len(newsize);
+ for_each_present_cpu(cpu) {
+ struct raid5_percpu *percpu;
+ void *scribble;
+
+ percpu = per_cpu_ptr(conf->percpu, cpu);
+ scribble = kmalloc(conf->scribble_len, GFP_NOIO);
+
+ if (scribble) {
+ kfree(percpu->scribble);
+ percpu->scribble = scribble;
+ } else {
+ err = -ENOMEM;
+ break;
+ }
+ }
+ put_online_cpus();
+
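The loop above allocates the larger buffer before freeing the old one, so a failed allocation leaves every CPU with a usable scribble region. A plain-C sketch of the same allocate-then-swap pattern, using a fixed array where the kernel uses per-CPU data (NCPUS and the names are illustrative):

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

#define NCPUS 4

static void *scribble[NCPUS];

static int resize_scribble(size_t new_len)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		void *bigger = malloc(new_len);

		if (!bigger)
			return -ENOMEM;	/* every CPU still has a usable buffer */
		free(scribble[cpu]);	/* only freed once the replacement exists */
		scribble[cpu] = bigger;
	}
	return 0;
}

int main(void)
{
	printf("resize: %d\n", resize_scribble(256));
	return 0;
}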
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
list_del_init(&nsh->lru);
+
for (i=conf->raid_disks; i < newsize; i++)
if (nsh->dev[i].page == NULL) {
struct page *p = alloc_page(GFP_NOIO);
@@ -1594,258 +1940,13 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
}
-
-/*
- * Copy data between a page in the stripe cache, and one or more bion
- * The page could align with the middle of the bio, or there could be
- * several bion, each with several bio_vecs, which cover part of the page
- * Multiple bion are linked together on bi_next. There may be extras
- * at the end of this list. We ignore them.
- */
-static void copy_data(int frombio, struct bio *bio,
- struct page *page,
- sector_t sector)
-{
- char *pa = page_address(page);
- struct bio_vec *bvl;
- int i;
- int page_offset;
-
- if (bio->bi_sector >= sector)
- page_offset = (signed)(bio->bi_sector - sector) * 512;
- else
- page_offset = (signed)(sector - bio->bi_sector) * -512;
- bio_for_each_segment(bvl, bio, i) {
- int len = bio_iovec_idx(bio,i)->bv_len;
- int clen;
- int b_offset = 0;
-
- if (page_offset < 0) {
- b_offset = -page_offset;
- page_offset += b_offset;
- len -= b_offset;
- }
-
- if (len > 0 && page_offset + len > STRIPE_SIZE)
- clen = STRIPE_SIZE - page_offset;
- else clen = len;
-
- if (clen > 0) {
- char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
- if (frombio)
- memcpy(pa+page_offset, ba+b_offset, clen);
- else
- memcpy(ba+b_offset, pa+page_offset, clen);
- __bio_kunmap_atomic(ba, KM_USER0);
- }
- if (clen < len) /* hit end of page */
- break;
- page_offset += len;
- }
-}
-
-#define check_xor() do { \
- if (count == MAX_XOR_BLOCKS) { \
- xor_blocks(count, STRIPE_SIZE, dest, ptr);\
- count = 0; \
- } \
- } while(0)
-
-static void compute_parity6(struct stripe_head *sh, int method)
-{
- raid5_conf_t *conf = sh->raid_conf;
- int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
- int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
- struct bio *chosen;
- /**** FIX THIS: This could be very bad if disks is close to 256 ****/
- void *ptrs[syndrome_disks+2];
-
- pd_idx = sh->pd_idx;
- qd_idx = sh->qd_idx;
- d0_idx = raid6_d0(sh);
-
- pr_debug("compute_parity, stripe %llu, method %d\n",
- (unsigned long long)sh->sector, method);
-
- switch(method) {
- case READ_MODIFY_WRITE:
- BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
- case RECONSTRUCT_WRITE:
- for (i= disks; i-- ;)
- if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
- chosen = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- BUG_ON(sh->dev[i].written);
- sh->dev[i].written = chosen;
- }
- break;
- case CHECK_PARITY:
- BUG(); /* Not implemented yet */
- }
-
- for (i = disks; i--;)
- if (sh->dev[i].written) {
- sector_t sector = sh->dev[i].sector;
- struct bio *wbi = sh->dev[i].written;
- while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
- copy_data(1, wbi, sh->dev[i].page, sector);
- wbi = r5_next_bio(wbi, sector);
- }
-
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(R5_UPTODATE, &sh->dev[i].flags);
- }
-
- /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/
-
- for (i = 0; i < disks; i++)
- ptrs[i] = (void *)raid6_empty_zero_page;
-
- count = 0;
- i = d0_idx;
- do {
- int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
-
- ptrs[slot] = page_address(sh->dev[i].page);
- if (slot < syndrome_disks &&
- !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
- printk(KERN_ERR "block %d/%d not uptodate "
- "on parity calc\n", i, count);
- BUG();
- }
-
- i = raid6_next_disk(i, disks);
- } while (i != d0_idx);
- BUG_ON(count != syndrome_disks);
-
- raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);
-
- switch(method) {
- case RECONSTRUCT_WRITE:
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
- set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
- set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
- break;
- case UPDATE_PARITY:
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
- break;
- }
-}
-
-
-/* Compute one missing block */
-static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
-{
- int i, count, disks = sh->disks;
- void *ptr[MAX_XOR_BLOCKS], *dest, *p;
- int qd_idx = sh->qd_idx;
-
- pr_debug("compute_block_1, stripe %llu, idx %d\n",
- (unsigned long long)sh->sector, dd_idx);
-
- if ( dd_idx == qd_idx ) {
- /* We're actually computing the Q drive */
- compute_parity6(sh, UPDATE_PARITY);
- } else {
- dest = page_address(sh->dev[dd_idx].page);
- if (!nozero) memset(dest, 0, STRIPE_SIZE);
- count = 0;
- for (i = disks ; i--; ) {
- if (i == dd_idx || i == qd_idx)
- continue;
- p = page_address(sh->dev[i].page);
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
- ptr[count++] = p;
- else
- printk("compute_block() %d, stripe %llu, %d"
- " not present\n", dd_idx,
- (unsigned long long)sh->sector, i);
-
- check_xor();
- }
- if (count)
- xor_blocks(count, STRIPE_SIZE, dest, ptr);
- if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
- else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
- }
-}
-
-/* Compute two missing blocks */
-static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
-{
- int i, count, disks = sh->disks;
- int syndrome_disks = sh->ddf_layout ? disks : disks-2;
- int d0_idx = raid6_d0(sh);
- int faila = -1, failb = -1;
- /**** FIX THIS: This could be very bad if disks is close to 256 ****/
- void *ptrs[syndrome_disks+2];
-
- for (i = 0; i < disks ; i++)
- ptrs[i] = (void *)raid6_empty_zero_page;
- count = 0;
- i = d0_idx;
- do {
- int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
-
- ptrs[slot] = page_address(sh->dev[i].page);
-
- if (i == dd_idx1)
- faila = slot;
- if (i == dd_idx2)
- failb = slot;
- i = raid6_next_disk(i, disks);
- } while (i != d0_idx);
- BUG_ON(count != syndrome_disks);
-
- BUG_ON(faila == failb);
- if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
-
- pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
- (unsigned long long)sh->sector, dd_idx1, dd_idx2,
- faila, failb);
-
- if (failb == syndrome_disks+1) {
- /* Q disk is one of the missing disks */
- if (faila == syndrome_disks) {
- /* Missing P+Q, just recompute */
- compute_parity6(sh, UPDATE_PARITY);
- return;
- } else {
- /* We're missing D+Q; recompute D from P */
- compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
- dd_idx2 : dd_idx1),
- 0);
- compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
- return;
- }
- }
-
- /* We're missing D+P or D+D; */
- if (failb == syndrome_disks) {
- /* We're missing D+P. */
- raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
- } else {
- /* We're missing D+D. */
- raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
- ptrs);
- }
-
- /* Both the above update both missing blocks */
- set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
- set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
-}
-
static void
-schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
+schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
{
int i, pd_idx = sh->pd_idx, disks = sh->disks;
+ raid5_conf_t *conf = sh->raid_conf;
+ int level = conf->level;
if (rcw) {
/* if we are not expanding this is a proper write request, and
@@ -1858,7 +1959,7 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
} else
sh->reconstruct_state = reconstruct_state_run;
- set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
+ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -1871,17 +1972,18 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
s->locked++;
}
}
- if (s->locked + 1 == disks)
+ if (s->locked + conf->max_degraded == disks)
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
- atomic_inc(&sh->raid_conf->pending_full_writes);
+ atomic_inc(&conf->pending_full_writes);
} else {
+ BUG_ON(level == 6);
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
sh->reconstruct_state = reconstruct_state_prexor_drain_run;
set_bit(STRIPE_OP_PREXOR, &s->ops_request);
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
- set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
+ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -1899,13 +2001,22 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
}
}
- /* keep the parity disk locked while asynchronous operations
+ /* keep the parity disk(s) locked while asynchronous operations
* are in flight
*/
set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
s->locked++;
+ if (level == 6) {
+ int qd_idx = sh->qd_idx;
+ struct r5dev *dev = &sh->dev[qd_idx];
+
+ set_bit(R5_LOCKED, &dev->flags);
+ clear_bit(R5_UPTODATE, &dev->flags);
+ s->locked++;
+ }
+
pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
__func__, (unsigned long long)sh->sector,
s->locked, s->ops_request);
@@ -1986,13 +2097,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
static void end_reshape(raid5_conf_t *conf);
-static int page_is_zero(struct page *p)
-{
- char *a = page_address(p);
- return ((*(u32*)a) == 0 &&
- memcmp(a, a+4, STRIPE_SIZE-4)==0);
-}
-
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
struct stripe_head *sh)
{
@@ -2132,9 +2236,10 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
set_bit(R5_Wantcompute, &dev->flags);
sh->ops.target = disk_idx;
+ sh->ops.target2 = -1;
s->req_compute = 1;
/* Careful: from this point on 'uptodate' is in the eye
- * of raid5_run_ops which services 'compute' operations
+ * of raid_run_ops which services 'compute' operations
* before writes. R5_Wantcompute flags a block that will
* be R5_UPTODATE by the time it is needed for a
* subsequent operation.
@@ -2173,61 +2278,104 @@ static void handle_stripe_fill5(struct stripe_head *sh,
set_bit(STRIPE_HANDLE, &sh->state);
}
-static void handle_stripe_fill6(struct stripe_head *sh,
- struct stripe_head_state *s, struct r6_state *r6s,
- int disks)
+/* fetch_block6 - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill6 to continue
+ */
+static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
+ struct r6_state *r6s, int disk_idx, int disks)
{
- int i;
- for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread || (dev->towrite &&
- !test_bit(R5_OVERWRITE, &dev->flags)) ||
- s->syncing || s->expanding ||
- (s->failed >= 1 &&
- (sh->dev[r6s->failed_num[0]].toread ||
- s->to_write)) ||
- (s->failed >= 2 &&
- (sh->dev[r6s->failed_num[1]].toread ||
- s->to_write)))) {
- /* we would like to get this block, possibly
- * by computing it, but we might not be able to
+ struct r5dev *dev = &sh->dev[disk_idx];
+ struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
+ &sh->dev[r6s->failed_num[1]] };
+
+ if (!test_bit(R5_LOCKED, &dev->flags) &&
+ !test_bit(R5_UPTODATE, &dev->flags) &&
+ (dev->toread ||
+ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+ s->syncing || s->expanding ||
+ (s->failed >= 1 &&
+ (fdev[0]->toread || s->to_write)) ||
+ (s->failed >= 2 &&
+ (fdev[1]->toread || s->to_write)))) {
+ /* we would like to get this block, possibly by computing it,
+ * otherwise read it if the backing disk is insync
+ */
+ BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
+ BUG_ON(test_bit(R5_Wantread, &dev->flags));
+ if ((s->uptodate == disks - 1) &&
+ (s->failed && (disk_idx == r6s->failed_num[0] ||
+ disk_idx == r6s->failed_num[1]))) {
+ /* a disk has failed and this block has been requested;
+ * so compute it
*/
- if ((s->uptodate == disks - 1) &&
- (s->failed && (i == r6s->failed_num[0] ||
- i == r6s->failed_num[1]))) {
- pr_debug("Computing stripe %llu block %d\n",
- (unsigned long long)sh->sector, i);
- compute_block_1(sh, i, 0);
- s->uptodate++;
- } else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
- /* Computing 2-failure is *very* expensive; only
- * do it if failed >= 2
- */
- int other;
- for (other = disks; other--; ) {
- if (other == i)
- continue;
- if (!test_bit(R5_UPTODATE,
- &sh->dev[other].flags))
- break;
- }
- BUG_ON(other < 0);
- pr_debug("Computing stripe %llu blocks %d,%d\n",
- (unsigned long long)sh->sector,
- i, other);
- compute_block_2(sh, i, other);
- s->uptodate += 2;
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- pr_debug("Reading block %d (sync=%d)\n",
- i, s->syncing);
+ pr_debug("Computing stripe %llu block %d\n",
+ (unsigned long long)sh->sector, disk_idx);
+ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
+ set_bit(R5_Wantcompute, &dev->flags);
+ sh->ops.target = disk_idx;
+ sh->ops.target2 = -1; /* no 2nd target */
+ s->req_compute = 1;
+ s->uptodate++;
+ return 1;
+ } else if (s->uptodate == disks-2 && s->failed >= 2) {
+ /* Computing 2-failure is *very* expensive; only
+ * do it if failed >= 2
+ */
+ int other;
+ for (other = disks; other--; ) {
+ if (other == disk_idx)
+ continue;
+ if (!test_bit(R5_UPTODATE,
+ &sh->dev[other].flags))
+ break;
}
+ BUG_ON(other < 0);
+ pr_debug("Computing stripe %llu blocks %d,%d\n",
+ (unsigned long long)sh->sector,
+ disk_idx, other);
+ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
+ set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
+ set_bit(R5_Wantcompute, &sh->dev[other].flags);
+ sh->ops.target = disk_idx;
+ sh->ops.target2 = other;
+ s->uptodate += 2;
+ s->req_compute = 1;
+ return 1;
+ } else if (test_bit(R5_Insync, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ s->locked++;
+ pr_debug("Reading block %d (sync=%d)\n",
+ disk_idx, s->syncing);
}
}
+
+ return 0;
+}
+
+/**
+ * handle_stripe_fill6 - read or compute data to satisfy pending requests.
+ */
+static void handle_stripe_fill6(struct stripe_head *sh,
+ struct stripe_head_state *s, struct r6_state *r6s,
+ int disks)
+{
+ int i;
+
+ /* look for blocks to read/compute, skip this if a compute
+ * is already in flight, or if the stripe contents are in the
+ * midst of changing due to a write
+ */
+ if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
+ !sh->reconstruct_state)
+ for (i = disks; i--; )
+ if (fetch_block6(sh, s, r6s, i, disks))
+ break;
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -2361,114 +2509,61 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
*/
/* since handle_stripe can be called at any time we need to handle the
* case where a compute block operation has been submitted and then a
- * subsequent call wants to start a write request. raid5_run_ops only
- * handles the case where compute block and postxor are requested
+ * subsequent call wants to start a write request. raid_run_ops only
+ * handles the case where compute block and reconstruct are requested
* simultaneously. If this is not the case then new writes need to be
* held off until the compute completes.
*/
if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
(s->locked == 0 && (rcw == 0 || rmw == 0) &&
!test_bit(STRIPE_BIT_DELAY, &sh->state)))
- schedule_reconstruction5(sh, s, rcw == 0, 0);
+ schedule_reconstruction(sh, s, rcw == 0, 0);
}
static void handle_stripe_dirtying6(raid5_conf_t *conf,
struct stripe_head *sh, struct stripe_head_state *s,
struct r6_state *r6s, int disks)
{
- int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
+ int rcw = 0, pd_idx = sh->pd_idx, i;
int qd_idx = sh->qd_idx;
+
+ set_bit(STRIPE_HANDLE, &sh->state);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- /* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags)
- && i != pd_idx && i != qd_idx
- && (!test_bit(R5_LOCKED, &dev->flags)
- ) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
- if (test_bit(R5_Insync, &dev->flags)) rcw++;
- else {
- pr_debug("raid6: must_compute: "
- "disk %d flags=%#lx\n", i, dev->flags);
- must_compute++;
+ /* check if we don't have enough data */
+ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
+ i != pd_idx && i != qd_idx &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ rcw++;
+ if (!test_bit(R5_Insync, &dev->flags))
+ continue; /* it's a failed drive */
+
+ if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ pr_debug("Read_old stripe %llu "
+ "block %d for Reconstruct\n",
+ (unsigned long long)sh->sector, i);
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ s->locked++;
+ } else {
+ pr_debug("Request delayed stripe %llu "
+ "block %d for Reconstruct\n",
+ (unsigned long long)sh->sector, i);
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
}
}
}
- pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
- (unsigned long long)sh->sector, rcw, must_compute);
- set_bit(STRIPE_HANDLE, &sh->state);
-
- if (rcw > 0)
- /* want reconstruct write, but need to get some data */
- for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
- if (!test_bit(R5_OVERWRITE, &dev->flags)
- && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
- && !test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
- test_bit(R5_Insync, &dev->flags)) {
- if (
- test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- pr_debug("Read_old stripe %llu "
- "block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- } else {
- pr_debug("Request delayed stripe %llu "
- "block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
/* now if nothing is locked, and if we have enough data, we can start a
* write request
*/
- if (s->locked == 0 && rcw == 0 &&
+ if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
+ s->locked == 0 && rcw == 0 &&
!test_bit(STRIPE_BIT_DELAY, &sh->state)) {
- if (must_compute > 0) {
- /* We have failed blocks and need to compute them */
- switch (s->failed) {
- case 0:
- BUG();
- case 1:
- compute_block_1(sh, r6s->failed_num[0], 0);
- break;
- case 2:
- compute_block_2(sh, r6s->failed_num[0],
- r6s->failed_num[1]);
- break;
- default: /* This request should have been failed? */
- BUG();
- }
- }
-
- pr_debug("Computing parity for stripe %llu\n",
- (unsigned long long)sh->sector);
- compute_parity6(sh, RECONSTRUCT_WRITE);
- /* now every locked buffer is ready to be written */
- for (i = disks; i--; )
- if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
- pr_debug("Writing stripe %llu block %d\n",
- (unsigned long long)sh->sector, i);
- s->locked++;
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- }
- if (s->locked == disks)
- if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
- atomic_inc(&conf->pending_full_writes);
- /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
- set_bit(STRIPE_INSYNC, &sh->state);
-
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ schedule_reconstruction(sh, s, 1, 0);
}
}
@@ -2527,7 +2622,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
* we are done. Otherwise update the mismatch count and repair
* parity if !MD_RECOVERY_CHECK
*/
- if (sh->ops.zero_sum_result == 0)
+ if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
/* parity is correct (on disc,
* not in buffer any more)
*/
@@ -2544,6 +2639,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
set_bit(R5_Wantcompute,
&sh->dev[sh->pd_idx].flags);
sh->ops.target = sh->pd_idx;
+ sh->ops.target2 = -1;
s->uptodate++;
}
}
@@ -2560,67 +2656,74 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
- struct stripe_head_state *s,
- struct r6_state *r6s, struct page *tmp_page,
- int disks)
+ struct stripe_head_state *s,
+ struct r6_state *r6s, int disks)
{
- int update_p = 0, update_q = 0;
- struct r5dev *dev;
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
+ struct r5dev *dev;
set_bit(STRIPE_HANDLE, &sh->state);
BUG_ON(s->failed > 2);
- BUG_ON(s->uptodate < disks);
+
/* Want to check and possibly repair P and Q.
* However there could be one 'failed' device, in which
* case we can only check one of them, possibly using the
* other to generate missing data
*/
- /* If !tmp_page, we cannot do the calculations,
- * but as we have set STRIPE_HANDLE, we will soon be called
- * by stripe_handle with a tmp_page - just wait until then.
- */
- if (tmp_page) {
+ switch (sh->check_state) {
+ case check_state_idle:
+ /* start a new check operation if there are < 2 failures */
if (s->failed == r6s->q_failed) {
- /* The only possible failed device holds 'Q', so it
+ /* The only possible failed device holds Q, so it
* makes sense to check P (If anything else were failed,
* we would have used P to recreate it).
*/
- compute_block_1(sh, pd_idx, 1);
- if (!page_is_zero(sh->dev[pd_idx].page)) {
- compute_block_1(sh, pd_idx, 0);
- update_p = 1;
- }
+ sh->check_state = check_state_run;
}
if (!r6s->q_failed && s->failed < 2) {
- /* q is not failed, and we didn't use it to generate
+ /* Q is not failed, and we didn't use it to generate
* anything, so it makes sense to check it
*/
- memcpy(page_address(tmp_page),
- page_address(sh->dev[qd_idx].page),
- STRIPE_SIZE);
- compute_parity6(sh, UPDATE_PARITY);
- if (memcmp(page_address(tmp_page),
- page_address(sh->dev[qd_idx].page),
- STRIPE_SIZE) != 0) {
- clear_bit(STRIPE_INSYNC, &sh->state);
- update_q = 1;
- }
+ if (sh->check_state == check_state_run)
+ sh->check_state = check_state_run_pq;
+ else
+ sh->check_state = check_state_run_q;
}
- if (update_p || update_q) {
- conf->mddev->resync_mismatches += STRIPE_SECTORS;
- if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
- /* don't try to repair!! */
- update_p = update_q = 0;
+
+ /* discard potentially stale zero_sum_result */
+ sh->ops.zero_sum_result = 0;
+
+ if (sh->check_state == check_state_run) {
+ /* async_xor_zero_sum destroys the contents of P */
+ clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+ s->uptodate--;
+ }
+ if (sh->check_state >= check_state_run &&
+ sh->check_state <= check_state_run_pq) {
+ /* async_syndrome_zero_sum preserves P and Q, so
+ * no need to mark them !uptodate here
+ */
+ set_bit(STRIPE_OP_CHECK, &s->ops_request);
+ break;
}
+ /* we have 2-disk failure */
+ BUG_ON(s->failed != 2);
+ /* fall through */
+ case check_state_compute_result:
+ sh->check_state = check_state_idle;
+
+ /* check that a write has not made the stripe insync */
+ if (test_bit(STRIPE_INSYNC, &sh->state))
+ break;
+
/* now write out any block on a failed drive,
- * or P or Q if they need it
+ * or P or Q if they were recomputed
*/
-
+ BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
if (s->failed == 2) {
dev = &sh->dev[r6s->failed_num[1]];
s->locked++;
@@ -2633,14 +2736,13 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
}
-
- if (update_p) {
+ if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
dev = &sh->dev[pd_idx];
s->locked++;
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
}
- if (update_q) {
+ if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
dev = &sh->dev[qd_idx];
s->locked++;
set_bit(R5_LOCKED, &dev->flags);
@@ -2649,6 +2751,70 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
+ break;
+ case check_state_run:
+ case check_state_run_q:
+ case check_state_run_pq:
+ break; /* we will be called again upon completion */
+ case check_state_check_result:
+ sh->check_state = check_state_idle;
+
+ /* handle a successful check operation: if parity is correct
+ * we are done. Otherwise update the mismatch count and repair
+ * parity if !MD_RECOVERY_CHECK
+ */
+ if (sh->ops.zero_sum_result == 0) {
+ /* both parities are correct */
+ if (!s->failed)
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ /* in contrast to the raid5 case we can validate
+ * parity, but still have a failure to write
+ * back
+ */
+ sh->check_state = check_state_compute_result;
+ /* Returning at this point means that we may go
+ * off and bring p and/or q uptodate again so
+ * we make sure to check zero_sum_result again
+ * to verify if p or q need writeback
+ */
+ }
+ } else {
+ conf->mddev->resync_mismatches += STRIPE_SECTORS;
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ int *target = &sh->ops.target;
+
+ sh->ops.target = -1;
+ sh->ops.target2 = -1;
+ sh->check_state = check_state_compute_run;
+ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
+ if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
+ set_bit(R5_Wantcompute,
+ &sh->dev[pd_idx].flags);
+ *target = pd_idx;
+ target = &sh->ops.target2;
+ s->uptodate++;
+ }
+ if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
+ set_bit(R5_Wantcompute,
+ &sh->dev[qd_idx].flags);
+ *target = qd_idx;
+ s->uptodate++;
+ }
+ }
+ }
+ break;
+ case check_state_compute_run:
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+ __func__, sh->check_state,
+ (unsigned long long) sh->sector);
+ BUG();
}
}
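The state machine entered above starts, in check_state_idle, by deciding which parity can actually be verified: P alone, Q alone, or both. A standalone sketch of that initial selection, driven by the same two inputs (the failed-device count and whether Q is the failed device):

#include <stdio.h>

enum which_check { CHECK_NONE, CHECK_P, CHECK_Q, CHECK_PQ };

static enum which_check pick_check(int failed, int q_failed)
{
	enum which_check c = CHECK_NONE;

	/* only possible failure is Q (or nothing failed): P is verifiable */
	if (failed == q_failed)
		c = CHECK_P;
	/* Q survived and was not needed to rebuild anything: verify it too */
	if (!q_failed && failed < 2)
		c = (c == CHECK_P) ? CHECK_PQ : CHECK_Q;
	return c;
}

int main(void)
{
	static const char * const names[] = { "none", "P", "Q", "P+Q" };

	printf("failed=0 q_failed=0 -> %s\n", names[pick_check(0, 0)]);
	printf("failed=1 q_failed=1 -> %s\n", names[pick_check(1, 1)]);
	printf("failed=1 q_failed=0 -> %s\n", names[pick_check(1, 0)]);
	printf("failed=2 q_failed=1 -> %s\n", names[pick_check(2, 1)]);
	return 0;
}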
@@ -2666,6 +2832,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
if (i != sh->pd_idx && i != sh->qd_idx) {
int dd_idx, j;
struct stripe_head *sh2;
+ struct async_submit_ctl submit;
sector_t bn = compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
@@ -2685,9 +2852,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
}
/* place all the copies on one channel */
+ init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
tx = async_memcpy(sh2->dev[dd_idx].page,
- sh->dev[i].page, 0, 0, STRIPE_SIZE,
- ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+ sh->dev[i].page, 0, 0, STRIPE_SIZE,
+ &submit);
set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
@@ -2756,7 +2924,8 @@ static bool handle_stripe5(struct stripe_head *sh)
rcu_read_lock();
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
- struct r5dev *dev = &sh->dev[i];
+
+ dev = &sh->dev[i];
clear_bit(R5_Insync, &dev->flags);
pr_debug("check %d: state 0x%lx toread %p read %p write %p "
@@ -2973,7 +3142,7 @@ static bool handle_stripe5(struct stripe_head *sh)
/* Need to write out all blocks after computing parity */
sh->disks = conf->raid_disks;
stripe_set_idx(sh->sector, conf, 0, sh);
- schedule_reconstruction5(sh, &s, 1, 1);
+ schedule_reconstruction(sh, &s, 1, 1);
} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
@@ -2993,7 +3162,7 @@ static bool handle_stripe5(struct stripe_head *sh)
md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
if (s.ops_request)
- raid5_run_ops(sh, s.ops_request);
+ raid_run_ops(sh, s.ops_request);
ops_run_io(sh, &s);
@@ -3002,7 +3171,7 @@ static bool handle_stripe5(struct stripe_head *sh)
return blocked_rdev == NULL;
}
-static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
+static bool handle_stripe6(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks;
@@ -3014,9 +3183,10 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
mdk_rdev_t *blocked_rdev = NULL;
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
- "pd_idx=%d, qd_idx=%d\n",
+ "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
(unsigned long long)sh->sector, sh->state,
- atomic_read(&sh->count), pd_idx, qd_idx);
+ atomic_read(&sh->count), pd_idx, qd_idx,
+ sh->check_state, sh->reconstruct_state);
memset(&s, 0, sizeof(s));
spin_lock(&sh->lock);
@@ -3036,35 +3206,26 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
i, dev->flags, dev->toread, dev->towrite, dev->written);
- /* maybe we can reply to a read */
- if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
- struct bio *rbi, *rbi2;
- pr_debug("Return read for disc %d\n", i);
- spin_lock_irq(&conf->device_lock);
- rbi = dev->toread;
- dev->toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
- while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- copy_data(0, rbi, dev->page, dev->sector);
- rbi2 = r5_next_bio(rbi, dev->sector);
- spin_lock_irq(&conf->device_lock);
- if (!raid5_dec_bi_phys_segments(rbi)) {
- rbi->bi_next = return_bi;
- return_bi = rbi;
- }
- spin_unlock_irq(&conf->device_lock);
- rbi = rbi2;
- }
- }
+ /* maybe we can reply to a read
+ *
+ * new wantfill requests are only permitted while
+ * ops_complete_biofill is guaranteed to be inactive
+ */
+ if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
+ !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
+ set_bit(R5_Wantfill, &dev->flags);
/* now count some things */
if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
+ if (test_bit(R5_Wantcompute, &dev->flags)) {
+ s.compute++;
+ BUG_ON(s.compute > 2);
+ }
-
- if (dev->toread)
+ if (test_bit(R5_Wantfill, &dev->flags)) {
+ s.to_fill++;
+ } else if (dev->toread)
s.to_read++;
if (dev->towrite) {
s.to_write++;
@@ -3105,6 +3266,11 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
blocked_rdev = NULL;
}
+ if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
+ set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
+ set_bit(STRIPE_BIOFILL_RUN, &sh->state);
+ }
+
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -3145,19 +3311,62 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
* or to load a block that is being partially written.
*/
if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
- (s.syncing && (s.uptodate < disks)) || s.expanding)
+ (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
handle_stripe_fill6(sh, &s, &r6s, disks);
- /* now to consider writing and what else, if anything should be read */
- if (s.to_write)
+ /* Now we check to see if any write operations have recently
+ * completed
+ */
+ if (sh->reconstruct_state == reconstruct_state_drain_result) {
+ int qd_idx = sh->qd_idx;
+
+ sh->reconstruct_state = reconstruct_state_idle;
+ /* All the 'written' buffers and the parity blocks are ready to
+ * be written back to disk
+ */
+ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
+ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
+ for (i = disks; i--; ) {
+ dev = &sh->dev[i];
+ if (test_bit(R5_LOCKED, &dev->flags) &&
+ (i == sh->pd_idx || i == qd_idx ||
+ dev->written)) {
+ pr_debug("Writing block %d\n", i);
+ BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
+ set_bit(R5_Wantwrite, &dev->flags);
+ if (!test_bit(R5_Insync, &dev->flags) ||
+ ((i == sh->pd_idx || i == qd_idx) &&
+ s.failed == 0))
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+ }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+ }
+
+ /* Now to consider new write requests and what else, if anything
+ * should be read. We do not handle new writes when:
+ * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
+ * 2/ A 'check' operation is in flight, as it may clobber the parity
+ * block.
+ */
+ if (s.to_write && !sh->reconstruct_state && !sh->check_state)
handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
- * data is available
+ * data is available. The parity check is held off while parity
+ * dependent operations are in flight.
*/
- if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state))
- handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks);
+ if (sh->check_state ||
+ (s.syncing && s.locked == 0 &&
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
+ !test_bit(STRIPE_INSYNC, &sh->state)))
+ handle_parity_checks6(conf, sh, &s, &r6s, disks);
if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS,1);
@@ -3178,15 +3387,29 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
set_bit(R5_Wantwrite, &dev->flags);
set_bit(R5_ReWrite, &dev->flags);
set_bit(R5_LOCKED, &dev->flags);
+ s.locked++;
} else {
/* let's read it back */
set_bit(R5_Wantread, &dev->flags);
set_bit(R5_LOCKED, &dev->flags);
+ s.locked++;
}
}
}
- if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+ /* Finish reconstruct operations initiated by the expansion process */
+ if (sh->reconstruct_state == reconstruct_state_result) {
+ sh->reconstruct_state = reconstruct_state_idle;
+ clear_bit(STRIPE_EXPANDING, &sh->state);
+ for (i = conf->raid_disks; i--; ) {
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ s.locked++;
+ }
+ }
+
+ if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
+ !sh->reconstruct_state) {
struct stripe_head *sh2
= get_active_stripe(conf, sh->sector, 1, 1, 1);
if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
@@ -3207,14 +3430,8 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
/* Need to write out all blocks after computing P&Q */
sh->disks = conf->raid_disks;
stripe_set_idx(sh->sector, conf, 0, sh);
- compute_parity6(sh, RECONSTRUCT_WRITE);
- for (i = conf->raid_disks ; i-- ; ) {
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- s.locked++;
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- }
- clear_bit(STRIPE_EXPANDING, &sh->state);
- } else if (s.expanded) {
+ schedule_reconstruction(sh, &s, 1, 1);
+ } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
@@ -3232,6 +3449,9 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
if (unlikely(blocked_rdev))
md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
+ if (s.ops_request)
+ raid_run_ops(sh, s.ops_request);
+
ops_run_io(sh, &s);
return_io(return_bi);
@@ -3240,16 +3460,14 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
}
/* returns true if the stripe was handled */
-static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
+static bool handle_stripe(struct stripe_head *sh)
{
if (sh->raid_conf->level == 6)
- return handle_stripe6(sh, tmp_page);
+ return handle_stripe6(sh);
else
return handle_stripe5(sh);
}
-
-
static void raid5_activate_delayed(raid5_conf_t *conf)
{
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
@@ -3331,6 +3549,9 @@ static int raid5_congested(void *data, int bits)
/* No difference between reads and writes. Just check
* how busy the stripe_cache is
*/
+
+ if (mddev_congested(mddev, bits))
+ return 1;
if (conf->inactive_blocked)
return 1;
if (conf->quiesce)
@@ -3880,7 +4101,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
INIT_LIST_HEAD(&stripes);
for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
- int skipped = 0;
+ int skipped_disk = 0;
sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
@@ -3896,14 +4117,14 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
continue;
s = compute_blocknr(sh, j, 0);
if (s < raid5_size(mddev, 0, 0)) {
- skipped = 1;
+ skipped_disk = 1;
continue;
}
memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
set_bit(R5_Expanded, &sh->dev[j].flags);
set_bit(R5_UPTODATE, &sh->dev[j].flags);
}
- if (!skipped) {
+ if (!skipped_disk) {
set_bit(STRIPE_EXPAND_READY, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -4057,7 +4278,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
spin_unlock(&sh->lock);
/* wait for any blocked device to be handled */
- while(unlikely(!handle_stripe(sh, NULL)))
+ while (unlikely(!handle_stripe(sh)))
;
release_stripe(sh);
@@ -4114,7 +4335,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
return handled;
}
- handle_stripe(sh, NULL);
+ handle_stripe(sh);
release_stripe(sh);
handled++;
}
@@ -4128,6 +4349,36 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
return handled;
}
+#ifdef CONFIG_MULTICORE_RAID456
+static void __process_stripe(void *param, async_cookie_t cookie)
+{
+ struct stripe_head *sh = param;
+
+ handle_stripe(sh);
+ release_stripe(sh);
+}
+
+static void process_stripe(struct stripe_head *sh, struct list_head *domain)
+{
+ async_schedule_domain(__process_stripe, sh, domain);
+}
+
+static void synchronize_stripe_processing(struct list_head *domain)
+{
+ async_synchronize_full_domain(domain);
+}
+#else
+static void process_stripe(struct stripe_head *sh, struct list_head *domain)
+{
+ handle_stripe(sh);
+ release_stripe(sh);
+ cond_resched();
+}
+
+static void synchronize_stripe_processing(struct list_head *domain)
+{
+}
+#endif
/*
@@ -4142,6 +4393,7 @@ static void raid5d(mddev_t *mddev)
struct stripe_head *sh;
raid5_conf_t *conf = mddev->private;
int handled;
+ LIST_HEAD(raid_domain);
pr_debug("+++ raid5d active\n");
@@ -4178,8 +4430,7 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
handled++;
- handle_stripe(sh, conf->spare_page);
- release_stripe(sh);
+ process_stripe(sh, &raid_domain);
spin_lock_irq(&conf->device_lock);
}
@@ -4187,6 +4438,7 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
+ synchronize_stripe_processing(&raid_domain);
async_tx_issue_pending_all();
unplug_slaves(mddev);
@@ -4319,15 +4571,118 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void raid5_free_percpu(raid5_conf_t *conf)
+{
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+ if (!conf->percpu)
+ return;
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu) {
+ percpu = per_cpu_ptr(conf->percpu, cpu);
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ }
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&conf->cpu_notify);
+#endif
+ put_online_cpus();
+
+ free_percpu(conf->percpu);
+}
+
static void free_conf(raid5_conf_t *conf)
{
shrink_stripes(conf);
- safe_put_page(conf->spare_page);
+ raid5_free_percpu(conf);
kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
+ long cpu = (long)hcpu;
+ struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble ||
+ (conf->level == 6 && !percpu->spare_page)) {
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
+ return NOTIFY_BAD;
+ }
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif
+
+static int raid5_alloc_percpu(raid5_conf_t *conf)
+{
+ unsigned long cpu;
+ struct page *spare_page;
+ struct raid5_percpu *allcpus;
+ void *scribble;
+ int err;
+
+ allcpus = alloc_percpu(struct raid5_percpu);
+ if (!allcpus)
+ return -ENOMEM;
+ conf->percpu = allcpus;
+
+ get_online_cpus();
+ err = 0;
+ for_each_present_cpu(cpu) {
+ if (conf->level == 6) {
+ spare_page = alloc_page(GFP_KERNEL);
+ if (!spare_page) {
+ err = -ENOMEM;
+ break;
+ }
+ per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
+ }
+ scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
+ if (!scribble) {
+ err = -ENOMEM;
+ break;
+ }
+ per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
+ }
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ if (err == 0)
+ err = register_cpu_notifier(&conf->cpu_notify);
+#endif
+ put_online_cpus();
+
+ return err;
+}
+
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
raid5_conf_t *conf;
@@ -4369,6 +4724,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
goto abort;
conf->raid_disks = mddev->raid_disks;
+ conf->scribble_len = scribble_len(conf->raid_disks);
if (mddev->reshape_position == MaxSector)
conf->previous_raid_disks = mddev->raid_disks;
else
@@ -4384,11 +4740,10 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
- if (mddev->new_level == 6) {
- conf->spare_page = alloc_page(GFP_KERNEL);
- if (!conf->spare_page)
- goto abort;
- }
+ conf->level = mddev->new_level;
+ if (raid5_alloc_percpu(conf) != 0)
+ goto abort;
+
spin_lock_init(&conf->device_lock);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
@@ -4447,7 +4802,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
printk(KERN_INFO "raid5: allocated %dkB for %s\n",
memory, mdname(mddev));
- conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
+ conf->thread = md_register_thread(raid5d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
"raid5: couldn't allocate thread for %s\n",
@@ -4613,7 +4968,7 @@ static int run(mddev_t *mddev)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
- "%s_reshape");
+ "reshape");
}
/* read-ahead size must cover two whole stripes, which is
@@ -5031,7 +5386,7 @@ static int raid5_start_reshape(mddev_t *mddev)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
- "%s_reshape");
+ "reshape");
if (!mddev->sync_thread) {
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
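The raid5.c hunks above replace the single conf->spare_page with per-CPU spare_page/scribble buffers that raid456_cpu_notify() allocates on CPU_UP_PREPARE and frees on CPU_DEAD, refusing the CPU if either allocation fails. Below is a minimal standalone C sketch of that allocate-or-refuse / free-and-clear pairing; it uses plain malloc, and percpu_slot, LEVEL and SCRIBBLE_LEN are illustrative stand-ins, not the kernel's per-cpu or notifier APIs.

/*
 * Hedged sketch, not kernel code: the CPU_UP_PREPARE / CPU_DEAD pairing
 * used by raid456_cpu_notify() above, modelled with plain malloc so it
 * builds standalone.  percpu_slot, LEVEL and SCRIBBLE_LEN are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define LEVEL        6      /* assume a raid6 array, which also needs spare_page */
#define SCRIBBLE_LEN 4096   /* stand-in for scribble_len(raid_disks) */

struct percpu_slot {
	void *spare_page;   /* only required when LEVEL == 6 */
	void *scribble;     /* always required */
};

/* Mirrors the CPU_UP_PREPARE branch: both buffers exist or the CPU is refused. */
static int slot_up_prepare(struct percpu_slot *s)
{
	if (LEVEL == 6 && !s->spare_page)
		s->spare_page = malloc(SCRIBBLE_LEN);
	if (!s->scribble)
		s->scribble = malloc(SCRIBBLE_LEN);

	if (!s->scribble || (LEVEL == 6 && !s->spare_page)) {
		free(s->spare_page);
		free(s->scribble);
		s->spare_page = s->scribble = NULL;
		return -1;              /* NOTIFY_BAD in the driver */
	}
	return 0;                       /* NOTIFY_OK */
}

/* Mirrors the CPU_DEAD branch: release and clear both pointers. */
static void slot_dead(struct percpu_slot *s)
{
	free(s->spare_page);
	free(s->scribble);
	s->spare_page = s->scribble = NULL;
}

int main(void)
{
	struct percpu_slot s = { NULL, NULL };

	if (slot_up_prepare(&s) == 0)
		printf("cpu online: spare_page=%p scribble=%p\n",
		       s.spare_page, s.scribble);
	slot_dead(&s);
	return 0;
}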
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 9459689c4ea..2390e0e83da 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -2,6 +2,7 @@
#define _RAID5_H
#include <linux/raid/xor.h>
+#include <linux/dmaengine.h>
/*
*
@@ -175,7 +176,9 @@
*/
enum check_states {
check_state_idle = 0,
- check_state_run, /* parity check */
+ check_state_run, /* xor parity check */
+ check_state_run_q, /* q-parity check */
+ check_state_run_pq, /* pq dual parity check */
check_state_check_result,
check_state_compute_run, /* parity repair */
check_state_compute_result,
@@ -215,8 +218,8 @@ struct stripe_head {
* @target - STRIPE_OP_COMPUTE_BLK target
*/
struct stripe_operations {
- int target;
- u32 zero_sum_result;
+ int target, target2;
+ enum sum_check_flags zero_sum_result;
} ops;
struct r5dev {
struct bio req;
@@ -298,7 +301,7 @@ struct r6_state {
#define STRIPE_OP_COMPUTE_BLK 1
#define STRIPE_OP_PREXOR 2
#define STRIPE_OP_BIODRAIN 3
-#define STRIPE_OP_POSTXOR 4
+#define STRIPE_OP_RECONSTRUCT 4
#define STRIPE_OP_CHECK 5
/*
@@ -385,8 +388,21 @@ struct raid5_private_data {
* (fresh device added).
* Cleared when a sync completes.
*/
-
- struct page *spare_page; /* Used when checking P/Q in raid6 */
+ /* per cpu variables */
+ struct raid5_percpu {
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
+ * conversions
+ */
+ } *percpu;
+ size_t scribble_len; /* size of scribble region must be
+ * associated with conf to handle
+ * cpu hotplug while reshaping
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+ struct notifier_block cpu_notify;
+#endif
/*
* Free stripes pool
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 895e2efca8a..01fc7048474 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -31,10 +31,9 @@
#define DVB_MAJOR 212
#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0
-#define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
+ #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
#else
-#warning invalid CONFIG_DVB_MAX_ADAPTERS value
-#define DVB_MAX_ADAPTERS 8
+ #define DVB_MAX_ADAPTERS 8
#endif
#define DVB_UNSET (-1)
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 0e4b97fba38..9744b069241 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -75,7 +75,7 @@ config DVB_USB_DIB0700
select DVB_DIB3000MC if !DVB_FE_CUSTOMISE
select DVB_S5H1411 if !DVB_FE_CUSTOMISE
select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
- select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
+ select DVB_TUNER_DIB0070
select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 8ffbcecad93..81e623a90f0 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index bd9ab9d0d12..fa6a62369a7 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1367,7 +1367,7 @@ int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level)
&msg, sizeof(msg));
}
-/* new GPIO managment implementation */
+/* new GPIO management implementation */
static int GetGpioPinParams(u32 PinNum, u32 *pTranslatedPinNum,
u32 *pGroupNum, u32 *pGroupCfg) {
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index f1108c64e89..eec18aaf551 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -657,12 +657,12 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev);
extern void smscore_putbuffer(struct smscore_device_t *coredev,
struct smscore_buffer_t *cb);
-/* old GPIO managment */
+/* old GPIO management */
int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
struct smscore_config_gpio *pinconfig);
int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level);
-/* new GPIO managment */
+/* new GPIO management */
extern int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
struct smscore_gpio_config *pGpioConfig);
extern int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 575bf9d8941..a1239083472 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -46,7 +46,7 @@
* Version 0.11: Converted to v4l2_device.
*
* Many things to do:
- * - Correct power managment of device (suspend & resume)
+ * - Correct power management of device (suspend & resume)
* - Add code for scanning and smooth tuning
* - Add code for sensitivity value
* - Correct mistakes
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 356d6896da3..fbdc1cde56a 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1371,7 +1371,7 @@ static struct cx8802_driver cx8802_blackbird_driver = {
.advise_release = cx8802_blackbird_advise_release,
};
-static int blackbird_init(void)
+static int __init blackbird_init(void)
{
printk(KERN_INFO "cx2388x blackbird driver version %d.%d.%d loaded\n",
(CX88_VERSION_CODE >> 16) & 0xff,
@@ -1384,7 +1384,7 @@ static int blackbird_init(void)
return cx8802_register_driver(&cx8802_blackbird_driver);
}
-static void blackbird_fini(void)
+static void __exit blackbird_fini(void)
{
cx8802_unregister_driver(&cx8802_blackbird_driver);
}
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 6e5d142b5b0..518bcfe18bc 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1350,7 +1350,7 @@ static struct cx8802_driver cx8802_dvb_driver = {
.advise_release = cx8802_dvb_advise_release,
};
-static int dvb_init(void)
+static int __init dvb_init(void)
{
printk(KERN_INFO "cx88/2: cx2388x dvb driver version %d.%d.%d loaded\n",
(CX88_VERSION_CODE >> 16) & 0xff,
@@ -1363,7 +1363,7 @@ static int dvb_init(void)
return cx8802_register_driver(&cx8802_dvb_driver);
}
-static void dvb_fini(void)
+static void __exit dvb_fini(void)
{
cx8802_unregister_driver(&cx8802_dvb_driver);
}
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 7172dcf2a4f..de9ff0fc741 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -870,7 +870,7 @@ static struct pci_driver cx8802_pci_driver = {
.remove = __devexit_p(cx8802_remove),
};
-static int cx8802_init(void)
+static int __init cx8802_init(void)
{
printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %d.%d.%d loaded\n",
(CX88_VERSION_CODE >> 16) & 0xff,
@@ -883,7 +883,7 @@ static int cx8802_init(void)
return pci_register_driver(&cx8802_pci_driver);
}
-static void cx8802_fini(void)
+static void __exit cx8802_fini(void)
{
pci_unregister_driver(&cx8802_pci_driver);
}
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 81d2b5dea18..57e6b124109 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -2113,7 +2113,7 @@ static struct pci_driver cx8800_pci_driver = {
#endif
};
-static int cx8800_init(void)
+static int __init cx8800_init(void)
{
printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %d.%d.%d loaded\n",
(CX88_VERSION_CODE >> 16) & 0xff,
@@ -2126,7 +2126,7 @@ static int cx8800_init(void)
return pci_register_driver(&cx8800_pci_driver);
}
-static void cx8800_fini(void)
+static void __exit cx8800_fini(void)
{
pci_unregister_driver(&cx8800_pci_driver);
}
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 8a5bba16ff3..7f1e5415850 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -56,7 +56,7 @@ int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data)
return (err < 0) ? err : 0;
}
-/* Writes a byte to to the m5602 */
+/* Writes a byte to the m5602 */
int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data)
{
int err;
diff --git a/drivers/media/video/saa7164/saa7164-api.c b/drivers/media/video/saa7164/saa7164-api.c
index bb6df1b276b..6f094a96ac8 100644
--- a/drivers/media/video/saa7164/saa7164-api.c
+++ b/drivers/media/video/saa7164/saa7164-api.c
@@ -415,7 +415,7 @@ int saa7164_api_enum_subdevs(struct saa7164_dev *dev)
goto out;
}
- if (debug & DBGLVL_API)
+ if (saa_debug & DBGLVL_API)
saa7164_dumphex16(dev, buf, (buflen/16)*16);
saa7164_api_dump_subdevs(dev, buf, buflen);
@@ -480,7 +480,7 @@ int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);
- if (debug & DBGLVL_I2C)
+ if (saa_debug & DBGLVL_I2C)
saa7164_dumphex16(dev, buf, 2 * 16);
ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR,
@@ -488,7 +488,7 @@ int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
if (ret != SAA_OK)
printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
else {
- if (debug & DBGLVL_I2C)
+ if (saa_debug & DBGLVL_I2C)
saa7164_dumphex16(dev, buf, sizeof(buf));
memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen);
}
@@ -548,7 +548,7 @@ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
*((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen;
memcpy((buf + 2 * sizeof(u32)), data, datalen);
- if (debug & DBGLVL_I2C)
+ if (saa_debug & DBGLVL_I2C)
saa7164_dumphex16(dev, buf, sizeof(buf));
ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR,
diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
index e097f1a0969..c45966edc0c 100644
--- a/drivers/media/video/saa7164/saa7164-cmd.c
+++ b/drivers/media/video/saa7164/saa7164-cmd.c
@@ -250,7 +250,7 @@ int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
unsigned long stamp;
int r;
- if (debug >= 4)
+ if (saa_debug >= 4)
saa7164_bus_dump(dev);
dprintk(DBGLVL_CMD, "%s(seqno=%d)\n", __func__, seqno);
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index f0dbead188c..709affc3104 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -45,8 +45,8 @@ MODULE_LICENSE("GPL");
32 bus
*/
-unsigned int debug;
-module_param(debug, int, 0644);
+unsigned int saa_debug;
+module_param_named(debug, saa_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
unsigned int waitsecs = 10;
@@ -653,7 +653,7 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
printk(KERN_ERR "%s() Unsupported board detected, "
"registering without firmware\n", __func__);
- dprintk(1, "%s() parameter debug = %d\n", __func__, debug);
+ dprintk(1, "%s() parameter debug = %d\n", __func__, saa_debug);
dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs);
fail_fw:
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 6753008a9c9..42660b546f0 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -375,9 +375,9 @@ extern int saa7164_buffer_dealloc(struct saa7164_tsport *port,
/* ----------------------------------------------------------- */
-extern unsigned int debug;
+extern unsigned int saa_debug;
#define dprintk(level, fmt, arg...)\
- do { if (debug & level)\
+ do { if (saa_debug & level)\
printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
} while (0)
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 5ab7c5aefd6..65ac474c517 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -404,7 +404,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
"SuperH Mobile CEU driver attached to camera %d\n",
icd->devnum);
- clk_enable(pcdev->clk);
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
while (ceu_read(pcdev, CSTSR) & 1)
@@ -438,7 +438,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
}
spin_unlock_irqrestore(&pcdev->lock, flags);
- clk_disable(pcdev->clk);
+ pm_runtime_put_sync(ici->v4l2_dev.dev);
dev_info(icd->dev.parent,
"SuperH Mobile CEU driver detached from camera %d\n",
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index 6ba16abeebd..e0f91e4ab65 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -28,7 +28,6 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index f97fd06d594..c19f51dba2e 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/ioport.h>
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 90d9b5c0e9a..a2a50d608a3 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -52,7 +52,6 @@
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a5b448ea4ea..b3bf1c44d74 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -339,9 +339,9 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
card->id.type = id_reg.type;
card->id.category = id_reg.category;
card->id.class = id_reg.class;
+ dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode);
}
complete(&card->mrq_complete);
- dev_dbg(&card->dev, "if_mode = %02x\n", id_reg.if_mode);
return -EAGAIN;
}
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 7847bbc1440..bd83fa0a497 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -235,7 +235,7 @@ static int mspro_block_bd_getgeo(struct block_device *bdev,
return 0;
}
-static struct block_device_operations ms_block_bdops = {
+static const struct block_device_operations ms_block_bdops = {
.open = mspro_block_bd_open,
.release = mspro_block_bd_release,
.getgeo = mspro_block_bd_getgeo,
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 76fa2ee0b57..610e914abe6 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6821,7 +6821,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
*size = y;
}
/**
- * mpt_set_taskmgmt_in_progress_flag - set flags associated with task managment
+ * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
* @ioc: Pointer to MPT_ADAPTER structure
*
* Returns 0 for SUCCESS or -1 if FAILED.
@@ -6854,7 +6854,7 @@ mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
/**
- * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task managment
+ * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 335d4c78a77..d505b68cd37 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -925,7 +925,7 @@ static void i2o_block_request_fn(struct request_queue *q)
};
/* I2O Block device operations definition */
-static struct block_device_operations i2o_block_fops = {
+static const struct block_device_operations i2o_block_fops = {
.owner = THIS_MODULE,
.open = i2o_block_open,
.release = i2o_block_release,
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index c533f86ff5e..5447da16a17 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -647,7 +647,7 @@ struct ab3100_init_setting {
u8 setting;
};
-static const struct ab3100_init_setting __initdata
+static const struct ab3100_init_setting __initconst
ab3100_init_settings[] = {
{
.abreg = AB3100_MCA,
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 016be4938e4..87628891797 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -548,3 +548,4 @@ module_exit(ezx_pcap_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
+MODULE_ALIAS("spi:ezx-pcap");
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 78c2135c5de..2afc08006e6 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -48,9 +48,11 @@ static int ucb1400_core_probe(struct device *dev)
int err;
struct ucb1400 *ucb;
struct ucb1400_ts ucb_ts;
+ struct ucb1400_gpio ucb_gpio;
struct snd_ac97 *ac97;
memset(&ucb_ts, 0, sizeof(ucb_ts));
+ memset(&ucb_gpio, 0, sizeof(ucb_gpio));
ucb = kzalloc(sizeof(struct ucb1400), GFP_KERNEL);
if (!ucb) {
@@ -68,25 +70,44 @@ static int ucb1400_core_probe(struct device *dev)
goto err0;
}
+ /* GPIO */
+ ucb_gpio.ac97 = ac97;
+ ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
+ if (!ucb->ucb1400_gpio) {
+ err = -ENOMEM;
+ goto err0;
+ }
+ err = platform_device_add_data(ucb->ucb1400_gpio, &ucb_gpio,
+ sizeof(ucb_gpio));
+ if (err)
+ goto err1;
+ err = platform_device_add(ucb->ucb1400_gpio);
+ if (err)
+ goto err1;
+
/* TOUCHSCREEN */
ucb_ts.ac97 = ac97;
ucb->ucb1400_ts = platform_device_alloc("ucb1400_ts", -1);
if (!ucb->ucb1400_ts) {
err = -ENOMEM;
- goto err0;
+ goto err2;
}
err = platform_device_add_data(ucb->ucb1400_ts, &ucb_ts,
sizeof(ucb_ts));
if (err)
- goto err1;
+ goto err3;
err = platform_device_add(ucb->ucb1400_ts);
if (err)
- goto err1;
+ goto err3;
return 0;
-err1:
+err3:
platform_device_put(ucb->ucb1400_ts);
+err2:
+ platform_device_unregister(ucb->ucb1400_gpio);
+err1:
+ platform_device_put(ucb->ucb1400_gpio);
err0:
kfree(ucb);
err:
@@ -98,6 +119,8 @@ static int ucb1400_core_remove(struct device *dev)
struct ucb1400 *ucb = dev_get_drvdata(dev);
platform_device_unregister(ucb->ucb1400_ts);
+ platform_device_unregister(ucb->ucb1400_gpio);
+
kfree(ucb);
return 0;
}
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 2e535a0ccd5..d902d81dde3 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -417,4 +417,4 @@ module_exit(at25_exit);
MODULE_DESCRIPTION("Driver for most SPI EEPROMs");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("spi:at25");
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index de966a6fb7e..aecf40ecb3a 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -97,7 +97,7 @@ static int ibmasmfs_get_super(struct file_system_type *fst,
return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt);
}
-static struct super_operations ibmasmfs_s_ops = {
+static const struct super_operations ibmasmfs_s_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 1bfe5d16963..3648b23d5c9 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -283,7 +283,7 @@ static int __init lkdtm_module_init(void)
switch (cpoint) {
case INT_HARDWARE_ENTRY:
- lkdtm.kp.symbol_name = "__do_IRQ";
+ lkdtm.kp.symbol_name = "do_IRQ";
lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
break;
case INT_HW_IRQ_EN:
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 79689b10f93..766e21e1557 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -937,6 +937,8 @@ static int quicktest1(unsigned long arg)
/* Need 1K cacheline aligned that does not cross page boundary */
p = kmalloc(4096, 0);
+ if (p == NULL)
+ return -ENOMEM;
mq = ALIGNUP(p, 1024);
memset(mes, 0xee, sizeof(mes));
dw = mq;
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 9cbf95bedce..ccd4408a26c 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -340,10 +340,9 @@ static struct proc_dir_entry *proc_gru __read_mostly;
static int create_proc_file(struct proc_entry *p)
{
- p->entry = create_proc_entry(p->name, p->mode, proc_gru);
+ p->entry = proc_create(p->name, p->mode, proc_gru, p->fops);
if (!p->entry)
return -1;
- p->entry->proc_fops = p->fops;
return 0;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index adc205c49fb..85f0e8cd875 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -130,7 +130,7 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations mmc_bdops = {
+static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d84c880fac8..7dab2e5f4bc 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -344,6 +344,101 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
EXPORT_SYMBOL(mmc_align_data_size);
/**
+ * mmc_host_enable - enable a host.
+ * @host: mmc host to enable
+ *
+ * Hosts that support power saving can use the 'enable' and 'disable'
+ * methods to exit and enter power saving states. For more information
+ * see comments for struct mmc_host_ops.
+ */
+int mmc_host_enable(struct mmc_host *host)
+{
+ if (!(host->caps & MMC_CAP_DISABLE))
+ return 0;
+
+ if (host->en_dis_recurs)
+ return 0;
+
+ if (host->nesting_cnt++)
+ return 0;
+
+ cancel_delayed_work_sync(&host->disable);
+
+ if (host->enabled)
+ return 0;
+
+ if (host->ops->enable) {
+ int err;
+
+ host->en_dis_recurs = 1;
+ err = host->ops->enable(host);
+ host->en_dis_recurs = 0;
+
+ if (err) {
+ pr_debug("%s: enable error %d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+ }
+ host->enabled = 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_host_enable);
+
+static int mmc_host_do_disable(struct mmc_host *host, int lazy)
+{
+ if (host->ops->disable) {
+ int err;
+
+ host->en_dis_recurs = 1;
+ err = host->ops->disable(host, lazy);
+ host->en_dis_recurs = 0;
+
+ if (err < 0) {
+ pr_debug("%s: disable error %d\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+ if (err > 0) {
+ unsigned long delay = msecs_to_jiffies(err);
+
+ mmc_schedule_delayed_work(&host->disable, delay);
+ }
+ }
+ host->enabled = 0;
+ return 0;
+}
+
+/**
+ * mmc_host_disable - disable a host.
+ * @host: mmc host to disable
+ *
+ * Hosts that support power saving can use the 'enable' and 'disable'
+ * methods to exit and enter power saving states. For more information
+ * see comments for struct mmc_host_ops.
+ */
+int mmc_host_disable(struct mmc_host *host)
+{
+ int err;
+
+ if (!(host->caps & MMC_CAP_DISABLE))
+ return 0;
+
+ if (host->en_dis_recurs)
+ return 0;
+
+ if (--host->nesting_cnt)
+ return 0;
+
+ if (!host->enabled)
+ return 0;
+
+ err = mmc_host_do_disable(host, 0);
+ return err;
+}
+EXPORT_SYMBOL(mmc_host_disable);
+
+/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
* @abort: whether or not the operation should be aborted
@@ -366,25 +461,111 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
- if (stop || !host->claimed)
+ if (stop || !host->claimed || host->claimer == current)
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
spin_lock_irqsave(&host->lock, flags);
}
set_current_state(TASK_RUNNING);
- if (!stop)
+ if (!stop) {
host->claimed = 1;
- else
+ host->claimer = current;
+ host->claim_cnt += 1;
+ } else
wake_up(&host->wq);
spin_unlock_irqrestore(&host->lock, flags);
remove_wait_queue(&host->wq, &wait);
+ if (!stop)
+ mmc_host_enable(host);
return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
+ * mmc_try_claim_host - try exclusively to claim a host
+ * @host: mmc host to claim
+ *
+ * Returns %1 if the host is claimed, %0 otherwise.
+ */
+int mmc_try_claim_host(struct mmc_host *host)
+{
+ int claimed_host = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->claimed || host->claimer == current) {
+ host->claimed = 1;
+ host->claimer = current;
+ host->claim_cnt += 1;
+ claimed_host = 1;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ return claimed_host;
+}
+EXPORT_SYMBOL(mmc_try_claim_host);
+
+static void mmc_do_release_host(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (--host->claim_cnt) {
+ /* Release for nested claim */
+ spin_unlock_irqrestore(&host->lock, flags);
+ } else {
+ host->claimed = 0;
+ host->claimer = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
+ wake_up(&host->wq);
+ }
+}
+
+void mmc_host_deeper_disable(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, disable.work);
+
+ /* If the host is claimed then we do not want to disable it anymore */
+ if (!mmc_try_claim_host(host))
+ return;
+ mmc_host_do_disable(host, 1);
+ mmc_do_release_host(host);
+}
+
+/**
+ * mmc_host_lazy_disable - lazily disable a host.
+ * @host: mmc host to disable
+ *
+ * Hosts that support power saving can use the 'enable' and 'disable'
+ * methods to exit and enter power saving states. For more information
+ * see comments for struct mmc_host_ops.
+ */
+int mmc_host_lazy_disable(struct mmc_host *host)
+{
+ if (!(host->caps & MMC_CAP_DISABLE))
+ return 0;
+
+ if (host->en_dis_recurs)
+ return 0;
+
+ if (--host->nesting_cnt)
+ return 0;
+
+ if (!host->enabled)
+ return 0;
+
+ if (host->disable_delay) {
+ mmc_schedule_delayed_work(&host->disable,
+ msecs_to_jiffies(host->disable_delay));
+ return 0;
+ } else
+ return mmc_host_do_disable(host, 1);
+}
+EXPORT_SYMBOL(mmc_host_lazy_disable);
+
+/**
* mmc_release_host - release a host
* @host: mmc host to release
*
@@ -393,15 +574,11 @@ EXPORT_SYMBOL(__mmc_claim_host);
*/
void mmc_release_host(struct mmc_host *host)
{
- unsigned long flags;
-
WARN_ON(!host->claimed);
- spin_lock_irqsave(&host->lock, flags);
- host->claimed = 0;
- spin_unlock_irqrestore(&host->lock, flags);
+ mmc_host_lazy_disable(host);
- wake_up(&host->wq);
+ mmc_do_release_host(host);
}
EXPORT_SYMBOL(mmc_release_host);
@@ -687,7 +864,13 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
*/
static void mmc_power_up(struct mmc_host *host)
{
- int bit = fls(host->ocr_avail) - 1;
+ int bit;
+
+ /* If ocr is set, we use it */
+ if (host->ocr)
+ bit = ffs(host->ocr) - 1;
+ else
+ bit = fls(host->ocr_avail) - 1;
host->ios.vdd = bit;
if (mmc_host_is_spi(host)) {
@@ -947,6 +1130,8 @@ void mmc_stop_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
#endif
+ if (host->caps & MMC_CAP_DISABLE)
+ cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
@@ -958,6 +1143,8 @@ void mmc_stop_host(struct mmc_host *host)
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_release_host(host);
+ mmc_bus_put(host);
+ return;
}
mmc_bus_put(host);
@@ -966,6 +1153,80 @@ void mmc_stop_host(struct mmc_host *host)
mmc_power_off(host);
}
+void mmc_power_save_host(struct mmc_host *host)
+{
+ mmc_bus_get(host);
+
+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
+ mmc_bus_put(host);
+ return;
+ }
+
+ if (host->bus_ops->power_save)
+ host->bus_ops->power_save(host);
+
+ mmc_bus_put(host);
+
+ mmc_power_off(host);
+}
+EXPORT_SYMBOL(mmc_power_save_host);
+
+void mmc_power_restore_host(struct mmc_host *host)
+{
+ mmc_bus_get(host);
+
+ if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
+ mmc_bus_put(host);
+ return;
+ }
+
+ mmc_power_up(host);
+ host->bus_ops->power_restore(host);
+
+ mmc_bus_put(host);
+}
+EXPORT_SYMBOL(mmc_power_restore_host);
+
+int mmc_card_awake(struct mmc_host *host)
+{
+ int err = -ENOSYS;
+
+ mmc_bus_get(host);
+
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ err = host->bus_ops->awake(host);
+
+ mmc_bus_put(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_card_awake);
+
+int mmc_card_sleep(struct mmc_host *host)
+{
+ int err = -ENOSYS;
+
+ mmc_bus_get(host);
+
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ err = host->bus_ops->sleep(host);
+
+ mmc_bus_put(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_card_sleep);
+
+int mmc_card_can_sleep(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_card_can_sleep);
+
#ifdef CONFIG_PM
/**
@@ -975,27 +1236,36 @@ void mmc_stop_host(struct mmc_host *host)
*/
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
+ int err = 0;
+
+ if (host->caps & MMC_CAP_DISABLE)
+ cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->suspend)
- host->bus_ops->suspend(host);
- if (!host->bus_ops->resume) {
+ err = host->bus_ops->suspend(host);
+ if (err == -ENOSYS || !host->bus_ops->resume) {
+ /*
+ * We simply "remove" the card in this case.
+ * It will be redetected on resume.
+ */
if (host->bus_ops->remove)
host->bus_ops->remove(host);
-
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_release_host(host);
+ err = 0;
}
}
mmc_bus_put(host);
- mmc_power_off(host);
+ if (!err)
+ mmc_power_off(host);
- return 0;
+ return err;
}
EXPORT_SYMBOL(mmc_suspend_host);
@@ -1006,12 +1276,26 @@ EXPORT_SYMBOL(mmc_suspend_host);
*/
int mmc_resume_host(struct mmc_host *host)
{
+ int err = 0;
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
mmc_power_up(host);
mmc_select_voltage(host, host->ocr);
BUG_ON(!host->bus_ops->resume);
- host->bus_ops->resume(host);
+ err = host->bus_ops->resume(host);
+ if (err) {
+ printk(KERN_WARNING "%s: error %d during resume "
+ "(card was removed?)\n",
+ mmc_hostname(host), err);
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_release_host(host);
+ /* no need to bother upper layers */
+ err = 0;
+ }
}
mmc_bus_put(host);
@@ -1021,7 +1305,7 @@ int mmc_resume_host(struct mmc_host *host)
*/
mmc_detect_change(host, 1);
- return 0;
+ return err;
}
EXPORT_SYMBOL(mmc_resume_host);
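The core.c hunks above make host claiming re-entrant: __mmc_claim_host() records the claiming task and a claim_cnt, and mmc_do_release_host() only really releases the host (and wakes waiters) when the count drops to zero, which is what lets mmc_host_deeper_disable() claim the host from a workqueue. A hedged, single-threaded C toy of that nested claim/release counting follows; host_t and its fields are illustrative stand-ins for struct mmc_host, and the locking/waitqueue parts are deliberately omitted.

/*
 * Hedged sketch, not the MMC API: the nested claim/release counting that
 * __mmc_claim_host() and mmc_do_release_host() implement above, reduced to
 * a single-threaded toy so it builds standalone.
 */
#include <stdio.h>

typedef struct {
	int claimed;            /* is somebody holding the host? */
	int claim_cnt;          /* nesting depth of the current holder */
	const char *claimer;    /* stand-in for the owning task pointer */
} host_t;

/* Re-entrant claim: the current owner may claim again without waiting. */
static int host_claim(host_t *h, const char *who)
{
	if (h->claimed && h->claimer != who)
		return -1;      /* the real code sleeps on host->wq here */
	h->claimed = 1;
	h->claimer = who;
	h->claim_cnt++;
	return 0;
}

/* Only the outermost release actually gives the host back. */
static void host_release(host_t *h)
{
	if (--h->claim_cnt)
		return;         /* inner release of a nested claim */
	h->claimed = 0;
	h->claimer = NULL;      /* outer release: the real code wakes waiters */
}

int main(void)
{
	host_t h = { 0, 0, NULL };

	host_claim(&h, "task-A");
	host_claim(&h, "task-A");       /* nested claim by the same owner */
	host_release(&h);
	printf("after inner release: claimed=%d cnt=%d\n", h.claimed, h.claim_cnt);
	host_release(&h);
	printf("after outer release: claimed=%d cnt=%d\n", h.claimed, h.claim_cnt);
	return 0;
}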
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index c819effa103..67ae6abc423 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -16,10 +16,14 @@
#define MMC_CMD_RETRIES 3
struct mmc_bus_ops {
+ int (*awake)(struct mmc_host *);
+ int (*sleep)(struct mmc_host *);
void (*remove)(struct mmc_host *);
void (*detect)(struct mmc_host *);
- void (*suspend)(struct mmc_host *);
- void (*resume)(struct mmc_host *);
+ int (*suspend)(struct mmc_host *);
+ int (*resume)(struct mmc_host *);
+ void (*power_save)(struct mmc_host *);
+ void (*power_restore)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 5e945e64ead..a268d12f1af 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -83,6 +83,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
+ INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
/*
* By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index c2dc3d2d9f9..8c87e1109a3 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,5 +14,7 @@
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
+void mmc_host_deeper_disable(struct work_struct *work);
+
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2fb9d5f271e..bfefce365ae 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -160,7 +160,6 @@ static int mmc_read_ext_csd(struct mmc_card *card)
{
int err;
u8 *ext_csd;
- unsigned int ext_csd_struct;
BUG_ON(!card);
@@ -180,11 +179,11 @@ static int mmc_read_ext_csd(struct mmc_card *card)
err = mmc_send_ext_csd(card, ext_csd);
if (err) {
- /*
- * We all hosts that cannot perform the command
- * to fail more gracefully
- */
- if (err != -EINVAL)
+ /* If the host or the card can't do the switch,
+ * fail more gracefully. */
+ if ((err != -EINVAL)
+ && (err != -ENOSYS)
+ && (err != -EFAULT))
goto out;
/*
@@ -207,16 +206,16 @@ static int mmc_read_ext_csd(struct mmc_card *card)
goto out;
}
- ext_csd_struct = ext_csd[EXT_CSD_REV];
- if (ext_csd_struct > 3) {
+ card->ext_csd.rev = ext_csd[EXT_CSD_REV];
+ if (card->ext_csd.rev > 3) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
- ext_csd_struct);
+ card->ext_csd.rev);
err = -EINVAL;
goto out;
}
- if (ext_csd_struct >= 2) {
+ if (card->ext_csd.rev >= 2) {
card->ext_csd.sectors =
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
@@ -241,6 +240,15 @@ static int mmc_read_ext_csd(struct mmc_card *card)
goto out;
}
+ if (card->ext_csd.rev >= 3) {
+ u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
+
+ /* Sleep / awake timeout in 100ns units */
+ if (sa_shift > 0 && sa_shift <= 0x17)
+ card->ext_csd.sa_timeout =
+ 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
+ }
+
out:
kfree(ext_csd);
@@ -408,12 +416,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, 1);
- if (err)
+ if (err && err != -EBADMSG)
goto free_card;
- mmc_card_set_highspeed(card);
-
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ if (err) {
+ printk(KERN_WARNING "%s: switch to highspeed failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else {
+ mmc_card_set_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ }
}
/*
@@ -448,10 +461,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH, ext_csd_bit);
- if (err)
+ if (err && err != -EBADMSG)
goto free_card;
- mmc_set_bus_width(card->host, bus_width);
+ if (err) {
+ printk(KERN_WARNING "%s: switch to bus width %d "
+ "failed\n", mmc_hostname(card->host),
+ 1 << bus_width);
+ err = 0;
+ } else {
+ mmc_set_bus_width(card->host, bus_width);
+ }
}
if (!oldcard)
@@ -507,12 +527,10 @@ static void mmc_detect(struct mmc_host *host)
}
}
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
/*
* Suspend callback from host.
*/
-static void mmc_suspend(struct mmc_host *host)
+static int mmc_suspend(struct mmc_host *host)
{
BUG_ON(!host);
BUG_ON(!host->card);
@@ -522,6 +540,8 @@ static void mmc_suspend(struct mmc_host *host)
mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
+
+ return 0;
}
/*
@@ -530,7 +550,7 @@ static void mmc_suspend(struct mmc_host *host)
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
-static void mmc_resume(struct mmc_host *host)
+static int mmc_resume(struct mmc_host *host)
{
int err;
@@ -541,30 +561,99 @@ static void mmc_resume(struct mmc_host *host)
err = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
- if (err) {
- mmc_remove(host);
+ return err;
+}
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
+static void mmc_power_restore(struct mmc_host *host)
+{
+ host->card->state &= ~MMC_STATE_HIGHSPEED;
+ mmc_claim_host(host);
+ mmc_init_card(host, host->ocr, host->card);
+ mmc_release_host(host);
+}
+
+static int mmc_sleep(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int err = -ENOSYS;
+
+ if (card && card->ext_csd.rev >= 3) {
+ err = mmc_card_sleepawake(host, 1);
+ if (err < 0)
+ pr_debug("%s: Error %d while putting card into sleep",
+ mmc_hostname(host), err);
}
+ return err;
}
-#else
+static int mmc_awake(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int err = -ENOSYS;
+
+ if (card && card->ext_csd.rev >= 3) {
+ err = mmc_card_sleepawake(host, 0);
+ if (err < 0)
+ pr_debug("%s: Error %d while awaking sleeping card",
+ mmc_hostname(host), err);
+ }
+
+ return err;
+}
-#define mmc_suspend NULL
-#define mmc_resume NULL
+#ifdef CONFIG_MMC_UNSAFE_RESUME
-#endif
+static const struct mmc_bus_ops mmc_ops = {
+ .awake = mmc_awake,
+ .sleep = mmc_sleep,
+ .remove = mmc_remove,
+ .detect = mmc_detect,
+ .suspend = mmc_suspend,
+ .resume = mmc_resume,
+ .power_restore = mmc_power_restore,
+};
+
+static void mmc_attach_bus_ops(struct mmc_host *host)
+{
+ mmc_attach_bus(host, &mmc_ops);
+}
+
+#else
static const struct mmc_bus_ops mmc_ops = {
+ .awake = mmc_awake,
+ .sleep = mmc_sleep,
+ .remove = mmc_remove,
+ .detect = mmc_detect,
+ .suspend = NULL,
+ .resume = NULL,
+ .power_restore = mmc_power_restore,
+};
+
+static const struct mmc_bus_ops mmc_ops_unsafe = {
+ .awake = mmc_awake,
+ .sleep = mmc_sleep,
.remove = mmc_remove,
.detect = mmc_detect,
.suspend = mmc_suspend,
.resume = mmc_resume,
+ .power_restore = mmc_power_restore,
};
+static void mmc_attach_bus_ops(struct mmc_host *host)
+{
+ const struct mmc_bus_ops *bus_ops;
+
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ bus_ops = &mmc_ops_unsafe;
+ else
+ bus_ops = &mmc_ops;
+ mmc_attach_bus(host, bus_ops);
+}
+
+#endif
+
/*
* Starting point for MMC card init.
*/
@@ -575,7 +664,7 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
BUG_ON(!host);
WARN_ON(!host->claimed);
- mmc_attach_bus(host, &mmc_ops);
+ mmc_attach_bus_ops(host);
/*
* We need to get OCR a different way for SPI.
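In the mmc.c hunks above the CONFIG_MMC_UNSAFE_RESUME split is reworked so that, even with the option off, hosts flagged MMC_CAP_NONREMOVABLE get a bus-ops table with real suspend/resume hooks, while removable slots keep the remove-on-suspend, redetect-on-resume behaviour. A hedged standalone sketch of that capability-keyed ops-table selection follows; bus_ops, pick_bus_ops and the flag are illustrative names, not the kernel API.

/*
 * Hedged sketch: selecting an ops table from a "nonremovable" capability,
 * as mmc_attach_bus_ops() does above.  All names here are stand-ins.
 */
#include <stdio.h>

struct bus_ops {
	const char *name;
	int (*suspend)(void);   /* NULL means "remove the card on suspend" */
	int (*resume)(void);
};

static int card_suspend(void) { return 0; }
static int card_resume(void)  { return 0; }

static const struct bus_ops ops_safe   = { "safe",   NULL,         NULL        };
static const struct bus_ops ops_unsafe = { "unsafe", card_suspend, card_resume };

/* Nonremovable slots keep the same card across suspend, so they may use the
 * table with real suspend/resume hooks; removable slots fall back to the
 * remove-and-redetect behaviour. */
static const struct bus_ops *pick_bus_ops(int nonremovable)
{
	return nonremovable ? &ops_unsafe : &ops_safe;
}

int main(void)
{
	printf("removable slot  -> %s ops\n", pick_bus_ops(0)->name);
	printf("eMMC-style slot -> %s ops\n", pick_bus_ops(1)->name);
	return 0;
}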
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 34ce2703d29..d2cb5c63439 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -57,6 +57,42 @@ int mmc_deselect_cards(struct mmc_host *host)
return _mmc_select_card(host, NULL);
}
+int mmc_card_sleepawake(struct mmc_host *host, int sleep)
+{
+ struct mmc_command cmd;
+ struct mmc_card *card = host->card;
+ int err;
+
+ if (sleep)
+ mmc_deselect_cards(host);
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SLEEP_AWAKE;
+ cmd.arg = card->rca << 16;
+ if (sleep)
+ cmd.arg |= 1 << 15;
+
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err)
+ return err;
+
+ /*
+ * If the host does not wait while the card signals busy, then we will
* have to wait the sleep/awake timeout. Note, we cannot use the
+ * SEND_STATUS command to poll the status because that command (and most
+ * others) is invalid while the card sleeps.
+ */
+ if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
+ mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
+
+ if (!sleep)
+ err = mmc_select_card(card);
+
+ return err;
+}
+
int mmc_go_idle(struct mmc_host *host)
{
int err;
@@ -354,6 +390,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
{
int err;
struct mmc_command cmd;
+ u32 status;
BUG_ON(!card);
BUG_ON(!card->host);
@@ -371,6 +408,28 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
if (err)
return err;
+ /* Must check status to be sure of no errors */
+ do {
+ err = mmc_send_status(card, &status);
+ if (err)
+ return err;
+ if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ break;
+ if (mmc_host_is_spi(card->host))
+ break;
+ } while (R1_CURRENT_STATE(status) == 7);
+
+ if (mmc_host_is_spi(card->host)) {
+ if (status & R1_SPI_ILLEGAL_COMMAND)
+ return -EBADMSG;
+ } else {
+ if (status & 0xFDFFA000)
+ printk(KERN_WARNING "%s: unexpected status %#x after "
+ "switch", mmc_hostname(card->host), status);
+ if (status & R1_SWITCH_ERROR)
+ return -EBADMSG;
+ }
+
return 0;
}
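mmc_card_sleepawake() above can only busy-wait the sleep/awake timeout when the host lacks MMC_CAP_WAIT_WHILE_BUSY: the mmc.c hunk earlier decodes ext_csd.sa_timeout as 1 << EXT_CSD_S_A_TIMEOUT in 100 ns units, and DIV_ROUND_UP(sa_timeout, 10000) turns that into whole milliseconds. A small standalone arithmetic check, with 0x11 as an arbitrary example register value:

/*
 * Hedged sketch: the unit conversion behind
 * mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)) above.
 * sa_timeout is 1 << EXT_CSD_S_A_TIMEOUT in 100 ns units, and
 * 10000 * 100 ns == 1 ms.  The register value 0x11 is just an example.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int s_a_timeout = 0x11;                /* example EXT_CSD byte */
	unsigned long sa_timeout = 1UL << s_a_timeout;  /* 131072 x 100 ns */
	unsigned long delay_ms = DIV_ROUND_UP(sa_timeout, 10000);

	printf("sa_timeout = %lu x 100ns -> wait %lu ms\n", sa_timeout, delay_ms);
	return 0;
}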
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 17854bf7cf0..653eb8e8417 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -25,6 +25,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
+int mmc_card_sleepawake(struct mmc_host *host, int sleep);
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 7ad646fe077..10b2a4d20f5 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -210,11 +210,11 @@ static int mmc_read_switch(struct mmc_card *card)
err = mmc_sd_switch(card, 0, 0, 1, status);
if (err) {
- /*
- * We all hosts that cannot perform the command
- * to fail more gracefully
- */
- if (err != -EINVAL)
+ /* If the host or the card can't do the switch,
+ * fail more gracefully. */
+ if ((err != -EINVAL)
+ && (err != -ENOSYS)
+ && (err != -EFAULT))
goto out;
printk(KERN_WARNING "%s: problem reading switch "
@@ -561,12 +561,10 @@ static void mmc_sd_detect(struct mmc_host *host)
}
}
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
/*
* Suspend callback from host.
*/
-static void mmc_sd_suspend(struct mmc_host *host)
+static int mmc_sd_suspend(struct mmc_host *host)
{
BUG_ON(!host);
BUG_ON(!host->card);
@@ -576,6 +574,8 @@ static void mmc_sd_suspend(struct mmc_host *host)
mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
+
+ return 0;
}
/*
@@ -584,7 +584,7 @@ static void mmc_sd_suspend(struct mmc_host *host)
* This function tries to determine if the same card is still present
* and, if so, restore all state to it.
*/
-static void mmc_sd_resume(struct mmc_host *host)
+static int mmc_sd_resume(struct mmc_host *host)
{
int err;
@@ -595,30 +595,63 @@ static void mmc_sd_resume(struct mmc_host *host)
err = mmc_sd_init_card(host, host->ocr, host->card);
mmc_release_host(host);
- if (err) {
- mmc_sd_remove(host);
-
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
- }
+ return err;
+}
+static void mmc_sd_power_restore(struct mmc_host *host)
+{
+ host->card->state &= ~MMC_STATE_HIGHSPEED;
+ mmc_claim_host(host);
+ mmc_sd_init_card(host, host->ocr, host->card);
+ mmc_release_host(host);
}
-#else
+#ifdef CONFIG_MMC_UNSAFE_RESUME
-#define mmc_sd_suspend NULL
-#define mmc_sd_resume NULL
+static const struct mmc_bus_ops mmc_sd_ops = {
+ .remove = mmc_sd_remove,
+ .detect = mmc_sd_detect,
+ .suspend = mmc_sd_suspend,
+ .resume = mmc_sd_resume,
+ .power_restore = mmc_sd_power_restore,
+};
-#endif
+static void mmc_sd_attach_bus_ops(struct mmc_host *host)
+{
+ mmc_attach_bus(host, &mmc_sd_ops);
+}
+
+#else
static const struct mmc_bus_ops mmc_sd_ops = {
.remove = mmc_sd_remove,
.detect = mmc_sd_detect,
+ .suspend = NULL,
+ .resume = NULL,
+ .power_restore = mmc_sd_power_restore,
+};
+
+static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
+ .remove = mmc_sd_remove,
+ .detect = mmc_sd_detect,
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
+ .power_restore = mmc_sd_power_restore,
};
+static void mmc_sd_attach_bus_ops(struct mmc_host *host)
+{
+ const struct mmc_bus_ops *bus_ops;
+
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ bus_ops = &mmc_sd_ops_unsafe;
+ else
+ bus_ops = &mmc_sd_ops;
+ mmc_attach_bus(host, bus_ops);
+}
+
+#endif
+
/*
* Starting point for SD card init.
*/
@@ -629,7 +662,7 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
BUG_ON(!host);
WARN_ON(!host->claimed);
- mmc_attach_bus(host, &mmc_sd_ops);
+ mmc_sd_attach_bus_ops(host);
/*
* We need to get OCR a different way for SPI.
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index fb99ccff908..cdb845b68ab 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -165,6 +165,29 @@ static int sdio_enable_wide(struct mmc_card *card)
}
/*
+ * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1)
+ * of the card. This may be required on certain setups of boards,
+ * controllers and embedded sdio devices which do not need the card's
+ * pull-up. As a result, card detection is disabled and power is saved.
+ */
+static int sdio_disable_cd(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!card->cccr.disable_cd)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ ctrl |= SDIO_BUS_CD_DISABLE;
+
+ return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+}
+
+/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
static int sdio_enable_hs(struct mmc_card *card)
@@ -195,6 +218,135 @@ static int sdio_enable_hs(struct mmc_card *card)
}
/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+ * we're trying to reinitialise.
+ */
+static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
+ struct mmc_card *oldcard)
+{
+ struct mmc_card *card;
+ int err;
+
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+ /*
+ * Inform the card of the voltage
+ */
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+
+ /*
+ * For SPI, enable CRC as appropriate.
+ */
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Allocate card structure.
+ */
+ card = mmc_alloc_card(host, NULL);
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto err;
+ }
+
+ card->type = MMC_TYPE_SDIO;
+
+ /*
+ * For native busses: set card RCA and quit open drain mode.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto remove;
+
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+
+ /*
+ * Select card, as all following commands rely on that.
+ */
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto remove;
+ }
+
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card);
+ if (err)
+ goto remove;
+
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+
+ if (oldcard) {
+ int same = (card->cis.vendor == oldcard->cis.vendor &&
+ card->cis.device == oldcard->cis.device);
+ mmc_remove_card(card);
+ if (!same) {
+ err = -ENOENT;
+ goto err;
+ }
+ card = oldcard;
+ return 0;
+ }
+
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err)
+ goto remove;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ if (mmc_card_highspeed(card)) {
+ /*
+ * The SDIO specification doesn't mention how
+ * the CIS transfer speed register relates to
+ * high-speed, but it seems that 50 MHz is
+ * mandatory.
+ */
+ mmc_set_clock(host, 50000000);
+ } else {
+ mmc_set_clock(host, card->cis.max_dtr);
+ }
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ err = sdio_enable_wide(card);
+ if (err)
+ goto remove;
+
+ if (!oldcard)
+ host->card = card;
+ return 0;
+
+remove:
+ if (!oldcard)
+ mmc_remove_card(card);
+
+err:
+ return err;
+}
+
+/*
* Host is being removed. Free up the current card.
*/
static void mmc_sdio_remove(struct mmc_host *host)
@@ -243,10 +395,77 @@ static void mmc_sdio_detect(struct mmc_host *host)
}
}
+/*
+ * SDIO suspend. We need to suspend all functions separately.
+ * Therefore all registered functions must have drivers with suspend
+ * and resume methods. Failing that we simply remove the whole card.
+ */
+static int mmc_sdio_suspend(struct mmc_host *host)
+{
+ int i, err = 0;
+
+ for (i = 0; i < host->card->sdio_funcs; i++) {
+ struct sdio_func *func = host->card->sdio_func[i];
+ if (func && sdio_func_present(func) && func->dev.driver) {
+ const struct dev_pm_ops *pmops = func->dev.driver->pm;
+ if (!pmops || !pmops->suspend || !pmops->resume) {
+ /* force removal of entire card in that case */
+ err = -ENOSYS;
+ } else
+ err = pmops->suspend(&func->dev);
+ if (err)
+ break;
+ }
+ }
+ while (err && --i >= 0) {
+ struct sdio_func *func = host->card->sdio_func[i];
+ if (func && sdio_func_present(func) && func->dev.driver) {
+ const struct dev_pm_ops *pmops = func->dev.driver->pm;
+ pmops->resume(&func->dev);
+ }
+ }
+
+ return err;
+}
+
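A minimal sketch (not from this patch) of an SDIO function driver that satisfies the rule enforced above: both dev_pm_ops callbacks are supplied through sdio_driver.drv.pm, so mmc_sdio_suspend() suspends the function instead of returning -ENOSYS and forcing removal of the whole card. All names are hypothetical and the probe/remove bodies are stubs.

    #include <linux/mmc/sdio_func.h>
    #include <linux/mmc/sdio_ids.h>
    #include <linux/pm.h>

    static int example_sdio_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
    {
            return 0;       /* claim and set up the function here */
    }

    static void example_sdio_remove(struct sdio_func *func)
    {
    }

    static int example_sdio_suspend(struct device *dev)
    {
            /* quiesce the function; its state survives the suspend */
            return 0;
    }

    static int example_sdio_resume(struct device *dev)
    {
            /* re-check card identity (e.g. MAC address) and restore state */
            return 0;
    }

    static const struct dev_pm_ops example_sdio_pm_ops = {
            .suspend        = example_sdio_suspend,
            .resume         = example_sdio_resume,
    };

    static struct sdio_driver example_sdio_driver = {
            .name           = "example_sdio",
            .probe          = example_sdio_probe,
            .remove         = example_sdio_remove,
            .drv            = {
                    .pm     = &example_sdio_pm_ops,
            },
    };
    /* module init would call sdio_register_driver(&example_sdio_driver); */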
+static int mmc_sdio_resume(struct mmc_host *host)
+{
+ int i, err;
+
+ BUG_ON(!host);
+ BUG_ON(!host->card);
+
+ /* Basic card reinitialization. */
+ mmc_claim_host(host);
+ err = mmc_sdio_init_card(host, host->ocr, host->card);
+ mmc_release_host(host);
+
+ /*
+ * If the card appears to be the same as before suspending, then
+ * we proceed to resume all card functions. If one of them returns
+ * an error then we simply return that error to the core and the
+ * card will be redetected as new. It is the responsibility of
+ * the function driver to perform further tests with the extra
+ * knowledge it has of the card to confirm the card is indeed the
+ * same as before suspending (same MAC address for network cards,
+ * etc.) and return an error otherwise.
+ */
+ for (i = 0; !err && i < host->card->sdio_funcs; i++) {
+ struct sdio_func *func = host->card->sdio_func[i];
+ if (func && sdio_func_present(func) && func->dev.driver) {
+ const struct dev_pm_ops *pmops = func->dev.driver->pm;
+ err = pmops->resume(&func->dev);
+ }
+ }
+
+ return err;
+}
static const struct mmc_bus_ops mmc_sdio_ops = {
.remove = mmc_sdio_remove,
.detect = mmc_sdio_detect,
+ .suspend = mmc_sdio_suspend,
+ .resume = mmc_sdio_resume,
};
@@ -275,13 +494,6 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
ocr &= ~0x7F;
}
- if (ocr & MMC_VDD_165_195) {
- printk(KERN_WARNING "%s: SDIO card claims to support the "
- "incompletely defined 'low voltage range'. This "
- "will be ignored.\n", mmc_hostname(host));
- ocr &= ~MMC_VDD_165_195;
- }
-
host->ocr = mmc_select_voltage(host, ocr);
/*
@@ -293,101 +505,23 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
}
/*
- * Inform the card of the voltage
+ * Detect and init the card.
*/
- err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ err = mmc_sdio_init_card(host, host->ocr, NULL);
if (err)
goto err;
-
- /*
- * For SPI, enable CRC as appropriate.
- */
- if (mmc_host_is_spi(host)) {
- err = mmc_spi_set_crc(host, use_spi_crc);
- if (err)
- goto err;
- }
+ card = host->card;
/*
* The number of functions on the card is encoded inside
* the ocr.
*/
- funcs = (ocr & 0x70000000) >> 28;
-
- /*
- * Allocate card structure.
- */
- card = mmc_alloc_card(host, NULL);
- if (IS_ERR(card)) {
- err = PTR_ERR(card);
- goto err;
- }
-
- card->type = MMC_TYPE_SDIO;
- card->sdio_funcs = funcs;
-
- host->card = card;
-
- /*
- * For native busses: set card RCA and quit open drain mode.
- */
- if (!mmc_host_is_spi(host)) {
- err = mmc_send_relative_addr(host, &card->rca);
- if (err)
- goto remove;
-
- mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
- }
-
- /*
- * Select card, as all following commands rely on that.
- */
- if (!mmc_host_is_spi(host)) {
- err = mmc_select_card(card);
- if (err)
- goto remove;
- }
+ card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28;
/*
- * Read the common registers.
+ * If needed, disconnect card detection pull-up resistor.
*/
- err = sdio_read_cccr(card);
- if (err)
- goto remove;
-
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
-
- /*
- * Switch to high-speed (if supported).
- */
- err = sdio_enable_hs(card);
- if (err)
- goto remove;
-
- /*
- * Change to the card's maximum speed.
- */
- if (mmc_card_highspeed(card)) {
- /*
- * The SDIO specification doesn't mention how
- * the CIS transfer speed register relates to
- * high-speed, but it seems that 50 MHz is
- * mandatory.
- */
- mmc_set_clock(host, 50000000);
- } else {
- mmc_set_clock(host, card->cis.max_dtr);
- }
-
- /*
- * Switch to wider bus (if supported).
- */
- err = sdio_enable_wide(card);
+ err = sdio_disable_cd(card);
if (err)
goto remove;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 46284b52739..d37464e296a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -20,9 +20,6 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
-#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
-#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
-
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 963f2937c5e..6636354b48c 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -40,7 +40,7 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
nr_strings++;
}
- if (buf[i-1] != '\0') {
+ if (nr_strings < 4) {
printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n");
return 0;
}
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index f61fc2d4cd0..f9aa8a7deff 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -624,7 +624,7 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
BUG_ON(!func);
- if (addr < 0xF0 || addr > 0xFF) {
+ if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
if (err_ret)
*err_ret = -EINVAL;
return;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 891ef18bd77..7cb057f3f88 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -132,11 +132,11 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on ARCH_OMAP2430 || ARCH_OMAP3
+ depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
help
This selects the TI OMAP High Speed Multimedia card Interface.
- If you have an OMAP2430 or OMAP3 board with a Multimedia Card slot,
- say Y or M here.
+ If you have an OMAP2430, OMAP3 or OMAP4 board with a
+ Multimedia Card slot, say Y or M here.
If unsure, say N.
@@ -160,6 +160,12 @@ config MMC_AU1X
If unsure, say N.
+choice
+ prompt "Atmel SD/MMC Driver"
+ default MMC_ATMELMCI if AVR32
+ help
+ Choose which driver to use for the Atmel MCI silicon.
+
config MMC_AT91
tristate "AT91 SD/MMC Card Interface support"
depends on ARCH_AT91
@@ -170,17 +176,19 @@ config MMC_AT91
config MMC_ATMELMCI
tristate "Atmel Multimedia Card Interface support"
- depends on AVR32
+ depends on AVR32 || ARCH_AT91
help
This selects the Atmel Multimedia Card Interface driver. If
- you have an AT32 (AVR32) platform with a Multimedia Card
- slot, say Y or M here.
+ you have an AT32 (AVR32) or AT91 platform with a Multimedia
+ Card slot, say Y or M here.
If unsure, say N.
+endchoice
+
config MMC_ATMELMCI_DMA
bool "Atmel MCI DMA support (EXPERIMENTAL)"
- depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL
+ depends on MMC_ATMELMCI && AVR32 && DMA_ENGINE && EXPERIMENTAL
help
Say Y here to have the Atmel MCI driver use a DMA engine to
do data transfers and thus increase the throughput and
@@ -199,6 +207,13 @@ config MMC_IMX
If unsure, say N.
+config MMC_MSM7X00A
+ tristate "Qualcomm MSM 7X00A SDCC Controller Support"
+ depends on MMC && ARCH_MSM
+ help
+ This provides support for the SD/MMC cell found in the
+ MSM 7X00A controllers from Qualcomm.
+
config MMC_MXC
tristate "Freescale i.MX2/3 Multimedia Card Interface support"
depends on ARCH_MXC
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index cf153f62845..abcb0400e06 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
obj-$(CONFIG_MMC_AT91) += at91_mci.o
obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
+obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
ifeq ($(CONFIG_OF),y)
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 7b603e4b41d..fc25586b7ee 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -30,6 +30,7 @@
#include <asm/io.h>
#include <asm/unaligned.h>
+#include <mach/cpu.h>
#include <mach/board.h>
#include "atmel-mci-regs.h"
@@ -210,6 +211,18 @@ struct atmel_mci_slot {
set_bit(event, &host->pending_events)
/*
+ * Enable or disable features/registers based on
+ * whether the processor supports them
+ */
+static bool mci_has_rwproof(void)
+{
+ if (cpu_is_at91sam9261() || cpu_is_at91rm9200())
+ return false;
+ else
+ return true;
+}
+
+/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
*/
@@ -276,8 +289,13 @@ static void atmci_show_status_reg(struct seq_file *s,
[3] = "BLKE",
[4] = "DTIP",
[5] = "NOTBUSY",
+ [6] = "ENDRX",
+ [7] = "ENDTX",
[8] = "SDIOIRQA",
[9] = "SDIOIRQB",
+ [12] = "SDIOWAIT",
+ [14] = "RXBUFF",
+ [15] = "TXBUFE",
[16] = "RINDE",
[17] = "RDIRE",
[18] = "RCRCE",
@@ -285,6 +303,11 @@ static void atmci_show_status_reg(struct seq_file *s,
[20] = "RTOE",
[21] = "DCRCE",
[22] = "DTOE",
+ [23] = "CSTOE",
+ [24] = "BLKOVRE",
+ [25] = "DMADONE",
+ [26] = "FIFOEMPTY",
+ [27] = "XFRDONE",
[30] = "OVRE",
[31] = "UNRE",
};
@@ -576,6 +599,7 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
struct scatterlist *sg;
unsigned int i;
enum dma_data_direction direction;
+ unsigned int sglen;
/*
* We don't do DMA on "complex" transfers, i.e. with
@@ -605,11 +629,14 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
else
direction = DMA_TO_DEVICE;
+ sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
+ if (sglen != data->sg_len)
+ goto unmap_exit;
desc = chan->device->device_prep_slave_sg(chan,
data->sg, data->sg_len, direction,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
- return -ENOMEM;
+ goto unmap_exit;
host->dma.data_desc = desc;
desc->callback = atmci_dma_complete;
@@ -620,6 +647,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
chan->device->device_issue_pending(chan);
return 0;
+unmap_exit:
+ dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
+ return -ENOMEM;
}
#else /* CONFIG_MMC_ATMELMCI_DMA */
@@ -849,13 +879,15 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clkdiv = 255;
}
+ host->mode_reg = MCI_MR_CLKDIV(clkdiv);
+
/*
* WRPROOF and RDPROOF prevent overruns/underruns by
* stopping the clock when the FIFO is full/empty.
* This state is not expected to last for long.
*/
- host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF
- | MCI_MR_RDPROOF;
+ if (mci_has_rwproof())
+ host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
if (list_empty(&host->queue))
mci_writel(host, MR, host->mode_reg);
@@ -1648,8 +1680,10 @@ static int __init atmci_probe(struct platform_device *pdev)
nr_slots++;
}
- if (!nr_slots)
+ if (!nr_slots) {
+ dev_err(&pdev->dev, "init failed: no slot defined\n");
goto err_init_slot;
+ }
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a461017ce5c..d55fe4fb793 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1562,3 +1562,4 @@ MODULE_AUTHOR("Mike Lavender, David Brownell, "
"Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:mmc_spi");
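A brief, hedged illustration (not from this patch) of what the new MODULE_ALIAS line enables: when board code registers an SPI device whose modalias is "mmc_spi", the SPI core reports MODALIAS=spi:mmc_spi in the uevent and udev can now autoload this driver. The bus number, chip select and clock limit below are made-up board values.

    #include <linux/spi/spi.h>

    /* Hypothetical board-setup fragment registering an SD card socket
     * wired to an SPI controller. */
    static struct spi_board_info example_spi_devices[] __initdata = {
            {
                    .modalias       = "mmc_spi",
                    .max_speed_hz   = 25000000,     /* assumed wiring limit */
                    .bus_num        = 0,
                    .chip_select    = 1,
                    .mode           = SPI_MODE_0,
            },
    };
    /* registered at board init via
     * spi_register_board_info(example_spi_devices,
     *                         ARRAY_SIZE(example_spi_devices)); */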
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 8741d0f5146..3d1e5329da1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -22,12 +22,13 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
+#include <linux/amba/mmci.h>
+#include <linux/regulator/consumer.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <asm/mach/mmc.h>
#include "mmci.h"
@@ -38,6 +39,36 @@
static unsigned int fmax = 515633;
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
+{
+ u32 clk = 0;
+
+ if (desired) {
+ if (desired >= host->mclk) {
+ clk = MCI_CLK_BYPASS;
+ host->cclk = host->mclk;
+ } else {
+ clk = host->mclk / (2 * desired) - 1;
+ if (clk >= 256)
+ clk = 255;
+ host->cclk = host->mclk / (2 * (clk + 1));
+ }
+ if (host->hw_designer == 0x80)
+ clk |= MCI_FCEN; /* Bug fix in ST IP block */
+ clk |= MCI_CLK_ENABLE;
+ /* This hasn't proven to be worthwhile */
+ /* clk |= MCI_CLK_PWRSAVE; */
+ }
+
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ clk |= MCI_WIDE_BUS;
+
+ writel(clk, host->base + MMCICLOCK);
+}
+
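A worked numeric example may help here; the rates are assumed, not taken from the patch.

    /*
     * Worked example (assumed rates): with mclk = 48 MHz and a requested
     * 400 kHz identification clock,
     *
     *      clk  = 48000000 / (2 * 400000) - 1 = 59
     *      cclk = 48000000 / (2 * (59 + 1))   = 400 kHz
     *
     * Requests at or above mclk take the MCI_CLK_BYPASS path and run the
     * card clock at mclk; divider values above 255 are clamped to 255.
     */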
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
@@ -419,30 +450,31 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
- u32 clk = 0, pwr = 0;
-
- if (ios->clock) {
- if (ios->clock >= host->mclk) {
- clk = MCI_CLK_BYPASS;
- host->cclk = host->mclk;
- } else {
- clk = host->mclk / (2 * ios->clock) - 1;
- if (clk >= 256)
- clk = 255;
- host->cclk = host->mclk / (2 * (clk + 1));
- }
- if (host->hw_designer == AMBA_VENDOR_ST)
- clk |= MCI_FCEN; /* Bug fix in ST IP block */
- clk |= MCI_CLK_ENABLE;
- }
-
- if (host->plat->translate_vdd)
- pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
+ u32 pwr = 0;
+ unsigned long flags;
switch (ios->power_mode) {
case MMC_POWER_OFF:
+ if (host->vcc &&
+ regulator_is_enabled(host->vcc))
+ regulator_disable(host->vcc);
break;
case MMC_POWER_UP:
+#ifdef CONFIG_REGULATOR
+ if (host->vcc)
+ /* This implicitly enables the regulator */
+ mmc_regulator_set_ocr(host->vcc, ios->vdd);
+#endif
+ /*
+ * The translate_vdd function is not used if you have
+ * an external regulator, or your design is really weird.
+ * Using it would mean sending in power control BOTH using
+ * a regulator AND the 4 MMCIPWR bits. If we don't have
+ * a regulator, we might have some other platform specific
+ * power control behind this translate function.
+ */
+ if (!host->vcc && host->plat->translate_vdd)
+ pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
/* The ST version does not have this, fall through to POWER_ON */
if (host->hw_designer != AMBA_VENDOR_ST) {
pwr |= MCI_PWR_UP;
@@ -465,12 +497,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- writel(clk, host->base + MMCICLOCK);
+ spin_lock_irqsave(&host->lock, flags);
+
+ mmci_set_clkreg(host, ios->clock);
if (host->pwr != pwr) {
host->pwr = pwr;
writel(pwr, host->base + MMCIPOWER);
}
+
+ spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_ro(struct mmc_host *mmc)
@@ -517,7 +553,7 @@ static void mmci_check_status(unsigned long data)
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
- struct mmc_platform_data *plat = dev->dev.platform_data;
+ struct mmci_platform_data *plat = dev->dev.platform_data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
@@ -583,7 +619,30 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
mmc->ops = &mmci_ops;
mmc->f_min = (host->mclk + 511) / 512;
mmc->f_max = min(host->mclk, fmax);
- mmc->ocr_avail = plat->ocr_mask;
+#ifdef CONFIG_REGULATOR
+ /* If we're using the regulator framework, try to fetch a regulator */
+ host->vcc = regulator_get(&dev->dev, "vmmc");
+ if (IS_ERR(host->vcc))
+ host->vcc = NULL;
+ else {
+ int mask = mmc_regulator_get_ocrmask(host->vcc);
+
+ if (mask < 0)
+ dev_err(&dev->dev, "error getting OCR mask (%d)\n",
+ mask);
+ else {
+ host->mmc->ocr_avail = (u32) mask;
+ if (plat->ocr_mask)
+ dev_warn(&dev->dev,
+ "Provided ocr_mask/setpower will not be used "
+ "(using regulator instead)\n");
+ }
+ }
+#endif
+ /* Fall back to platform data if no regulator is found */
+ if (host->vcc == NULL)
+ mmc->ocr_avail = plat->ocr_mask;
+ mmc->caps = plat->capabilities;
/*
* We can do SGIO
@@ -720,6 +779,10 @@ static int __devexit mmci_remove(struct amba_device *dev)
clk_disable(host->clk);
clk_put(host->clk);
+ if (host->vcc) {
+ if (regulator_is_enabled(host->vcc))
+ regulator_disable(host->vcc);
+ regulator_put(host->vcc);
+ }
+
mmc_free_host(mmc);
amba_release_regions(dev);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 839f264c972..1ceb9a90f59 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -161,7 +161,7 @@ struct mmci_host {
unsigned int mclk;
unsigned int cclk;
u32 pwr;
- struct mmc_platform_data *plat;
+ struct mmci_platform_data *plat;
u8 hw_designer;
u8 hw_revision:4;
@@ -175,6 +175,7 @@ struct mmci_host {
struct scatterlist *sg_ptr;
unsigned int sg_off;
unsigned int size;
+ struct regulator *vcc;
};
static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
new file mode 100644
index 00000000000..dba4600bcdb
--- /dev/null
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -0,0 +1,1287 @@
+/*
+ * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
+ *
+ * Copyright (C) 2007 Google Inc,
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on mmci.c
+ *
+ * Author: San Mehat (san@android.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/memory.h>
+
+#include <asm/cacheflush.h>
+#include <asm/div64.h>
+#include <asm/sizes.h>
+
+#include <asm/mach/mmc.h>
+#include <mach/msm_iomap.h>
+#include <mach/dma.h>
+#include <mach/htc_pwrsink.h>
+
+#include "msm_sdcc.h"
+
+#define DRIVER_NAME "msm-sdcc"
+
+static unsigned int msmsdcc_fmin = 144000;
+static unsigned int msmsdcc_fmax = 50000000;
+static unsigned int msmsdcc_4bit = 1;
+static unsigned int msmsdcc_pwrsave = 1;
+static unsigned int msmsdcc_piopoll = 1;
+static unsigned int msmsdcc_sdioirq;
+
+#define PIO_SPINMAX 30
+#define CMD_SPINMAX 20
+
+static void
+msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
+ u32 c);
+
+static void
+msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
+{
+ writel(0, host->base + MMCICOMMAND);
+
+ BUG_ON(host->curr.data);
+
+ host->curr.mrq = NULL;
+ host->curr.cmd = NULL;
+
+ if (mrq->data)
+ mrq->data->bytes_xfered = host->curr.data_xfered;
+ if (mrq->cmd->error == -ETIMEDOUT)
+ mdelay(5);
+
+ /*
+ * Need to drop the host lock here; mmc_request_done may call
+ * back into the driver...
+ */
+ spin_unlock(&host->lock);
+ mmc_request_done(host->mmc, mrq);
+ spin_lock(&host->lock);
+}
+
+static void
+msmsdcc_stop_data(struct msmsdcc_host *host)
+{
+ writel(0, host->base + MMCIDATACTRL);
+ host->curr.data = NULL;
+ host->curr.got_dataend = host->curr.got_datablkend = 0;
+}
+
+uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
+{
+ switch (host->pdev_id) {
+ case 1:
+ return MSM_SDC1_PHYS + MMCIFIFO;
+ case 2:
+ return MSM_SDC2_PHYS + MMCIFIFO;
+ case 3:
+ return MSM_SDC3_PHYS + MMCIFIFO;
+ case 4:
+ return MSM_SDC4_PHYS + MMCIFIFO;
+ }
+ BUG();
+ return 0;
+}
+
+static void
+msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
+ unsigned int result,
+ struct msm_dmov_errdata *err)
+{
+ struct msmsdcc_dma_data *dma_data =
+ container_of(cmd, struct msmsdcc_dma_data, hdr);
+ struct msmsdcc_host *host = dma_data->host;
+ unsigned long flags;
+ struct mmc_request *mrq;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->curr.mrq;
+ BUG_ON(!mrq);
+
+ if (!(result & DMOV_RSLT_VALID)) {
+ pr_err("msmsdcc: Invalid DataMover result\n");
+ goto out;
+ }
+
+ if (result & DMOV_RSLT_DONE) {
+ host->curr.data_xfered = host->curr.xfer_size;
+ } else {
+ /* Error or flush */
+ if (result & DMOV_RSLT_ERROR)
+ pr_err("%s: DMA error (0x%.8x)\n",
+ mmc_hostname(host->mmc), result);
+ if (result & DMOV_RSLT_FLUSH)
+ pr_err("%s: DMA channel flushed (0x%.8x)\n",
+ mmc_hostname(host->mmc), result);
+ if (err)
+ pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ err->flush[0], err->flush[1], err->flush[2],
+ err->flush[3], err->flush[4], err->flush[5]);
+ if (!mrq->data->error)
+ mrq->data->error = -EIO;
+ }
+ host->dma.busy = 0;
+ dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
+ host->dma.dir);
+
+ if (host->curr.user_pages) {
+ struct scatterlist *sg = host->dma.sg;
+ int i;
+
+ for (i = 0; i < host->dma.num_ents; i++)
+ flush_dcache_page(sg_page(sg++));
+ }
+
+ host->dma.sg = NULL;
+
+ if ((host->curr.got_dataend && host->curr.got_datablkend)
+ || mrq->data->error) {
+
+ /*
+ * If we've already gotten our DATAEND / DATABLKEND
+ * for this request, then complete it through here.
+ */
+ msmsdcc_stop_data(host);
+
+ if (!mrq->data->error)
+ host->curr.data_xfered = host->curr.xfer_size;
+ if (!mrq->data->stop || mrq->cmd->error) {
+ writel(0, host->base + MMCICOMMAND);
+ host->curr.mrq = NULL;
+ host->curr.cmd = NULL;
+ mrq->data->bytes_xfered = host->curr.data_xfered;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_request_done(host->mmc, mrq);
+ return;
+ } else
+ msmsdcc_start_command(host, mrq->data->stop, 0);
+ }
+
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+}
+
+static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
+{
+ if (host->dma.channel == -1)
+ return -ENOENT;
+
+ if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
+ return -EINVAL;
+ if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
+ return -EINVAL;
+ return 0;
+}
+
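A short note on what these checks mean in practice; the transfer sizes are illustrative only.

    /*
     * Worked example (illustrative sizes): MCI_FIFOSIZE is 16 words = 64
     * bytes, so a 512-byte single-block read (8 * 64) is DMA-eligible,
     * while a 48-byte SDIO byte-mode transfer (< 64) or a 96-byte one
     * (not a multiple of 64) returns -EINVAL here and is done by PIO.
     */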
+static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
+{
+ struct msmsdcc_nc_dmadata *nc;
+ dmov_box *box;
+ uint32_t rows;
+ uint32_t crci;
+ unsigned int n;
+ int i, rc;
+ struct scatterlist *sg = data->sg;
+
+ rc = validate_dma(host, data);
+ if (rc)
+ return rc;
+
+ host->dma.sg = data->sg;
+ host->dma.num_ents = data->sg_len;
+
+ nc = host->dma.nc;
+
+ switch (host->pdev_id) {
+ case 1:
+ crci = MSMSDCC_CRCI_SDC1;
+ break;
+ case 2:
+ crci = MSMSDCC_CRCI_SDC2;
+ break;
+ case 3:
+ crci = MSMSDCC_CRCI_SDC3;
+ break;
+ case 4:
+ crci = MSMSDCC_CRCI_SDC4;
+ break;
+ default:
+ host->dma.sg = NULL;
+ host->dma.num_ents = 0;
+ return -ENOENT;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ host->dma.dir = DMA_FROM_DEVICE;
+ else
+ host->dma.dir = DMA_TO_DEVICE;
+
+ host->curr.user_pages = 0;
+
+ n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+ host->dma.num_ents, host->dma.dir);
+
+ if (n != host->dma.num_ents) {
+ pr_err("%s: Unable to map in all sg elements\n",
+ mmc_hostname(host->mmc));
+ host->dma.sg = NULL;
+ host->dma.num_ents = 0;
+ return -ENOMEM;
+ }
+
+ box = &nc->cmd[0];
+ for (i = 0; i < host->dma.num_ents; i++) {
+ box->cmd = CMD_MODE_BOX;
+
+ if (i == (host->dma.num_ents - 1))
+ box->cmd |= CMD_LC;
+ rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
+ (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
+ (sg_dma_len(sg) / MCI_FIFOSIZE) ;
+
+ if (data->flags & MMC_DATA_READ) {
+ box->src_row_addr = msmsdcc_fifo_addr(host);
+ box->dst_row_addr = sg_dma_address(sg);
+
+ box->src_dst_len = (MCI_FIFOSIZE << 16) |
+ (MCI_FIFOSIZE);
+ box->row_offset = MCI_FIFOSIZE;
+
+ box->num_rows = rows * ((1 << 16) + 1);
+ box->cmd |= CMD_SRC_CRCI(crci);
+ } else {
+ box->src_row_addr = sg_dma_address(sg);
+ box->dst_row_addr = msmsdcc_fifo_addr(host);
+
+ box->src_dst_len = (MCI_FIFOSIZE << 16) |
+ (MCI_FIFOSIZE);
+ box->row_offset = (MCI_FIFOSIZE << 16);
+
+ box->num_rows = rows * ((1 << 16) + 1);
+ box->cmd |= CMD_DST_CRCI(crci);
+ }
+ box++;
+ sg++;
+ }
+
+ /* location of command block must be 64 bit aligned */
+ BUG_ON(host->dma.cmd_busaddr & 0x07);
+
+ nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+ host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+ DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+ host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+ return 0;
+}
+
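The DataMover box encoding above packs two 16-bit fields into each 32-bit word; a worked example with an assumed scatterlist entry may make that clearer.

    /*
     * Worked example (assumed sizes): for a 512-byte scatterlist entry
     * and the 64-byte FIFO, rows = 512 / 64 = 8, so
     *
     *      num_rows    = 8 * ((1 << 16) + 1) = 0x00080008
     *      src_dst_len = (64 << 16) | 64     = 0x00400040
     *
     * i.e. the same row count, and the 64-byte row length, are written
     * into both 16-bit halves of their respective descriptor fields.
     */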
+static void
+msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
+{
+ unsigned int datactrl, timeout;
+ unsigned long long clks;
+ void __iomem *base = host->base;
+ unsigned int pio_irqmask = 0;
+
+ host->curr.data = data;
+ host->curr.xfer_size = data->blksz * data->blocks;
+ host->curr.xfer_remain = host->curr.xfer_size;
+ host->curr.data_xfered = 0;
+ host->curr.got_dataend = 0;
+ host->curr.got_datablkend = 0;
+
+ memset(&host->pio, 0, sizeof(host->pio));
+
+ clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+ do_div(clks, NSEC_PER_SEC);
+ timeout = data->timeout_clks + (unsigned int)clks;
+ writel(timeout, base + MMCIDATATIMER);
+
+ writel(host->curr.xfer_size, base + MMCIDATALENGTH);
+
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
+
+ if (!msmsdcc_config_dma(host, data))
+ datactrl |= MCI_DPSM_DMAENABLE;
+ else {
+ host->pio.sg = data->sg;
+ host->pio.sg_len = data->sg_len;
+ host->pio.sg_off = 0;
+
+ if (data->flags & MMC_DATA_READ) {
+ pio_irqmask = MCI_RXFIFOHALFFULLMASK;
+ if (host->curr.xfer_remain < MCI_FIFOSIZE)
+ pio_irqmask |= MCI_RXDATAAVLBLMASK;
+ } else
+ pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
+ }
+
+ if (data->flags & MMC_DATA_READ)
+ datactrl |= MCI_DPSM_DIRECTION;
+
+ writel(pio_irqmask, base + MMCIMASK1);
+ writel(datactrl, base + MMCIDATACTRL);
+
+ if (datactrl & MCI_DPSM_DMAENABLE) {
+ host->dma.busy = 1;
+ msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
+ }
+}
+
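The data timeout programmed above is expressed in card-clock cycles; a worked conversion with assumed numbers follows.

    /*
     * Worked example (assumed values): for data->timeout_ns = 100 ms and
     * a 25 MHz card clock,
     *
     *      clks    = 100000000 * 25000000 / NSEC_PER_SEC = 2500000
     *      timeout = data->timeout_clks + 2500000
     *
     * which is the cycle count written to MMCIDATATIMER.
     */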
+static void
+msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
+{
+ void __iomem *base = host->base;
+
+ if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+ writel(0, base + MMCICOMMAND);
+ udelay(2 + ((5 * 1000000) / host->clk_rate));
+ }
+
+ c |= cmd->opcode | MCI_CPSM_ENABLE;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ c |= MCI_CPSM_LONGRSP;
+ c |= MCI_CPSM_RESPONSE;
+ }
+
+ if (cmd->opcode == 17 || cmd->opcode == 18 ||
+ cmd->opcode == 24 || cmd->opcode == 25 ||
+ cmd->opcode == 53)
+ c |= MCI_CSPM_DATCMD;
+
+ if (cmd == cmd->mrq->stop)
+ c |= MCI_CSPM_MCIABORT;
+
+ host->curr.cmd = cmd;
+
+ host->stats.cmds++;
+
+ writel(cmd->arg, base + MMCIARGUMENT);
+ writel(c, base + MMCICOMMAND);
+}
+
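The delay before re-enabling the CPSM scales with the current card clock; the rates below are assumed.

    /*
     * Worked example (assumed rates): the udelay above is 2 us plus
     * roughly five card-clock periods, e.g. 2 + (5 * 1000000) / 400000
     * = 14 us at a 400 kHz identification clock, but only 2 us once the
     * card runs at 50 MHz.
     */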
+static void
+msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
+ unsigned int status)
+{
+ if (status & MCI_DATACRCFAIL) {
+ pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
+ pr_err("%s: opcode 0x%.8x\n", __func__,
+ data->mrq->cmd->opcode);
+ pr_err("%s: blksz %d, blocks %d\n", __func__,
+ data->blksz, data->blocks);
+ data->error = -EILSEQ;
+ } else if (status & MCI_DATATIMEOUT) {
+ pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
+ data->error = -ETIMEDOUT;
+ } else if (status & MCI_RXOVERRUN) {
+ pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
+ data->error = -EIO;
+ } else if (status & MCI_TXUNDERRUN) {
+ pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
+ data->error = -EIO;
+ } else {
+ pr_err("%s: Unknown error (0x%.8x)\n",
+ mmc_hostname(host->mmc), status);
+ data->error = -EIO;
+ }
+}
+
+
+static int
+msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
+{
+ void __iomem *base = host->base;
+ uint32_t *ptr = (uint32_t *) buffer;
+ int count = 0;
+
+ while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
+
+ *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
+ ptr++;
+ count += sizeof(uint32_t);
+
+ remain -= sizeof(uint32_t);
+ if (remain == 0)
+ break;
+ }
+ return count;
+}
+
+static int
+msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
+ unsigned int remain, u32 status)
+{
+ void __iomem *base = host->base;
+ char *ptr = buffer;
+
+ do {
+ unsigned int count, maxcnt;
+
+ maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
+ MCI_FIFOHALFSIZE;
+ count = min(remain, maxcnt);
+
+ writesl(base + MMCIFIFO, ptr, count >> 2);
+ ptr += count;
+ remain -= count;
+
+ if (remain == 0)
+ break;
+
+ status = readl(base + MMCISTATUS);
+ } while (status & MCI_TXFIFOHALFEMPTY);
+
+ return ptr - buffer;
+}
+
+static int
+msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
+{
+ while (maxspin) {
+ if ((readl(host->base + MMCISTATUS) & mask))
+ return 0;
+ udelay(1);
+ --maxspin;
+ }
+ return -ETIMEDOUT;
+}
+
+static int
+msmsdcc_pio_irq(int irq, void *dev_id)
+{
+ struct msmsdcc_host *host = dev_id;
+ void __iomem *base = host->base;
+ uint32_t status;
+
+ status = readl(base + MMCISTATUS);
+
+ do {
+ unsigned long flags;
+ unsigned int remain, len;
+ char *buffer;
+
+ if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
+ if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
+ break;
+
+ if (msmsdcc_spin_on_status(host,
+ (MCI_TXFIFOHALFEMPTY |
+ MCI_RXDATAAVLBL),
+ PIO_SPINMAX)) {
+ break;
+ }
+ }
+
+ /* Map the current scatter buffer */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg_page(host->pio.sg),
+ KM_BIO_SRC_IRQ) + host->pio.sg->offset;
+ buffer += host->pio.sg_off;
+ remain = host->pio.sg->length - host->pio.sg_off;
+ len = 0;
+ if (status & MCI_RXACTIVE)
+ len = msmsdcc_pio_read(host, buffer, remain);
+ if (status & MCI_TXACTIVE)
+ len = msmsdcc_pio_write(host, buffer, remain, status);
+
+ /* Unmap the buffer */
+ kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ local_irq_restore(flags);
+
+ host->pio.sg_off += len;
+ host->curr.xfer_remain -= len;
+ host->curr.data_xfered += len;
+ remain -= len;
+
+ if (remain == 0) {
+ /* This sg page is full - do some housekeeping */
+ if (status & MCI_RXACTIVE && host->curr.user_pages)
+ flush_dcache_page(sg_page(host->pio.sg));
+
+ if (!--host->pio.sg_len) {
+ memset(&host->pio, 0, sizeof(host->pio));
+ break;
+ }
+
+ /* Advance to next sg */
+ host->pio.sg++;
+ host->pio.sg_off = 0;
+ }
+
+ status = readl(base + MMCISTATUS);
+ } while (1);
+
+ if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
+ writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+
+ if (!host->curr.xfer_remain)
+ writel(0, base + MMCIMASK1);
+
+ return IRQ_HANDLED;
+}
+
+static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
+{
+ struct mmc_command *cmd = host->curr.cmd;
+ void __iomem *base = host->base;
+
+ host->curr.cmd = NULL;
+ cmd->resp[0] = readl(base + MMCIRESPONSE0);
+ cmd->resp[1] = readl(base + MMCIRESPONSE1);
+ cmd->resp[2] = readl(base + MMCIRESPONSE2);
+ cmd->resp[3] = readl(base + MMCIRESPONSE3);
+
+ del_timer(&host->command_timer);
+ if (status & MCI_CMDTIMEOUT) {
+ cmd->error = -ETIMEDOUT;
+ } else if (status & MCI_CMDCRCFAIL &&
+ cmd->flags & MMC_RSP_CRC) {
+ pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
+ cmd->error = -EILSEQ;
+ }
+
+ if (!cmd->data || cmd->error) {
+ if (host->curr.data && host->dma.sg)
+ msm_dmov_stop_cmd(host->dma.channel,
+ &host->dma.hdr, 0);
+ else if (host->curr.data) { /* Non DMA */
+ msmsdcc_stop_data(host);
+ msmsdcc_request_end(host, cmd->mrq);
+ } else /* host->data == NULL */
+ msmsdcc_request_end(host, cmd->mrq);
+ } else if (!(cmd->data->flags & MMC_DATA_READ))
+ msmsdcc_start_data(host, cmd->data);
+}
+
+static void
+msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
+ void __iomem *base)
+{
+ struct mmc_data *data = host->curr.data;
+
+ if (!data)
+ return;
+
+ /* Check for data errors */
+ if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
+ msmsdcc_data_err(host, data, status);
+ host->curr.data_xfered = 0;
+ if (host->dma.sg)
+ msm_dmov_stop_cmd(host->dma.channel,
+ &host->dma.hdr, 0);
+ else {
+ msmsdcc_stop_data(host);
+ if (!data->stop)
+ msmsdcc_request_end(host, data->mrq);
+ else
+ msmsdcc_start_command(host, data->stop, 0);
+ }
+ }
+
+ /* Check for data done */
+ if (!host->curr.got_dataend && (status & MCI_DATAEND))
+ host->curr.got_dataend = 1;
+
+ if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
+ host->curr.got_datablkend = 1;
+
+ /*
+ * If DMA is still in progress, we complete via the completion handler
+ */
+ if (host->curr.got_dataend && host->curr.got_datablkend &&
+ !host->dma.busy) {
+ /*
+ * There appears to be an issue in the controller where
+ * if you request a small block transfer (< fifo size),
+ * you may get your DATAEND/DATABLKEND irq without the
+ * PIO data irq.
+ *
+ * Check to see if there is still data to be read,
+ * and simulate a PIO irq.
+ */
+ if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
+ msmsdcc_pio_irq(1, host);
+
+ msmsdcc_stop_data(host);
+ if (!data->error)
+ host->curr.data_xfered = host->curr.xfer_size;
+
+ if (!data->stop)
+ msmsdcc_request_end(host, data->mrq);
+ else
+ msmsdcc_start_command(host, data->stop, 0);
+ }
+}
+
+static irqreturn_t
+msmsdcc_irq(int irq, void *dev_id)
+{
+ struct msmsdcc_host *host = dev_id;
+ void __iomem *base = host->base;
+ u32 status;
+ int ret = 0;
+ int cardint = 0;
+
+ spin_lock(&host->lock);
+
+ do {
+ status = readl(base + MMCISTATUS);
+
+ status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
+ writel(status, base + MMCICLEAR);
+
+ msmsdcc_handle_irq_data(host, status, base);
+
+ if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+ MCI_CMDTIMEOUT) && host->curr.cmd) {
+ msmsdcc_do_cmdirq(host, status);
+ }
+
+ if (status & MCI_SDIOINTOPER) {
+ cardint = 1;
+ status &= ~MCI_SDIOINTOPER;
+ }
+ ret = 1;
+ } while (status);
+
+ spin_unlock(&host->lock);
+
+ /*
+ * We have to delay handling the card interrupt as it calls
+ * back into the driver.
+ */
+ if (cardint)
+ mmc_signal_sdio_irq(host->mmc);
+
+ return IRQ_RETVAL(ret);
+}
+
+static void
+msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ WARN_ON(host->curr.mrq != NULL);
+ WARN_ON(host->pwr == 0);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->stats.reqs++;
+
+ if (host->eject) {
+ if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
+ mrq->cmd->error = 0;
+ mrq->data->bytes_xfered = mrq->data->blksz *
+ mrq->data->blocks;
+ } else
+ mrq->cmd->error = -ENOMEDIUM;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->curr.mrq = mrq;
+
+ if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+ msmsdcc_start_data(host, mrq->data);
+
+ msmsdcc_start_command(host, mrq->cmd, 0);
+
+ if (host->cmdpoll && !msmsdcc_spin_on_status(host,
+ MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
+ CMD_SPINMAX)) {
+ uint32_t status = readl(host->base + MMCISTATUS);
+ msmsdcc_do_cmdirq(host, status);
+ writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
+ host->base + MMCICLEAR);
+ host->stats.cmdpoll_hits++;
+ } else {
+ host->stats.cmdpoll_misses++;
+ mod_timer(&host->command_timer, jiffies + HZ);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
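A descriptive note on the polling fast path used above, with values taken from the defines earlier in this file.

    /*
     * The request path spins for at most CMD_SPINMAX (20) iterations of
     * udelay(1), i.e. about 20 us, waiting for CMDRESPEND, CMDCRCFAIL or
     * CMDTIMEOUT; quick commands then complete without an interrupt
     * (stats.cmdpoll_hits).  Otherwise the one-second command_timer is
     * armed as a safety net and the IRQ path finishes the command
     * (stats.cmdpoll_misses).
     */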
+static void
+msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ u32 clk = 0, pwr = 0;
+ int rc;
+
+ if (ios->clock) {
+
+ if (!host->clks_on) {
+ clk_enable(host->pclk);
+ clk_enable(host->clk);
+ host->clks_on = 1;
+ }
+ if (ios->clock != host->clk_rate) {
+ rc = clk_set_rate(host->clk, ios->clock);
+ if (rc < 0)
+ pr_err("%s: Error setting clock rate (%d)\n",
+ mmc_hostname(host->mmc), rc);
+ else
+ host->clk_rate = ios->clock;
+ }
+ clk |= MCI_CLK_ENABLE;
+ }
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ clk |= (2 << 10); /* Set WIDEBUS */
+
+ if (ios->clock > 400000 && msmsdcc_pwrsave)
+ clk |= (1 << 9); /* PWRSAVE */
+
+ clk |= (1 << 12); /* FLOW_ENA */
+ clk |= (1 << 15); /* feedback clock */
+
+ if (host->plat->translate_vdd)
+ pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ htc_pwrsink_set(PWRSINK_SDCARD, 0);
+ break;
+ case MMC_POWER_UP:
+ pwr |= MCI_PWR_UP;
+ break;
+ case MMC_POWER_ON:
+ htc_pwrsink_set(PWRSINK_SDCARD, 100);
+ pwr |= MCI_PWR_ON;
+ break;
+ }
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= MCI_OD;
+
+ writel(clk, host->base + MMCICLOCK);
+
+ if (host->pwr != pwr) {
+ host->pwr = pwr;
+ writel(pwr, host->base + MMCIPOWER);
+ }
+
+ if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
+ clk_disable(host->clk);
+ clk_disable(host->pclk);
+ host->clks_on = 0;
+ }
+}
+
+static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (msmsdcc_sdioirq == 1) {
+ status = readl(host->base + MMCIMASK0);
+ if (enable)
+ status |= MCI_SDIOINTOPERMASK;
+ else
+ status &= ~MCI_SDIOINTOPERMASK;
+ host->saved_irq0mask = status;
+ writel(status, host->base + MMCIMASK0);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static const struct mmc_host_ops msmsdcc_ops = {
+ .request = msmsdcc_request,
+ .set_ios = msmsdcc_set_ios,
+ .enable_sdio_irq = msmsdcc_enable_sdio_irq,
+};
+
+static void
+msmsdcc_check_status(unsigned long data)
+{
+ struct msmsdcc_host *host = (struct msmsdcc_host *)data;
+ unsigned int status;
+
+ if (!host->plat->status) {
+ mmc_detect_change(host->mmc, 0);
+ goto out;
+ }
+
+ status = host->plat->status(mmc_dev(host->mmc));
+ host->eject = !status;
+ if (status ^ host->oldstat) {
+ pr_info("%s: Slot status change detected (%d -> %d)\n",
+ mmc_hostname(host->mmc), host->oldstat, status);
+ if (status)
+ mmc_detect_change(host->mmc, (5 * HZ) / 2);
+ else
+ mmc_detect_change(host->mmc, 0);
+ }
+
+ host->oldstat = status;
+
+out:
+ if (host->timer.function)
+ mod_timer(&host->timer, jiffies + HZ);
+}
+
+static irqreturn_t
+msmsdcc_platform_status_irq(int irq, void *dev_id)
+{
+ struct msmsdcc_host *host = dev_id;
+
+ printk(KERN_DEBUG "%s: %d\n", __func__, irq);
+ msmsdcc_check_status((unsigned long) host);
+ return IRQ_HANDLED;
+}
+
+static void
+msmsdcc_status_notify_cb(int card_present, void *dev_id)
+{
+ struct msmsdcc_host *host = dev_id;
+
+ printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
+ card_present);
+ msmsdcc_check_status((unsigned long) host);
+}
+
+/*
+ * Called when a command expires.
+ * Dump some debugging info, then error
+ * out the transaction.
+ */
+static void
+msmsdcc_command_expired(unsigned long _data)
+{
+ struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->curr.mrq;
+
+ if (!mrq) {
+ pr_info("%s: Command expiry misfire\n",
+ mmc_hostname(host->mmc));
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ pr_err("%s: Command timeout (%p %p %p %p)\n",
+ mmc_hostname(host->mmc), mrq, mrq->cmd,
+ mrq->data, host->dma.sg);
+
+ mrq->cmd->error = -ETIMEDOUT;
+ msmsdcc_stop_data(host);
+
+ writel(0, host->base + MMCICOMMAND);
+
+ host->curr.mrq = NULL;
+ host->curr.cmd = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int
+msmsdcc_init_dma(struct msmsdcc_host *host)
+{
+ memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
+ host->dma.host = host;
+ host->dma.channel = -1;
+
+ if (!host->dmares)
+ return -ENODEV;
+
+ host->dma.nc = dma_alloc_coherent(NULL,
+ sizeof(struct msmsdcc_nc_dmadata),
+ &host->dma.nc_busaddr,
+ GFP_KERNEL);
+ if (host->dma.nc == NULL) {
+ pr_err("Unable to allocate DMA buffer\n");
+ return -ENOMEM;
+ }
+ memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
+ host->dma.cmd_busaddr = host->dma.nc_busaddr;
+ host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
+ offsetof(struct msmsdcc_nc_dmadata, cmdptr);
+ host->dma.channel = host->dmares->start;
+
+ return 0;
+}
+
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+static void
+do_resume_work(struct work_struct *work)
+{
+ struct msmsdcc_host *host =
+ container_of(work, struct msmsdcc_host, resume_task);
+ struct mmc_host *mmc = host->mmc;
+
+ if (mmc) {
+ mmc_resume_host(mmc);
+ if (host->stat_irq)
+ enable_irq(host->stat_irq);
+ }
+}
+#endif
+
+static int
+msmsdcc_probe(struct platform_device *pdev)
+{
+ struct mmc_platform_data *plat = pdev->dev.platform_data;
+ struct msmsdcc_host *host;
+ struct mmc_host *mmc;
+ struct resource *cmd_irqres = NULL;
+ struct resource *pio_irqres = NULL;
+ struct resource *stat_irqres = NULL;
+ struct resource *memres = NULL;
+ struct resource *dmares = NULL;
+ int ret;
+
+ /* must have platform data */
+ if (!plat) {
+ pr_err("%s: Platform data not available\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pdev->id < 1 || pdev->id > 4)
+ return -EINVAL;
+
+ if (pdev->resource == NULL || pdev->num_resources < 2) {
+ pr_err("%s: Invalid resource\n", __func__);
+ return -ENXIO;
+ }
+
+ memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "cmd_irq");
+ pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "pio_irq");
+ stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "status_irq");
+
+ if (!cmd_irqres || !pio_irqres || !memres) {
+ pr_err("%s: Invalid resource\n", __func__);
+ return -ENXIO;
+ }
+
+ /*
+ * Setup our host structure
+ */
+
+ mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host = mmc_priv(mmc);
+ host->pdev_id = pdev->id;
+ host->plat = plat;
+ host->mmc = mmc;
+
+ host->cmdpoll = 1;
+
+ host->base = ioremap(memres->start, PAGE_SIZE);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->cmd_irqres = cmd_irqres;
+ host->pio_irqres = pio_irqres;
+ host->memres = memres;
+ host->dmares = dmares;
+ spin_lock_init(&host->lock);
+
+ /*
+ * Setup DMA
+ */
+ msmsdcc_init_dma(host);
+
+ /*
+ * Setup main peripheral bus clock
+ */
+ host->pclk = clk_get(&pdev->dev, "sdc_pclk");
+ if (IS_ERR(host->pclk)) {
+ ret = PTR_ERR(host->pclk);
+ goto host_free;
+ }
+
+ ret = clk_enable(host->pclk);
+ if (ret)
+ goto pclk_put;
+
+ host->pclk_rate = clk_get_rate(host->pclk);
+
+ /*
+ * Setup SDC MMC clock
+ */
+ host->clk = clk_get(&pdev->dev, "sdc_clk");
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto pclk_disable;
+ }
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ goto clk_put;
+
+ ret = clk_set_rate(host->clk, msmsdcc_fmin);
+ if (ret) {
+ pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
+ goto clk_disable;
+ }
+
+ host->clk_rate = clk_get_rate(host->clk);
+
+ host->clks_on = 1;
+
+ /*
+ * Setup MMC host structure
+ */
+ mmc->ops = &msmsdcc_ops;
+ mmc->f_min = msmsdcc_fmin;
+ mmc->f_max = msmsdcc_fmax;
+ mmc->ocr_avail = plat->ocr_mask;
+
+ if (msmsdcc_4bit)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (msmsdcc_sdioirq)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+
+ mmc->max_phys_segs = NR_SG;
+ mmc->max_hw_segs = NR_SG;
+ mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
+ mmc->max_blk_count = 65536;
+
+ mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
+ mmc->max_seg_size = mmc->max_req_size;
+
+ writel(0, host->base + MMCIMASK0);
+ writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
+
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ host->saved_irq0mask = MCI_IRQENABLE;
+
+ /*
+ * Setup card detect change
+ */
+
+ memset(&host->timer, 0, sizeof(host->timer));
+
+ if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
+ unsigned long irqflags = IRQF_SHARED |
+ (stat_irqres->flags & IRQF_TRIGGER_MASK);
+
+ host->stat_irq = stat_irqres->start;
+ ret = request_irq(host->stat_irq,
+ msmsdcc_platform_status_irq,
+ irqflags,
+ DRIVER_NAME " (slot)",
+ host);
+ if (ret) {
+ pr_err("%s: Unable to get slot IRQ %d (%d)\n",
+ mmc_hostname(mmc), host->stat_irq, ret);
+ goto clk_disable;
+ }
+ } else if (plat->register_status_notify) {
+ plat->register_status_notify(msmsdcc_status_notify_cb, host);
+ } else if (!plat->status)
+ pr_err("%s: No card detect facilities available\n",
+ mmc_hostname(mmc));
+ else {
+ init_timer(&host->timer);
+ host->timer.data = (unsigned long)host;
+ host->timer.function = msmsdcc_check_status;
+ host->timer.expires = jiffies + HZ;
+ add_timer(&host->timer);
+ }
+
+ if (plat->status) {
+ host->oldstat = host->plat->status(mmc_dev(host->mmc));
+ host->eject = !host->oldstat;
+ }
+
+ /*
+ * Setup a command timer. We currently need this due to
+ * some 'strange' timeout / error handling situations.
+ */
+ init_timer(&host->command_timer);
+ host->command_timer.data = (unsigned long) host;
+ host->command_timer.function = msmsdcc_command_expired;
+
+ ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
+ if (ret)
+ goto stat_irq_free;
+
+ ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
+ DRIVER_NAME " (pio)", host);
+ if (ret)
+ goto cmd_irq_free;
+
+ mmc_set_drvdata(pdev, mmc);
+ mmc_add_host(mmc);
+
+ pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
+ mmc_hostname(mmc), (unsigned long long)memres->start,
+ (unsigned int) cmd_irqres->start,
+ (unsigned int) host->stat_irq, host->dma.channel);
+ pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
+ (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
+ pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
+ mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
+ pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
+ pr_info("%s: Power save feature enable = %d\n",
+ mmc_hostname(mmc), msmsdcc_pwrsave);
+
+ if (host->dma.channel != -1) {
+ pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
+ mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
+ pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
+ mmc_hostname(mmc), host->dma.cmd_busaddr,
+ host->dma.cmdptr_busaddr);
+ } else
+ pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
+ if (host->timer.function)
+ pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
+
+ return 0;
+ cmd_irq_free:
+ free_irq(cmd_irqres->start, host);
+ stat_irq_free:
+ if (host->stat_irq)
+ free_irq(host->stat_irq, host);
+ clk_disable:
+ clk_disable(host->clk);
+ clk_put:
+ clk_put(host->clk);
+ pclk_disable:
+ clk_disable(host->pclk);
+ pclk_put:
+ clk_put(host->pclk);
+ host_free:
+ mmc_free_host(mmc);
+ out:
+ return ret;
+}
+
+static int
+msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct mmc_host *mmc = mmc_get_drvdata(dev);
+ int rc = 0;
+
+ if (mmc) {
+ struct msmsdcc_host *host = mmc_priv(mmc);
+
+ if (host->stat_irq)
+ disable_irq(host->stat_irq);
+
+ if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
+ rc = mmc_suspend_host(mmc, state);
+ if (!rc) {
+ writel(0, host->base + MMCIMASK0);
+
+ if (host->clks_on) {
+ clk_disable(host->clk);
+ clk_disable(host->pclk);
+ host->clks_on = 0;
+ }
+ }
+ }
+ return rc;
+}
+
+static int
+msmsdcc_resume(struct platform_device *dev)
+{
+ struct mmc_host *mmc = mmc_get_drvdata(dev);
+ unsigned long flags;
+
+ if (mmc) {
+ struct msmsdcc_host *host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->clks_on) {
+ clk_enable(host->pclk);
+ clk_enable(host->clk);
+ host->clks_on = 1;
+ }
+
+ writel(host->saved_irq0mask, host->base + MMCIMASK0);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
+ mmc_resume_host(mmc);
+ if (host->stat_irq)
+ enable_irq(host->stat_irq);
+ }
+ return 0;
+}
+
+static struct platform_driver msmsdcc_driver = {
+ .probe = msmsdcc_probe,
+ .suspend = msmsdcc_suspend,
+ .resume = msmsdcc_resume,
+ .driver = {
+ .name = "msm_sdcc",
+ },
+};
+
+static int __init msmsdcc_init(void)
+{
+ return platform_driver_register(&msmsdcc_driver);
+}
+
+static void __exit msmsdcc_exit(void)
+{
+ platform_driver_unregister(&msmsdcc_driver);
+}
+
+module_init(msmsdcc_init);
+module_exit(msmsdcc_exit);
+
+MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
new file mode 100644
index 00000000000..8c844846981
--- /dev/null
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -0,0 +1,238 @@
+/*
+ * linux/drivers/mmc/host/msmsdcc.h - QCT MSM7K SDC Controller
+ *
+ * Copyright (C) 2008 Google, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * - Based on mmci.h
+ */
+
+#ifndef _MSM_SDCC_H
+#define _MSM_SDCC_H
+
+#define MSMSDCC_CRCI_SDC1 6
+#define MSMSDCC_CRCI_SDC2 7
+#define MSMSDCC_CRCI_SDC3 12
+#define MSMSDCC_CRCI_SDC4 13
+
+#define MMCIPOWER 0x000
+#define MCI_PWR_OFF 0x00
+#define MCI_PWR_UP 0x02
+#define MCI_PWR_ON 0x03
+#define MCI_OD (1 << 6)
+
+#define MMCICLOCK 0x004
+#define MCI_CLK_ENABLE (1 << 8)
+#define MCI_CLK_PWRSAVE (1 << 9)
+#define MCI_CLK_WIDEBUS (1 << 10)
+#define MCI_CLK_FLOWENA (1 << 12)
+#define MCI_CLK_INVERTOUT (1 << 13)
+#define MCI_CLK_SELECTIN (1 << 14)
+
+#define MMCIARGUMENT 0x008
+#define MMCICOMMAND 0x00c
+#define MCI_CPSM_RESPONSE (1 << 6)
+#define MCI_CPSM_LONGRSP (1 << 7)
+#define MCI_CPSM_INTERRUPT (1 << 8)
+#define MCI_CPSM_PENDING (1 << 9)
+#define MCI_CPSM_ENABLE (1 << 10)
+#define MCI_CPSM_PROGENA (1 << 11)
+#define MCI_CSPM_DATCMD (1 << 12)
+#define MCI_CSPM_MCIABORT (1 << 13)
+#define MCI_CSPM_CCSENABLE (1 << 14)
+#define MCI_CSPM_CCSDISABLE (1 << 15)
+
+
+#define MMCIRESPCMD 0x010
+#define MMCIRESPONSE0 0x014
+#define MMCIRESPONSE1 0x018
+#define MMCIRESPONSE2 0x01c
+#define MMCIRESPONSE3 0x020
+#define MMCIDATATIMER 0x024
+#define MMCIDATALENGTH 0x028
+
+#define MMCIDATACTRL 0x02c
+#define MCI_DPSM_ENABLE (1 << 0)
+#define MCI_DPSM_DIRECTION (1 << 1)
+#define MCI_DPSM_MODE (1 << 2)
+#define MCI_DPSM_DMAENABLE (1 << 3)
+
+#define MMCIDATACNT 0x030
+#define MMCISTATUS 0x034
+#define MCI_CMDCRCFAIL (1 << 0)
+#define MCI_DATACRCFAIL (1 << 1)
+#define MCI_CMDTIMEOUT (1 << 2)
+#define MCI_DATATIMEOUT (1 << 3)
+#define MCI_TXUNDERRUN (1 << 4)
+#define MCI_RXOVERRUN (1 << 5)
+#define MCI_CMDRESPEND (1 << 6)
+#define MCI_CMDSENT (1 << 7)
+#define MCI_DATAEND (1 << 8)
+#define MCI_DATABLOCKEND (1 << 10)
+#define MCI_CMDACTIVE (1 << 11)
+#define MCI_TXACTIVE (1 << 12)
+#define MCI_RXACTIVE (1 << 13)
+#define MCI_TXFIFOHALFEMPTY (1 << 14)
+#define MCI_RXFIFOHALFFULL (1 << 15)
+#define MCI_TXFIFOFULL (1 << 16)
+#define MCI_RXFIFOFULL (1 << 17)
+#define MCI_TXFIFOEMPTY (1 << 18)
+#define MCI_RXFIFOEMPTY (1 << 19)
+#define MCI_TXDATAAVLBL (1 << 20)
+#define MCI_RXDATAAVLBL (1 << 21)
+#define MCI_SDIOINTR (1 << 22)
+#define MCI_PROGDONE (1 << 23)
+#define MCI_ATACMDCOMPL (1 << 24)
+#define MCI_SDIOINTOPER (1 << 25)
+#define MCI_CCSTIMEOUT (1 << 26)
+
+#define MMCICLEAR 0x038
+#define MCI_CMDCRCFAILCLR (1 << 0)
+#define MCI_DATACRCFAILCLR (1 << 1)
+#define MCI_CMDTIMEOUTCLR (1 << 2)
+#define MCI_DATATIMEOUTCLR (1 << 3)
+#define MCI_TXUNDERRUNCLR (1 << 4)
+#define MCI_RXOVERRUNCLR (1 << 5)
+#define MCI_CMDRESPENDCLR (1 << 6)
+#define MCI_CMDSENTCLR (1 << 7)
+#define MCI_DATAENDCLR (1 << 8)
+#define MCI_DATABLOCKENDCLR (1 << 10)
+
+#define MMCIMASK0 0x03c
+#define MCI_CMDCRCFAILMASK (1 << 0)
+#define MCI_DATACRCFAILMASK (1 << 1)
+#define MCI_CMDTIMEOUTMASK (1 << 2)
+#define MCI_DATATIMEOUTMASK (1 << 3)
+#define MCI_TXUNDERRUNMASK (1 << 4)
+#define MCI_RXOVERRUNMASK (1 << 5)
+#define MCI_CMDRESPENDMASK (1 << 6)
+#define MCI_CMDSENTMASK (1 << 7)
+#define MCI_DATAENDMASK (1 << 8)
+#define MCI_DATABLOCKENDMASK (1 << 10)
+#define MCI_CMDACTIVEMASK (1 << 11)
+#define MCI_TXACTIVEMASK (1 << 12)
+#define MCI_RXACTIVEMASK (1 << 13)
+#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
+#define MCI_RXFIFOHALFFULLMASK (1 << 15)
+#define MCI_TXFIFOFULLMASK (1 << 16)
+#define MCI_RXFIFOFULLMASK (1 << 17)
+#define MCI_TXFIFOEMPTYMASK (1 << 18)
+#define MCI_RXFIFOEMPTYMASK (1 << 19)
+#define MCI_TXDATAAVLBLMASK (1 << 20)
+#define MCI_RXDATAAVLBLMASK (1 << 21)
+#define MCI_SDIOINTMASK (1 << 22)
+#define MCI_PROGDONEMASK (1 << 23)
+#define MCI_ATACMDCOMPLMASK (1 << 24)
+#define MCI_SDIOINTOPERMASK (1 << 25)
+#define MCI_CCSTIMEOUTMASK (1 << 26)
+
+#define MMCIMASK1 0x040
+#define MMCIFIFOCNT 0x044
+#define MCICCSTIMER 0x058
+
+#define MMCIFIFO 0x080 /* to 0x0bc */
+
+#define MCI_IRQENABLE \
+ (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
+ MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK)
+
+/*
+ * The size of the FIFO in bytes.
+ */
+#define MCI_FIFOSIZE (16*4)
+
+#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
+
+#define NR_SG 32
+
+struct clk;
+
+struct msmsdcc_nc_dmadata {
+ dmov_box cmd[NR_SG];
+ uint32_t cmdptr;
+};
+
+struct msmsdcc_dma_data {
+ struct msmsdcc_nc_dmadata *nc;
+ dma_addr_t nc_busaddr;
+ dma_addr_t cmd_busaddr;
+ dma_addr_t cmdptr_busaddr;
+
+ struct msm_dmov_cmd hdr;
+ enum dma_data_direction dir;
+
+ struct scatterlist *sg;
+ int num_ents;
+
+ int channel;
+ struct msmsdcc_host *host;
+ int busy; /* Set if DM is busy */
+};
+
+struct msmsdcc_pio_data {
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ unsigned int sg_off;
+};
+
+struct msmsdcc_curr_req {
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ unsigned int xfer_size; /* Total data size */
+ unsigned int xfer_remain; /* Bytes remaining to send */
+ unsigned int data_xfered; /* Bytes acked by BLKEND irq */
+ int got_dataend;
+ int got_datablkend;
+ int user_pages;
+};
+
+struct msmsdcc_stats {
+ unsigned int reqs;
+ unsigned int cmds;
+ unsigned int cmdpoll_hits;
+ unsigned int cmdpoll_misses;
+};
+
+struct msmsdcc_host {
+ struct resource *cmd_irqres;
+ struct resource *pio_irqres;
+ struct resource *memres;
+ struct resource *dmares;
+ void __iomem *base;
+ int pdev_id;
+ unsigned int stat_irq;
+
+ struct msmsdcc_curr_req curr;
+
+ struct mmc_host *mmc;
+ struct clk *clk; /* main MMC bus clock */
+ struct clk *pclk; /* SDCC peripheral bus clock */
+ unsigned int clks_on; /* set if clocks are enabled */
+ struct timer_list command_timer;
+
+ unsigned int eject; /* eject state */
+
+ spinlock_t lock;
+
+ unsigned int clk_rate; /* Current clock rate */
+ unsigned int pclk_rate;
+
+ u32 pwr;
+ u32 saved_irq0mask; /* MMCIMASK0 reg value */
+ struct mmc_platform_data *plat;
+
+ struct timer_list timer;
+ unsigned int oldstat;
+
+ struct msmsdcc_dma_data dma;
+ struct msmsdcc_pio_data pio;
+ int cmdpoll;
+ struct msmsdcc_stats stats;
+};
+
+#endif
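
The MSM SDCC block above uses the familiar status/clear/mask register triplet: a status register reports the events encoded by the MCI_* bits, MMCICLEAR acknowledges the write-1-to-clear subset, and MMCIMASK0/MMCIMASK1 select which events raise the interrupt lines. A minimal kernel-side sketch of how these defines combine; msmsdcc_readl()/msmsdcc_writel() are hypothetical accessors (the real driver wraps readl()/writel() on host->base), and MMCISTATUS is assumed to be defined earlier in this header.

/* Illustrative sketch only -- not part of this header or driver. */
static void msmsdcc_ack_and_unmask(struct msmsdcc_host *host)
{
	u32 status = msmsdcc_readl(host, MMCISTATUS);

	/* Only the latched events have CLR counterparts (bits 0-8 and 10). */
	msmsdcc_writel(host, status & (MCI_CMDCRCFAILCLR | MCI_DATACRCFAILCLR |
				       MCI_CMDTIMEOUTCLR | MCI_DATATIMEOUTCLR |
				       MCI_TXUNDERRUNCLR | MCI_RXOVERRUNCLR |
				       MCI_CMDRESPENDCLR | MCI_CMDSENTCLR |
				       MCI_DATAENDCLR | MCI_DATABLOCKENDCLR),
		       MMCICLEAR);

	/* MCI_IRQENABLE groups the command/data completion and error bits
	 * into one default unmask value for MMCIMASK0. */
	msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
}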
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bc14bb1b057..88671529c45 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -512,7 +512,7 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
}
/* For the DMA case the DMA engine handles the data transfer
- * automatically. For non DMA we have to to it ourselves.
+ * automatically. For non DMA we have to do it ourselves.
* Don't do it in interrupt context though.
*/
if (!mxcmci_use_dma(host) && host->data)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1cf9cfb3b64..4487cc09791 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -17,6 +17,8 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -25,6 +27,7 @@
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/core.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <mach/dma.h>
@@ -35,6 +38,7 @@
/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSCONFIG 0x0010
+#define OMAP_HSMMC_SYSSTATUS 0x0014
#define OMAP_HSMMC_CON 0x002C
#define OMAP_HSMMC_BLK 0x0104
#define OMAP_HSMMC_ARG 0x0108
@@ -70,6 +74,8 @@
#define DTO_MASK 0x000F0000
#define DTO_SHIFT 16
#define INT_EN_MASK 0x307F0033
+#define BWR_ENABLE (1 << 4)
+#define BRR_ENABLE (1 << 5)
#define INIT_STREAM (1 << 1)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
@@ -92,6 +98,8 @@
#define DUAL_VOLT_OCR_BIT 7
#define SRC (1 << 25)
#define SRD (1 << 26)
+#define SOFTRESET (1 << 1)
+#define RESETDONE (1 << 0)
/*
* FIXME: Most likely all the data using these _DEVID defines should come
@@ -101,11 +109,18 @@
#define OMAP_MMC1_DEVID 0
#define OMAP_MMC2_DEVID 1
#define OMAP_MMC3_DEVID 2
+#define OMAP_MMC4_DEVID 3
+#define OMAP_MMC5_DEVID 4
#define MMC_TIMEOUT_MS 20
#define OMAP_MMC_MASTER_CLOCK 96000000
#define DRIVER_NAME "mmci-omap-hs"
+/* Timeouts for entering power saving states on inactivity, msec */
+#define OMAP_MMC_DISABLED_TIMEOUT 100
+#define OMAP_MMC_SLEEP_TIMEOUT 1000
+#define OMAP_MMC_OFF_TIMEOUT 8000
+
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -122,7 +137,7 @@
#define OMAP_HSMMC_WRITE(base, reg, val) \
__raw_writel((val), (base) + OMAP_HSMMC_##reg)
-struct mmc_omap_host {
+struct omap_hsmmc_host {
struct device *dev;
struct mmc_host *mmc;
struct mmc_request *mrq;
@@ -135,27 +150,35 @@ struct mmc_omap_host {
struct work_struct mmc_carddetect_work;
void __iomem *base;
resource_size_t mapbase;
+ spinlock_t irq_lock; /* Prevent races with irq handler */
+ unsigned long flags;
unsigned int id;
unsigned int dma_len;
unsigned int dma_sg_idx;
unsigned char bus_mode;
+ unsigned char power_mode;
u32 *buffer;
u32 bytesleft;
int suspended;
int irq;
- int carddetect;
int use_dma, dma_ch;
int dma_line_tx, dma_line_rx;
int slot_id;
- int dbclk_enabled;
+ int got_dbclk;
int response_busy;
+ int context_loss;
+ int dpm_state;
+ int vdd;
+ int protect_card;
+ int reqs_blocked;
+
struct omap_mmc_platform_data *pdata;
};
/*
* Stop clock to the card
*/
-static void omap_mmc_stop_clock(struct mmc_omap_host *host)
+static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
{
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
@@ -163,15 +186,178 @@ static void omap_mmc_stop_clock(struct mmc_omap_host *host)
dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}
+#ifdef CONFIG_PM
+
+/*
+ * Restore the MMC host context, if it was lost as result of a
+ * power state change.
+ */
+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ struct omap_mmc_platform_data *pdata = host->pdata;
+ int context_loss = 0;
+ u32 hctl, capa, con;
+ u16 dsor = 0;
+ unsigned long timeout;
+
+ if (pdata->get_context_loss_count) {
+ context_loss = pdata->get_context_loss_count(host->dev);
+ if (context_loss < 0)
+ return 1;
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "context was %slost\n",
+ context_loss == host->context_loss ? "not " : "");
+ if (host->context_loss == context_loss)
+ return 1;
+
+ /* Wait for hardware reset */
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
+ && time_before(jiffies, timeout))
+ ;
+
+ /* Do software reset */
+ OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET);
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
+ && time_before(jiffies, timeout))
+ ;
+
+ OMAP_HSMMC_WRITE(host->base, SYSCONFIG,
+ OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE);
+
+ if (host->id == OMAP_MMC1_DEVID) {
+ if (host->power_mode != MMC_POWER_OFF &&
+ (1 << ios->vdd) <= MMC_VDD_23_24)
+ hctl = SDVS18;
+ else
+ hctl = SDVS30;
+ capa = VS30 | VS18;
+ } else {
+ hctl = SDVS18;
+ capa = VS18;
+ }
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | hctl);
+
+ OMAP_HSMMC_WRITE(host->base, CAPA,
+ OMAP_HSMMC_READ(host->base, CAPA) | capa);
+
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
+
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
+ && time_before(jiffies, timeout))
+ ;
+
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
+ OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+
+ /* Do not initialize card-specific things if the power is off */
+ if (host->power_mode == MMC_POWER_OFF)
+ goto out;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
+ break;
+ case MMC_BUS_WIDTH_4:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
+ break;
+ case MMC_BUS_WIDTH_1:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
+ break;
+ }
+
+ if (ios->clock) {
+ dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
+ if (dsor < 1)
+ dsor = 1;
+
+ if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
+ dsor++;
+
+ if (dsor > 250)
+ dsor = 250;
+ }
+
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16));
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
+ && time_before(jiffies, timeout))
+ ;
+
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ OMAP_HSMMC_WRITE(host->base, CON, con | OD);
+ else
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+out:
+ host->context_loss = context_loss;
+
+ dev_dbg(mmc_dev(host->mmc), "context is restored\n");
+ return 0;
+}
+
+/*
+ * Save the MMC host context (store the number of power state changes so far).
+ */
+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
+{
+ struct omap_mmc_platform_data *pdata = host->pdata;
+ int context_loss;
+
+ if (pdata->get_context_loss_count) {
+ context_loss = pdata->get_context_loss_count(host->dev);
+ if (context_loss < 0)
+ return;
+ host->context_loss = context_loss;
+ }
+}
+
+#else
+
+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
+{
+ return 0;
+}
+
+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
+{
+}
+
+#endif
+
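The save/restore pair above tracks power-domain context by counting losses rather than by flag: probe and each disable path record the platform's current loss count, and the restore path reprograms the controller registers only when that count has moved on since the last save. A standalone sketch of the same pattern, with get_loss_count() and reprogram_controller() as hypothetical stand-ins for pdata->get_context_loss_count() and the register reload above.

#include <stdbool.h>

extern int get_loss_count(void);	/* hypothetical platform hook */
extern void reprogram_controller(void);	/* hypothetical register reload */

static int saved_loss_count;

static void context_save(void)
{
	saved_loss_count = get_loss_count();
}

/* Returns true when the registers actually had to be reloaded. */
static bool context_restore(void)
{
	int now = get_loss_count();

	if (now == saved_loss_count)
		return false;		/* context survived, nothing to redo */

	reprogram_controller();		/* SYSCONFIG, HCTL, CAPA, clock, ... */
	saved_loss_count = now;
	return true;
}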
/*
* Send init stream sequence to card
* before sending IDLE command
*/
-static void send_init_stream(struct mmc_omap_host *host)
+static void send_init_stream(struct omap_hsmmc_host *host)
{
int reg = 0;
unsigned long timeout;
+ if (host->protect_card)
+ return;
+
disable_irq(host->irq);
OMAP_HSMMC_WRITE(host->base, CON,
OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
@@ -183,51 +369,53 @@ static void send_init_stream(struct mmc_omap_host *host)
OMAP_HSMMC_WRITE(host->base, CON,
OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
+
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_READ(host->base, STAT);
+
enable_irq(host->irq);
}
static inline
-int mmc_omap_cover_is_closed(struct mmc_omap_host *host)
+int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
{
int r = 1;
- if (host->pdata->slots[host->slot_id].get_cover_state)
- r = host->pdata->slots[host->slot_id].get_cover_state(host->dev,
- host->slot_id);
+ if (mmc_slot(host).get_cover_state)
+ r = mmc_slot(host).get_cover_state(host->dev, host->slot_id);
return r;
}
static ssize_t
-mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
+omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
- return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" :
- "open");
+ return sprintf(buf, "%s\n",
+ omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
}
-static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
+static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
static ssize_t
-mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
+omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
- struct mmc_omap_host *host = mmc_priv(mmc);
- struct omap_mmc_slot_data slot = host->pdata->slots[host->slot_id];
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
- return sprintf(buf, "%s\n", slot.name);
+ return sprintf(buf, "%s\n", mmc_slot(host).name);
}
-static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
+static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
/*
* Configure the response type and send the cmd.
*/
static void
-mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
+omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
struct mmc_data *data)
{
int cmdreg = 0, resptype = 0, cmdtype = 0;
@@ -241,7 +429,12 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
*/
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+
+ if (host->use_dma)
+ OMAP_HSMMC_WRITE(host->base, IE,
+ INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
+ else
+ OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
host->response_busy = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
@@ -275,12 +468,20 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
if (host->use_dma)
cmdreg |= DMA_EN;
+ /*
+ * In an interrupt context (i.e. STOP command), the spinlock is unlocked
+ * by the interrupt handler, otherwise (i.e. for a new request) it is
+ * unlocked here.
+ */
+ if (!in_interrupt())
+ spin_unlock_irqrestore(&host->irq_lock, host->flags);
+
OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}
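
The locking comment above describes a handoff rather than a conventional critical section: omap_hsmmc_request() takes irq_lock before touching host state, the lock is dropped here just before the CMD write, and when the command is issued from interrupt context (a STOP retry) the IRQ handler's own spin_lock()/spin_unlock() pair covers it instead. A reduced sketch of that shape only, with the driver specifics stripped out and issue_cmd_hw() as a hypothetical stand-in:

#include <linux/spinlock.h>
#include <linux/hardirq.h>

static DEFINE_SPINLOCK(demo_irq_lock);
static unsigned long demo_irq_flags;	/* the driver keeps this in host->flags */

static void issue_cmd_hw(void);		/* hypothetical: writes ARG and CMD */

static void demo_start_command(void)
{
	/*
	 * From process context the caller took demo_irq_lock in
	 * demo_request(); drop it just before the CMD write.  From the IRQ
	 * handler, the handler's own spin_lock() is already held and is
	 * released when it returns, so do nothing here.
	 */
	if (!in_interrupt())
		spin_unlock_irqrestore(&demo_irq_lock, demo_irq_flags);

	issue_cmd_hw();
}

static void demo_request(void)
{
	if (!in_interrupt())
		spin_lock_irqsave(&demo_irq_lock, demo_irq_flags);
	/* ... validate and prepare the request under the lock ... */
	demo_start_command();
}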
static int
-mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data)
+omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
{
if (data->flags & MMC_DATA_WRITE)
return DMA_TO_DEVICE;
@@ -292,11 +493,18 @@ mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data)
* Notify the transfer complete to MMC core
*/
static void
-mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
+omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
if (!data) {
struct mmc_request *mrq = host->mrq;
+ /* TC before CC from CMD6 - don't know why, but it happens */
+ if (host->cmd && host->cmd->opcode == 6 &&
+ host->response_busy) {
+ host->response_busy = 0;
+ return;
+ }
+
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
return;
@@ -306,7 +514,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
if (host->use_dma && host->dma_ch != -1)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
- mmc_omap_get_dma_dir(host, data));
+ omap_hsmmc_get_dma_dir(host, data));
if (!data->error)
data->bytes_xfered += data->blocks * (data->blksz);
@@ -318,14 +526,14 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
mmc_request_done(host->mmc, data->mrq);
return;
}
- mmc_omap_start_command(host, data->stop, NULL);
+ omap_hsmmc_start_command(host, data->stop, NULL);
}
/*
* Notify the core about command completion
*/
static void
-mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
+omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
host->cmd = NULL;
@@ -350,13 +558,13 @@ mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
/*
* DMA clean up for command errors
*/
-static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno)
+static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
host->data->error = errno;
if (host->use_dma && host->dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
- mmc_omap_get_dma_dir(host, host->data));
+ omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(host->dma_ch);
host->dma_ch = -1;
up(&host->sem);
@@ -368,10 +576,10 @@ static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno)
* Readable error output
*/
#ifdef CONFIG_MMC_DEBUG
-static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
+static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
{
/* --- means reserved bit without definition at documentation */
- static const char *mmc_omap_status_bits[] = {
+ static const char *omap_hsmmc_status_bits[] = {
"CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
"OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
"CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
@@ -384,9 +592,9 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
len = sprintf(buf, "MMC IRQ 0x%x :", status);
buf += len;
- for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
+ for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
if (status & (1 << i)) {
- len = sprintf(buf, " %s", mmc_omap_status_bits[i]);
+ len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
buf += len;
}
@@ -401,8 +609,8 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
* SRC or SRD bit of SYSCTL register
* Can be called from interrupt context
*/
-static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host,
- unsigned long bit)
+static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
+ unsigned long bit)
{
unsigned long i = 0;
unsigned long limit = (loops_per_jiffy *
@@ -424,17 +632,20 @@ static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host,
/*
* MMC controller IRQ handler
*/
-static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
- struct mmc_omap_host *host = dev_id;
+ struct omap_hsmmc_host *host = dev_id;
struct mmc_data *data;
int end_cmd = 0, end_trans = 0, status;
+ spin_lock(&host->irq_lock);
+
if (host->mrq == NULL) {
OMAP_HSMMC_WRITE(host->base, STAT,
OMAP_HSMMC_READ(host->base, STAT));
/* Flush posted write */
OMAP_HSMMC_READ(host->base, STAT);
+ spin_unlock(&host->irq_lock);
return IRQ_HANDLED;
}
@@ -444,13 +655,14 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
if (status & ERR) {
#ifdef CONFIG_MMC_DEBUG
- mmc_omap_report_irq(host, status);
+ omap_hsmmc_report_irq(host, status);
#endif
if ((status & CMD_TIMEOUT) ||
(status & CMD_CRC)) {
if (host->cmd) {
if (status & CMD_TIMEOUT) {
- mmc_omap_reset_controller_fsm(host, SRC);
+ omap_hsmmc_reset_controller_fsm(host,
+ SRC);
host->cmd->error = -ETIMEDOUT;
} else {
host->cmd->error = -EILSEQ;
@@ -459,9 +671,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
}
if (host->data || host->response_busy) {
if (host->data)
- mmc_dma_cleanup(host, -ETIMEDOUT);
+ omap_hsmmc_dma_cleanup(host,
+ -ETIMEDOUT);
host->response_busy = 0;
- mmc_omap_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRD);
}
}
if ((status & DATA_TIMEOUT) ||
@@ -471,11 +684,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
-ETIMEDOUT : -EILSEQ;
if (host->data)
- mmc_dma_cleanup(host, err);
+ omap_hsmmc_dma_cleanup(host, err);
else
host->mrq->cmd->error = err;
host->response_busy = 0;
- mmc_omap_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRD);
end_trans = 1;
}
}
@@ -494,14 +707,16 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
OMAP_HSMMC_READ(host->base, STAT);
if (end_cmd || ((status & CC) && host->cmd))
- mmc_omap_cmd_done(host, host->cmd);
- if (end_trans || (status & TC))
- mmc_omap_xfer_done(host, data);
+ omap_hsmmc_cmd_done(host, host->cmd);
+ if ((end_trans || (status & TC)) && host->mrq)
+ omap_hsmmc_xfer_done(host, data);
+
+ spin_unlock(&host->irq_lock);
return IRQ_HANDLED;
}
-static void set_sd_bus_power(struct mmc_omap_host *host)
+static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
unsigned long i;
@@ -521,7 +736,7 @@ static void set_sd_bus_power(struct mmc_omap_host *host)
* The MMC2 transceiver controls are used instead of DAT4..DAT7.
* Some chips, like eMMC ones, use internal transceivers.
*/
-static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
+static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
{
u32 reg_val = 0;
int ret;
@@ -529,22 +744,24 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
/* Disable the clocks */
clk_disable(host->fclk);
clk_disable(host->iclk);
- clk_disable(host->dbclk);
+ if (host->got_dbclk)
+ clk_disable(host->dbclk);
/* Turn the power off */
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
- if (ret != 0)
- goto err;
/* Turn the power ON with given VDD 1.8 or 3.0v */
- ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
+ if (!ret)
+ ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
+ vdd);
+ clk_enable(host->iclk);
+ clk_enable(host->fclk);
+ if (host->got_dbclk)
+ clk_enable(host->dbclk);
+
if (ret != 0)
goto err;
- clk_enable(host->fclk);
- clk_enable(host->iclk);
- clk_enable(host->dbclk);
-
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
reg_val = OMAP_HSMMC_READ(host->base, HCTL);
@@ -552,7 +769,7 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
/*
* If a MMC dual voltage card is detected, the set_ios fn calls
* this fn with VDD bit set for 1.8V. Upon card removal from the
- * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
+ * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
*
* Cope with a bit of slop in the range ... per data sheets:
* - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
@@ -578,25 +795,59 @@ err:
return ret;
}
+/* Protect the card while the cover is open */
+static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
+{
+ if (!mmc_slot(host).get_cover_state)
+ return;
+
+ host->reqs_blocked = 0;
+ if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {
+ if (host->protect_card) {
+ printk(KERN_INFO "%s: cover is closed, "
+ "card is now accessible\n",
+ mmc_hostname(host->mmc));
+ host->protect_card = 0;
+ }
+ } else {
+ if (!host->protect_card) {
+ printk(KERN_INFO "%s: cover is open, "
+ "card is now inaccessible\n",
+ mmc_hostname(host->mmc));
+ host->protect_card = 1;
+ }
+ }
+}
+
/*
* Work Item to notify the core about card insertion/removal
*/
-static void mmc_omap_detect(struct work_struct *work)
+static void omap_hsmmc_detect(struct work_struct *work)
{
- struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
- mmc_carddetect_work);
+ struct omap_hsmmc_host *host =
+ container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
struct omap_mmc_slot_data *slot = &mmc_slot(host);
+ int carddetect;
- if (mmc_slot(host).card_detect)
- host->carddetect = slot->card_detect(slot->card_detect_irq);
- else
- host->carddetect = -ENOSYS;
+ if (host->suspended)
+ return;
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
- if (host->carddetect) {
+
+ if (slot->card_detect)
+ carddetect = slot->card_detect(slot->card_detect_irq);
+ else {
+ omap_hsmmc_protect_card(host);
+ carddetect = -ENOSYS;
+ }
+
+ if (carddetect) {
mmc_detect_change(host->mmc, (HZ * 200) / 1000);
} else {
- mmc_omap_reset_controller_fsm(host, SRD);
+ mmc_host_enable(host->mmc);
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ mmc_host_lazy_disable(host->mmc);
+
mmc_detect_change(host->mmc, (HZ * 50) / 1000);
}
}
@@ -604,16 +855,18 @@ static void mmc_omap_detect(struct work_struct *work)
/*
* ISR for handling card insertion and removal
*/
-static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id)
+static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id;
+ struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
+ if (host->suspended)
+ return IRQ_HANDLED;
schedule_work(&host->mmc_carddetect_work);
return IRQ_HANDLED;
}
-static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host,
+static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
struct mmc_data *data)
{
int sync_dev;
@@ -625,7 +878,7 @@ static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host,
return sync_dev;
}
-static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
+static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
struct mmc_data *data,
struct scatterlist *sgl)
{
@@ -639,7 +892,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
sg_dma_address(sgl), 0, 0);
} else {
omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
+ (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
sg_dma_address(sgl), 0, 0);
}
@@ -649,7 +902,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
- mmc_omap_get_dma_sync_dev(host, data),
+ omap_hsmmc_get_dma_sync_dev(host, data),
!(data->flags & MMC_DATA_WRITE));
omap_start_dma(dma_ch);
@@ -658,9 +911,9 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
/*
* DMA call back function
*/
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
{
- struct mmc_omap_host *host = data;
+ struct omap_hsmmc_host *host = data;
if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
@@ -671,7 +924,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
host->dma_sg_idx++;
if (host->dma_sg_idx < host->dma_len) {
/* Fire up the next transfer. */
- mmc_omap_config_dma_params(host, host->data,
+ omap_hsmmc_config_dma_params(host, host->data,
host->data->sg + host->dma_sg_idx);
return;
}
@@ -688,14 +941,14 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
/*
* Routine to configure and start DMA for the MMC card
*/
-static int
-mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
+static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
+ struct mmc_request *req)
{
int dma_ch = 0, ret = 0, err = 1, i;
struct mmc_data *data = req->data;
/* Sanity check: all the SG entries must be aligned by block size. */
- for (i = 0; i < host->dma_len; i++) {
+ for (i = 0; i < data->sg_len; i++) {
struct scatterlist *sgl;
sgl = data->sg + i;
@@ -726,8 +979,8 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
return err;
}
- ret = omap_request_dma(mmc_omap_get_dma_sync_dev(host, data), "MMC/SD",
- mmc_omap_dma_cb,host, &dma_ch);
+ ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+ "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
if (ret != 0) {
dev_err(mmc_dev(host->mmc),
"%s: omap_request_dma() failed with %d\n",
@@ -736,17 +989,18 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
}
host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, mmc_omap_get_dma_dir(host, data));
+ data->sg_len, omap_hsmmc_get_dma_dir(host, data));
host->dma_ch = dma_ch;
host->dma_sg_idx = 0;
- mmc_omap_config_dma_params(host, data, data->sg);
+ omap_hsmmc_config_dma_params(host, data, data->sg);
return 0;
}
-static void set_data_timeout(struct mmc_omap_host *host,
- struct mmc_request *req)
+static void set_data_timeout(struct omap_hsmmc_host *host,
+ unsigned int timeout_ns,
+ unsigned int timeout_clks)
{
unsigned int timeout, cycle_ns;
uint32_t reg, clkd, dto = 0;
@@ -757,8 +1011,8 @@ static void set_data_timeout(struct mmc_omap_host *host,
clkd = 1;
cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
- timeout = req->data->timeout_ns / cycle_ns;
- timeout += req->data->timeout_clks;
+ timeout = timeout_ns / cycle_ns;
+ timeout += timeout_clks;
if (timeout) {
while ((timeout & 0x80000000) == 0) {
dto += 1;
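
set_data_timeout() now takes the timeout as nanoseconds plus extra clocks instead of a struct mmc_request, which is what lets the next hunk program an arbitrary 100 ms timeout for busy-signalling commands that carry no data. The conversion to functional-clock cycles is plain integer arithmetic (the power-of-two DTO rounding that follows is trimmed by the diff context here); a standalone worked example, assuming a 96 MHz fclk and clkd = 1:

#include <stdio.h>

int main(void)
{
	unsigned int fclk_hz = 96000000;	/* assumed functional clock */
	unsigned int clkd = 1;			/* assumed clock divider */
	unsigned int timeout_ns = 100000000U;	/* the 100 ms busy timeout */
	unsigned int timeout_clks = 0;

	unsigned int cycle_ns = 1000000000 / (fclk_hz / clkd);
	unsigned int timeout = timeout_ns / cycle_ns + timeout_clks;

	/* 1000000000 / 96000000 = 10 ns per cycle (integer division),
	 * so 100 ms becomes 10,000,000 fclk cycles before DTO rounding. */
	printf("cycle_ns=%u, timeout=%u cycles\n", cycle_ns, timeout);
	return 0;
}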
@@ -785,22 +1039,28 @@ static void set_data_timeout(struct mmc_omap_host *host,
* Configure block length for MMC/SD cards and initiate the transfer.
*/
static int
-mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
+omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
int ret;
host->data = req->data;
if (req->data == NULL) {
OMAP_HSMMC_WRITE(host->base, BLK, 0);
+ /*
+ * Set an arbitrary 100ms data timeout for commands with
+ * busy signal.
+ */
+ if (req->cmd->flags & MMC_RSP_BUSY)
+ set_data_timeout(host, 100000000U, 0);
return 0;
}
OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
| (req->data->blocks << 16));
- set_data_timeout(host, req);
+ set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
if (host->use_dma) {
- ret = mmc_omap_start_dma_transfer(host, req);
+ ret = omap_hsmmc_start_dma_transfer(host, req);
if (ret != 0) {
dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
return ret;
@@ -812,35 +1072,92 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
/*
* Request function. for read/write operation
*/
-static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int err;
+ /*
+ * Prevent races with the interrupt handler because of unexpected
+ * interrupts, but not if we are already in interrupt context i.e.
+ * retries.
+ */
+ if (!in_interrupt()) {
+ spin_lock_irqsave(&host->irq_lock, host->flags);
+ /*
+ * Protect the card from I/O if there is a possibility
+ * it can be removed.
+ */
+ if (host->protect_card) {
+ if (host->reqs_blocked < 3) {
+ /*
+ * Ensure the controller is left in a consistent
+ * state by resetting the command and data state
+ * machines.
+ */
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ host->reqs_blocked += 1;
+ }
+ req->cmd->error = -EBADF;
+ if (req->data)
+ req->data->error = -EBADF;
+ spin_unlock_irqrestore(&host->irq_lock, host->flags);
+ mmc_request_done(mmc, req);
+ return;
+ } else if (host->reqs_blocked)
+ host->reqs_blocked = 0;
+ }
WARN_ON(host->mrq != NULL);
host->mrq = req;
- mmc_omap_prepare_data(host, req);
- mmc_omap_start_command(host, req->cmd, req->data);
-}
+ err = omap_hsmmc_prepare_data(host, req);
+ if (err) {
+ req->cmd->error = err;
+ if (req->data)
+ req->data->error = err;
+ host->mrq = NULL;
+ if (!in_interrupt())
+ spin_unlock_irqrestore(&host->irq_lock, host->flags);
+ mmc_request_done(mmc, req);
+ return;
+ }
+ omap_hsmmc_start_command(host, req->cmd, req->data);
+}
/* Routine to configure clock values. Exposed API to core */
-static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
u16 dsor = 0;
unsigned long regval;
unsigned long timeout;
u32 con;
+ int do_send_init_stream = 0;
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
- break;
- case MMC_POWER_UP:
- mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd);
- break;
+ mmc_host_enable(host->mmc);
+
+ if (ios->power_mode != host->power_mode) {
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ mmc_slot(host).set_power(host->dev, host->slot_id,
+ 0, 0);
+ host->vdd = 0;
+ break;
+ case MMC_POWER_UP:
+ mmc_slot(host).set_power(host->dev, host->slot_id,
+ 1, ios->vdd);
+ host->vdd = ios->vdd;
+ break;
+ case MMC_POWER_ON:
+ do_send_init_stream = 1;
+ break;
+ }
+ host->power_mode = ios->power_mode;
}
+ /* FIXME: set registers based only on changes to ios */
+
con = OMAP_HSMMC_READ(host->base, CON);
switch (mmc->ios.bus_width) {
case MMC_BUS_WIDTH_8:
@@ -870,8 +1187,8 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* MMC_POWER_UP upon recalculating the voltage.
* vdd 1.8v.
*/
- if (omap_mmc_switch_opcond(host, ios->vdd) != 0)
- dev_dbg(mmc_dev(host->mmc),
+ if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
+ dev_dbg(mmc_dev(host->mmc),
"Switch operation failed\n");
}
}
@@ -887,7 +1204,7 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (dsor > 250)
dsor = 250;
}
- omap_mmc_stop_clock(host);
+ omap_hsmmc_stop_clock(host);
regval = OMAP_HSMMC_READ(host->base, SYSCTL);
regval = regval & ~(CLKD_MASK);
regval = regval | (dsor << 6) | (DTO << 16);
@@ -897,42 +1214,47 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Wait till the ICS bit is set */
timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2
+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
&& time_before(jiffies, timeout))
msleep(1);
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
- if (ios->power_mode == MMC_POWER_ON)
+ if (do_send_init_stream)
send_init_stream(host);
+ con = OMAP_HSMMC_READ(host->base, CON);
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
- OMAP_HSMMC_WRITE(host->base, CON,
- OMAP_HSMMC_READ(host->base, CON) | OD);
+ OMAP_HSMMC_WRITE(host->base, CON, con | OD);
+ else
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+
+ if (host->power_mode == MMC_POWER_OFF)
+ mmc_host_disable(host->mmc);
+ else
+ mmc_host_lazy_disable(host->mmc);
}
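
Both omap_hsmmc_context_restore() and omap_hsmmc_set_ios() above derive the clock divisor the same way: integer-divide the 96 MHz master clock by the requested rate, bump the result when the rounding left the output above the request, and clamp to 250. A standalone worked example of that rule for the driver's two boundary rates (f_min = 400 kHz, f_max = 52 MHz):

#include <stdio.h>

#define OMAP_MMC_MASTER_CLOCK 96000000

static unsigned int calc_dsor(unsigned int hz)
{
	unsigned int dsor = OMAP_MMC_MASTER_CLOCK / hz;

	if (dsor < 1)
		dsor = 1;
	if (OMAP_MMC_MASTER_CLOCK / dsor > hz)
		dsor++;
	if (dsor > 250)
		dsor = 250;
	return dsor;
}

int main(void)
{
	/* 96000000 / 400000 = 240 exactly, so dsor stays 240. */
	printf("400 kHz -> dsor %u\n", calc_dsor(400000));
	/* 96000000 / 52000000 = 1, but 96 MHz > 52 MHz, so dsor is
	 * bumped to 2 and the card actually runs at 48 MHz. */
	printf("52 MHz  -> dsor %u\n", calc_dsor(52000000));
	return 0;
}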
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
- struct omap_mmc_platform_data *pdata = host->pdata;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
- if (!pdata->slots[0].card_detect)
+ if (!mmc_slot(host).card_detect)
return -ENOSYS;
- return pdata->slots[0].card_detect(pdata->slots[0].card_detect_irq);
+ return mmc_slot(host).card_detect(mmc_slot(host).card_detect_irq);
}
static int omap_hsmmc_get_ro(struct mmc_host *mmc)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
- struct omap_mmc_platform_data *pdata = host->pdata;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
- if (!pdata->slots[0].get_ro)
+ if (!mmc_slot(host).get_ro)
return -ENOSYS;
- return pdata->slots[0].get_ro(host->dev, 0);
+ return mmc_slot(host).get_ro(host->dev, 0);
}
-static void omap_hsmmc_init(struct mmc_omap_host *host)
+static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
u32 hctl, capa, value;
@@ -959,19 +1281,340 @@ static void omap_hsmmc_init(struct mmc_omap_host *host)
set_sd_bus_power(host);
}
-static struct mmc_host_ops mmc_omap_ops = {
- .request = omap_mmc_request,
- .set_ios = omap_mmc_set_ios,
+/*
+ * Dynamic power saving handling, FSM:
+ * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
+ *    ^___________|          |                      |
+ *    |______________________|______________________|
+ *
+ * ENABLED: mmc host is fully functional
+ * DISABLED: fclk is off
+ * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
+ * REGSLEEP: fclk is off, voltage regulator is asleep
+ * OFF: fclk is off, voltage regulator is off
+ *
+ * Transition handlers return the timeout for the next state transition
+ * or negative error.
+ */
+
+enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
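
The handlers below implement the diagram above; each one returns the idle delay before the core attempts the next, deeper state, and mmc_set_disable_delay() in probe arms the first step. An illustrative summary table (hypothetical, not used by the driver, which dispatches on host->dpm_state in omap_hsmmc_enable()/omap_hsmmc_disable() instead) of how the timeouts defined earlier chain together on the removable-card path:

/* Illustrative only -- not part of the driver. */
static const struct {
	int from;
	int to;
	unsigned int idle_ms;	/* further idle time before the step */
} dpm_path_sketch[] = {
	{ ENABLED,  DISABLED, OMAP_MMC_DISABLED_TIMEOUT }, /* 100 ms */
	{ DISABLED, REGSLEEP, OMAP_MMC_SLEEP_TIMEOUT },    /* + 1000 ms, or
				CARDSLEEP when the card supports sleep */
	{ REGSLEEP, OFF,      OMAP_MMC_OFF_TIMEOUT },      /* + 8000 ms, only
				when card presence can still be detected */
};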
+
+/* Handler for [ENABLED -> DISABLED] transition */
+static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
+{
+ omap_hsmmc_context_save(host);
+ clk_disable(host->fclk);
+ host->dpm_state = DISABLED;
+
+ dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n");
+
+ if (host->power_mode == MMC_POWER_OFF)
+ return 0;
+
+ return msecs_to_jiffies(OMAP_MMC_SLEEP_TIMEOUT);
+}
+
+/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
+static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
+{
+ int err, new_state;
+
+ if (!mmc_try_claim_host(host->mmc))
+ return 0;
+
+ clk_enable(host->fclk);
+ omap_hsmmc_context_restore(host);
+ if (mmc_card_can_sleep(host->mmc)) {
+ err = mmc_card_sleep(host->mmc);
+ if (err < 0) {
+ clk_disable(host->fclk);
+ mmc_release_host(host->mmc);
+ return err;
+ }
+ new_state = CARDSLEEP;
+ } else {
+ new_state = REGSLEEP;
+ }
+ if (mmc_slot(host).set_sleep)
+ mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
+ new_state == CARDSLEEP);
+ /* FIXME: turn off bus power and perhaps interrupts too */
+ clk_disable(host->fclk);
+ host->dpm_state = new_state;
+
+ mmc_release_host(host->mmc);
+
+ dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
+ host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+ mmc_slot(host).card_detect ||
+ (mmc_slot(host).get_cover_state &&
+ mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
+ return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
+
+ return 0;
+}
+
+/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
+static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
+{
+ if (!mmc_try_claim_host(host->mmc))
+ return 0;
+
+ if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
+ mmc_slot(host).card_detect ||
+ (mmc_slot(host).get_cover_state &&
+ mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) {
+ mmc_release_host(host->mmc);
+ return 0;
+ }
+
+ mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
+ host->vdd = 0;
+ host->power_mode = MMC_POWER_OFF;
+
+ dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
+ host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+
+ host->dpm_state = OFF;
+
+ mmc_release_host(host->mmc);
+
+ return 0;
+}
+
+/* Handler for [DISABLED -> ENABLED] transition */
+static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
+{
+ int err;
+
+ err = clk_enable(host->fclk);
+ if (err < 0)
+ return err;
+
+ omap_hsmmc_context_restore(host);
+ host->dpm_state = ENABLED;
+
+ dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
+
+ return 0;
+}
+
+/* Handler for [SLEEP -> ENABLED] transition */
+static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host)
+{
+ if (!mmc_try_claim_host(host->mmc))
+ return 0;
+
+ clk_enable(host->fclk);
+ omap_hsmmc_context_restore(host);
+ if (mmc_slot(host).set_sleep)
+ mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
+ host->vdd, host->dpm_state == CARDSLEEP);
+ if (mmc_card_can_sleep(host->mmc))
+ mmc_card_awake(host->mmc);
+
+ dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
+ host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+
+ host->dpm_state = ENABLED;
+
+ mmc_release_host(host->mmc);
+
+ return 0;
+}
+
+/* Handler for [OFF -> ENABLED] transition */
+static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
+{
+ clk_enable(host->fclk);
+
+ omap_hsmmc_context_restore(host);
+ omap_hsmmc_conf_bus_power(host);
+ mmc_power_restore_host(host->mmc);
+
+ host->dpm_state = ENABLED;
+
+ dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n");
+
+ return 0;
+}
+
+/*
+ * Bring MMC host to ENABLED from any other PM state.
+ */
+static int omap_hsmmc_enable(struct mmc_host *mmc)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ switch (host->dpm_state) {
+ case DISABLED:
+ return omap_hsmmc_disabled_to_enabled(host);
+ case CARDSLEEP:
+ case REGSLEEP:
+ return omap_hsmmc_sleep_to_enabled(host);
+ case OFF:
+ return omap_hsmmc_off_to_enabled(host);
+ default:
+ dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
+ return -EINVAL;
+ }
+}
+
+/*
+ * Bring MMC host in PM state (one level deeper).
+ */
+static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ switch (host->dpm_state) {
+ case ENABLED: {
+ int delay;
+
+ delay = omap_hsmmc_enabled_to_disabled(host);
+ if (lazy || delay < 0)
+ return delay;
+ return 0;
+ }
+ case DISABLED:
+ return omap_hsmmc_disabled_to_sleep(host);
+ case CARDSLEEP:
+ case REGSLEEP:
+ return omap_hsmmc_sleep_to_off(host);
+ default:
+ dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
+ return -EINVAL;
+ }
+}
+
+static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int err;
+
+ err = clk_enable(host->fclk);
+ if (err)
+ return err;
+ dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
+ omap_hsmmc_context_restore(host);
+ return 0;
+}
+
+static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ omap_hsmmc_context_save(host);
+ clk_disable(host->fclk);
+ dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n");
+ return 0;
+}
+
+static const struct mmc_host_ops omap_hsmmc_ops = {
+ .enable = omap_hsmmc_enable_fclk,
+ .disable = omap_hsmmc_disable_fclk,
+ .request = omap_hsmmc_request,
+ .set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
.get_ro = omap_hsmmc_get_ro,
/* NYET -- enable_sdio_irq */
};
-static int __init omap_mmc_probe(struct platform_device *pdev)
+static const struct mmc_host_ops omap_hsmmc_ps_ops = {
+ .enable = omap_hsmmc_enable,
+ .disable = omap_hsmmc_disable,
+ .request = omap_hsmmc_request,
+ .set_ios = omap_hsmmc_set_ios,
+ .get_cd = omap_hsmmc_get_cd,
+ .get_ro = omap_hsmmc_get_ro,
+ /* NYET -- enable_sdio_irq */
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
+{
+ struct mmc_host *mmc = s->private;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ int context_loss = 0;
+
+ if (host->pdata->get_context_loss_count)
+ context_loss = host->pdata->get_context_loss_count(host->dev);
+
+ seq_printf(s, "mmc%d:\n"
+ " enabled:\t%d\n"
+ " dpm_state:\t%d\n"
+ " nesting_cnt:\t%d\n"
+ " ctx_loss:\t%d:%d\n"
+ "\nregs:\n",
+ mmc->index, mmc->enabled ? 1 : 0,
+ host->dpm_state, mmc->nesting_cnt,
+ host->context_loss, context_loss);
+
+ if (host->suspended || host->dpm_state == OFF) {
+ seq_printf(s, "host suspended, can't read registers\n");
+ return 0;
+ }
+
+ if (clk_enable(host->fclk) != 0) {
+ seq_printf(s, "can't read the regs\n");
+ return 0;
+ }
+
+ seq_printf(s, "SYSCONFIG:\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, SYSCONFIG));
+ seq_printf(s, "CON:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, CON));
+ seq_printf(s, "HCTL:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, HCTL));
+ seq_printf(s, "SYSCTL:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, SYSCTL));
+ seq_printf(s, "IE:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, IE));
+ seq_printf(s, "ISE:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, ISE));
+ seq_printf(s, "CAPA:\t\t0x%08x\n",
+ OMAP_HSMMC_READ(host->base, CAPA));
+
+ clk_disable(host->fclk);
+
+ return 0;
+}
+
+static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, omap_hsmmc_regs_show, inode->i_private);
+}
+
+static const struct file_operations mmc_regs_fops = {
+ .open = omap_hsmmc_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void omap_hsmmc_debugfs(struct mmc_host *mmc)
+{
+ if (mmc->debugfs_root)
+ debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
+ mmc, &mmc_regs_fops);
+}
+
+#else
+
+static void omap_hsmmc_debugfs(struct mmc_host *mmc)
+{
+}
+
+#endif
+
+static int __init omap_hsmmc_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_host *mmc;
- struct mmc_omap_host *host = NULL;
+ struct omap_hsmmc_host *host = NULL;
struct resource *res;
int ret = 0, irq;
@@ -995,7 +1638,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
if (res == NULL)
return -EBUSY;
- mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto err;
@@ -1013,15 +1656,21 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
host->slot_id = 0;
host->mapbase = res->start;
host->base = ioremap(host->mapbase, SZ_4K);
+ host->power_mode = -1;
platform_set_drvdata(pdev, host);
- INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect);
+ INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
+
+ if (mmc_slot(host).power_saving)
+ mmc->ops = &omap_hsmmc_ps_ops;
+ else
+ mmc->ops = &omap_hsmmc_ops;
- mmc->ops = &mmc_omap_ops;
mmc->f_min = 400000;
mmc->f_max = 52000000;
sema_init(&host->sem, 1);
+ spin_lock_init(&host->irq_lock);
host->iclk = clk_get(&pdev->dev, "ick");
if (IS_ERR(host->iclk)) {
@@ -1037,31 +1686,42 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
goto err1;
}
- if (clk_enable(host->fclk) != 0) {
+ omap_hsmmc_context_save(host);
+
+ mmc->caps |= MMC_CAP_DISABLE;
+ mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT);
+ /* we start off in DISABLED state */
+ host->dpm_state = DISABLED;
+
+ if (mmc_host_enable(host->mmc) != 0) {
clk_put(host->iclk);
clk_put(host->fclk);
goto err1;
}
if (clk_enable(host->iclk) != 0) {
- clk_disable(host->fclk);
+ mmc_host_disable(host->mmc);
clk_put(host->iclk);
clk_put(host->fclk);
goto err1;
}
- host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
- /*
- * MMC can still work without debounce clock.
- */
- if (IS_ERR(host->dbclk))
- dev_warn(mmc_dev(host->mmc), "Failed to get debounce clock\n");
- else
- if (clk_enable(host->dbclk) != 0)
- dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
- " clk failed\n");
+ if (cpu_is_omap2430()) {
+ host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+ /*
+ * MMC can still work without debounce clock.
+ */
+ if (IS_ERR(host->dbclk))
+ dev_warn(mmc_dev(host->mmc),
+ "Failed to get debounce clock\n");
else
- host->dbclk_enabled = 1;
+ host->got_dbclk = 1;
+
+ if (host->got_dbclk)
+ if (clk_enable(host->dbclk) != 0)
+ dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
+ " clk failed\n");
+ }
/* Since we do only SG emulation, we can have as many segs
* as we want. */
@@ -1073,14 +1733,18 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_WAIT_WHILE_BUSY;
- if (pdata->slots[host->slot_id].wires >= 8)
+ if (mmc_slot(host).wires >= 8)
mmc->caps |= MMC_CAP_8_BIT_DATA;
- else if (pdata->slots[host->slot_id].wires >= 4)
+ else if (mmc_slot(host).wires >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- omap_hsmmc_init(host);
+ if (mmc_slot(host).nonremovable)
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+ omap_hsmmc_conf_bus_power(host);
/* Select DMA lines */
switch (host->id) {
@@ -1096,13 +1760,21 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
host->dma_line_tx = OMAP34XX_DMA_MMC3_TX;
host->dma_line_rx = OMAP34XX_DMA_MMC3_RX;
break;
+ case OMAP_MMC4_DEVID:
+ host->dma_line_tx = OMAP44XX_DMA_MMC4_TX;
+ host->dma_line_rx = OMAP44XX_DMA_MMC4_RX;
+ break;
+ case OMAP_MMC5_DEVID:
+ host->dma_line_tx = OMAP44XX_DMA_MMC5_TX;
+ host->dma_line_rx = OMAP44XX_DMA_MMC5_RX;
+ break;
default:
dev_err(mmc_dev(host->mmc), "Invalid MMC id\n");
goto err_irq;
}
/* Request IRQ for MMC operations */
- ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED,
+ ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
@@ -1112,7 +1784,8 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
/* initialize power supplies, gpios, etc */
if (pdata->init != NULL) {
if (pdata->init(&pdev->dev) != 0) {
- dev_dbg(mmc_dev(host->mmc), "late init error\n");
+ dev_dbg(mmc_dev(host->mmc),
+ "Unable to configure MMC IRQs\n");
goto err_irq_cd_init;
}
}
@@ -1121,7 +1794,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
ret = request_irq(mmc_slot(host).card_detect_irq,
- omap_mmc_cd_handler,
+ omap_hsmmc_cd_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
| IRQF_DISABLED,
mmc_hostname(mmc), host);
@@ -1135,21 +1808,26 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ mmc_host_lazy_disable(host->mmc);
+
+ omap_hsmmc_protect_card(host);
+
mmc_add_host(mmc);
- if (host->pdata->slots[host->slot_id].name != NULL) {
+ if (mmc_slot(host).name != NULL) {
ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
if (ret < 0)
goto err_slot_name;
}
- if (mmc_slot(host).card_detect_irq &&
- host->pdata->slots[host->slot_id].get_cover_state) {
+ if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) {
ret = device_create_file(&mmc->class_dev,
&dev_attr_cover_switch);
if (ret < 0)
goto err_cover_switch;
}
+ omap_hsmmc_debugfs(mmc);
+
return 0;
err_cover_switch:
@@ -1161,11 +1839,11 @@ err_irq_cd:
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
- clk_disable(host->fclk);
+ mmc_host_disable(host->mmc);
clk_disable(host->iclk);
clk_put(host->fclk);
clk_put(host->iclk);
- if (host->dbclk_enabled) {
+ if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -1180,12 +1858,13 @@ err:
return ret;
}
-static int omap_mmc_remove(struct platform_device *pdev)
+static int omap_hsmmc_remove(struct platform_device *pdev)
{
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
struct resource *res;
if (host) {
+ mmc_host_enable(host->mmc);
mmc_remove_host(host->mmc);
if (host->pdata->cleanup)
host->pdata->cleanup(&pdev->dev);
@@ -1194,11 +1873,11 @@ static int omap_mmc_remove(struct platform_device *pdev)
free_irq(mmc_slot(host).card_detect_irq, host);
flush_scheduled_work();
- clk_disable(host->fclk);
+ mmc_host_disable(host->mmc);
clk_disable(host->iclk);
clk_put(host->fclk);
clk_put(host->iclk);
- if (host->dbclk_enabled) {
+ if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -1216,36 +1895,51 @@ static int omap_mmc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
int ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
if (host && host->suspended)
return 0;
if (host) {
+ host->suspended = 1;
+ if (host->pdata->suspend) {
+ ret = host->pdata->suspend(&pdev->dev,
+ host->slot_id);
+ if (ret) {
+ dev_dbg(mmc_dev(host->mmc),
+ "Unable to handle MMC board"
+ " level suspend\n");
+ host->suspended = 0;
+ return ret;
+ }
+ }
+ cancel_work_sync(&host->mmc_carddetect_work);
+ mmc_host_enable(host->mmc);
ret = mmc_suspend_host(host->mmc, state);
if (ret == 0) {
- host->suspended = 1;
-
OMAP_HSMMC_WRITE(host->base, ISE, 0);
OMAP_HSMMC_WRITE(host->base, IE, 0);
- if (host->pdata->suspend) {
- ret = host->pdata->suspend(&pdev->dev,
- host->slot_id);
- if (ret)
- dev_dbg(mmc_dev(host->mmc),
- "Unable to handle MMC board"
- " level suspend\n");
- }
OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- clk_disable(host->fclk);
+ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+ mmc_host_disable(host->mmc);
clk_disable(host->iclk);
- clk_disable(host->dbclk);
+ if (host->got_dbclk)
+ clk_disable(host->dbclk);
+ } else {
+ host->suspended = 0;
+ if (host->pdata->resume) {
+ ret = host->pdata->resume(&pdev->dev,
+ host->slot_id);
+ if (ret)
+ dev_dbg(mmc_dev(host->mmc),
+ "Unmask interrupt failed\n");
+ }
+ mmc_host_disable(host->mmc);
}
}
@@ -1253,32 +1947,28 @@ static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
}
/* Routine to resume the MMC device */
-static int omap_mmc_resume(struct platform_device *pdev)
+static int omap_hsmmc_resume(struct platform_device *pdev)
{
int ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
if (host && !host->suspended)
return 0;
if (host) {
-
- ret = clk_enable(host->fclk);
+ ret = clk_enable(host->iclk);
if (ret)
goto clk_en_err;
- ret = clk_enable(host->iclk);
- if (ret) {
- clk_disable(host->fclk);
- clk_put(host->fclk);
+ if (mmc_host_enable(host->mmc) != 0) {
+ clk_disable(host->iclk);
goto clk_en_err;
}
- if (clk_enable(host->dbclk) != 0)
- dev_dbg(mmc_dev(host->mmc),
- "Enabling debounce clk failed\n");
+ if (host->got_dbclk)
+ clk_enable(host->dbclk);
- omap_hsmmc_init(host);
+ omap_hsmmc_conf_bus_power(host);
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev, host->slot_id);
@@ -1287,10 +1977,14 @@ static int omap_mmc_resume(struct platform_device *pdev)
"Unmask interrupt failed\n");
}
+ omap_hsmmc_protect_card(host);
+
/* Notify the core to resume the host */
ret = mmc_resume_host(host->mmc);
if (ret == 0)
host->suspended = 0;
+
+ mmc_host_lazy_disable(host->mmc);
}
return ret;
@@ -1302,35 +1996,34 @@ clk_en_err:
}
#else
-#define omap_mmc_suspend NULL
-#define omap_mmc_resume NULL
+#define omap_hsmmc_suspend NULL
+#define omap_hsmmc_resume NULL
#endif
-static struct platform_driver omap_mmc_driver = {
- .probe = omap_mmc_probe,
- .remove = omap_mmc_remove,
- .suspend = omap_mmc_suspend,
- .resume = omap_mmc_resume,
+static struct platform_driver omap_hsmmc_driver = {
+ .remove = omap_hsmmc_remove,
+ .suspend = omap_hsmmc_suspend,
+ .resume = omap_hsmmc_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
-static int __init omap_mmc_init(void)
+static int __init omap_hsmmc_init(void)
{
/* Register the MMC driver */
- return platform_driver_register(&omap_mmc_driver);
+ return platform_driver_register(&omap_hsmmc_driver);
}
-static void __exit omap_mmc_cleanup(void)
+static void __exit omap_hsmmc_cleanup(void)
{
/* Unregister MMC driver */
- platform_driver_unregister(&omap_mmc_driver);
+ platform_driver_unregister(&omap_hsmmc_driver);
}
-module_init(omap_mmc_init);
-module_exit(omap_mmc_cleanup);
+module_init(omap_hsmmc_init);
+module_exit(omap_hsmmc_cleanup);
MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e55ac792d68..5e0b1529964 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
#include <asm/sizes.h>
@@ -96,10 +97,18 @@ static inline void pxamci_init_ocr(struct pxamci_host *host)
static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
{
+ int on;
+
#ifdef CONFIG_REGULATOR
if (host->vcc)
mmc_regulator_set_ocr(host->vcc, vdd);
#endif
+ if (!host->vcc && host->pdata &&
+ gpio_is_valid(host->pdata->gpio_power)) {
+ on = ((1 << vdd) & host->pdata->ocr_mask);
+ gpio_set_value(host->pdata->gpio_power,
+ !!on ^ host->pdata->gpio_power_invert);
+ }
if (!host->vcc && host->pdata && host->pdata->setpower)
host->pdata->setpower(mmc_dev(host->mmc), vdd);
}
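
pxamci_set_power() now drives an optional power GPIO: (1 << vdd) & ocr_mask decides whether the requested voltage means "on", !! collapses that mask to 0 or 1, and gpio_power_invert flips the level for boards whose power rail is active-low. A standalone check of the expression:

#include <stdio.h>

/* Level pxamci_set_power() would drive on the power GPIO. */
static int power_gpio_level(int on, int invert)
{
	return !!on ^ invert;	/* invert is 0 or 1 from platform data */
}

int main(void)
{
	for (int invert = 0; invert <= 1; invert++)
		for (int on = 0; on <= 1; on++)
			printf("on=%d invert=%d -> gpio=%d\n",
			       on, invert, power_gpio_level(on, invert));
	/* Active-high boards get 0/1, active-low (invert=1) boards 1/0. */
	return 0;
}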
@@ -421,6 +430,12 @@ static int pxamci_get_ro(struct mmc_host *mmc)
{
struct pxamci_host *host = mmc_priv(mmc);
+ if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
+ if (host->pdata->gpio_card_ro_invert)
+ return !gpio_get_value(host->pdata->gpio_card_ro);
+ else
+ return gpio_get_value(host->pdata->gpio_card_ro);
+ }
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
@@ -534,7 +549,7 @@ static int pxamci_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
struct resource *r, *dmarx, *dmatx;
- int ret, irq;
+ int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -661,13 +676,63 @@ static int pxamci_probe(struct platform_device *pdev)
}
host->dma_drcmrtx = dmatx->start;
+ if (host->pdata) {
+ gpio_cd = host->pdata->gpio_card_detect;
+ gpio_ro = host->pdata->gpio_card_ro;
+ gpio_power = host->pdata->gpio_power;
+ }
+ if (gpio_is_valid(gpio_power)) {
+ ret = gpio_request(gpio_power, "mmc card power");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+ goto out;
+ }
+ gpio_direction_output(gpio_power,
+ host->pdata->gpio_power_invert);
+ }
+ if (gpio_is_valid(gpio_ro)) {
+ ret = gpio_request(gpio_ro, "mmc card read only");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power);
+ goto err_gpio_ro;
+ }
+ gpio_direction_input(gpio_ro);
+ }
+ if (gpio_is_valid(gpio_cd)) {
+ ret = gpio_request(gpio_cd, "mmc card detect");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power);
+ goto err_gpio_cd;
+ }
+ gpio_direction_input(gpio_cd);
+
+ ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "mmc card detect", mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request card detect IRQ\n");
+ goto err_request_irq;
+ }
+ }
+
if (host->pdata && host->pdata->init)
host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
+ if (gpio_is_valid(gpio_power) && host->pdata->setpower)
+ dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
+ if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
+ dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
+
mmc_add_host(mmc);
return 0;
+err_request_irq:
+ gpio_free(gpio_cd);
+err_gpio_cd:
+ gpio_free(gpio_ro);
+err_gpio_ro:
+ gpio_free(gpio_power);
out:
if (host) {
if (host->dma >= 0)
@@ -688,12 +753,26 @@ static int pxamci_probe(struct platform_device *pdev)
static int pxamci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
+ int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
platform_set_drvdata(pdev, NULL);
if (mmc) {
struct pxamci_host *host = mmc_priv(mmc);
+ if (host->pdata) {
+ gpio_cd = host->pdata->gpio_card_detect;
+ gpio_ro = host->pdata->gpio_card_ro;
+ gpio_power = host->pdata->gpio_power;
+ }
+ if (gpio_is_valid(gpio_cd)) {
+ free_irq(gpio_to_irq(gpio_cd), mmc);
+ gpio_free(gpio_cd);
+ }
+ if (gpio_is_valid(gpio_ro))
+ gpio_free(gpio_ro);
+ if (gpio_is_valid(gpio_power))
+ gpio_free(gpio_power);
if (host->vcc)
regulator_put(host->vcc);
@@ -725,20 +804,20 @@ static int pxamci_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
+static int pxamci_suspend(struct device *dev)
{
- struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct mmc_host *mmc = dev_get_drvdata(dev);
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
return ret;
}
-static int pxamci_resume(struct platform_device *dev)
+static int pxamci_resume(struct device *dev)
{
- struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct mmc_host *mmc = dev_get_drvdata(dev);
int ret = 0;
if (mmc)
@@ -746,19 +825,22 @@ static int pxamci_resume(struct platform_device *dev)
return ret;
}
-#else
-#define pxamci_suspend NULL
-#define pxamci_resume NULL
+
+static struct dev_pm_ops pxamci_pm_ops = {
+ .suspend = pxamci_suspend,
+ .resume = pxamci_resume,
+};
#endif
static struct platform_driver pxamci_driver = {
.probe = pxamci_probe,
.remove = pxamci_remove,
- .suspend = pxamci_suspend,
- .resume = pxamci_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pxamci_pm_ops,
+#endif
},
};
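For orientation, the board-side counterpart of this change is purely declarative: the card-detect, read-only and power lines are now described through platform data rather than open-coded callbacks. A minimal, hypothetical sketch, assuming the platform data struct keeps its pxamci_platform_data name and using placeholder GPIO numbers:

/* Hypothetical board support sketch; GPIO numbers are placeholders. */
static struct pxamci_platform_data board_mci_platform_data = {
	.gpio_card_detect	= 23,	/* card-detect input */
	.gpio_card_ro		= 24,	/* write-protect switch input */
	.gpio_power		= 25,	/* slot power enable output */
	.gpio_power_invert	= 0,	/* power line is active high */
};

pxamci_probe() now requests these GPIOs itself, hooks the card-detect line up to pxamci_detect_irq(), and warns when a board also supplies the legacy get_ro()/setpower() callbacks.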
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 1e8aa590bb3..01ab916c280 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mmc/host.h>
+#include <asm/machdep.h>
#include "sdhci.h"
struct sdhci_of_data {
@@ -48,6 +49,8 @@ struct sdhci_of_host {
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001
+#define ESDHC_HOST_CONTROL_RES 0x05
+
static u32 esdhc_readl(struct sdhci_host *host, int reg)
{
return in_be32(host->ioaddr + reg);
@@ -109,13 +112,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
int base = reg & ~0x3;
int shift = (reg & 0x3) * 8;
+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
+ if (reg == SDHCI_HOST_CONTROL)
+ val &= ~ESDHC_HOST_CONTROL_RES;
+
clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
}
static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
{
- int div;
int pre_div = 2;
+ int div = 1;
clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
@@ -123,19 +130,17 @@ static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
if (clock == 0)
goto out;
- if (host->max_clk / 16 > clock) {
- for (; pre_div < 256; pre_div *= 2) {
- if (host->max_clk / pre_div < clock * 16)
- break;
- }
- }
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
- for (div = 1; div <= 16; div++) {
- if (host->max_clk / (div * pre_div) <= clock)
- break;
- }
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
pre_div >>= 1;
+ div--;
setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
@@ -165,19 +170,12 @@ static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
return of_host->clock / 256 / 16;
}
-static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
-
- return of_host->clock / 1000;
-}
-
static struct sdhci_of_data sdhci_esdhc = {
.quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_NONSTANDARD_CLOCK |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_PIO_NEEDS_DELAY |
SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
SDHCI_QUIRK_NO_CARD_NO_RESET,
@@ -192,7 +190,6 @@ static struct sdhci_of_data sdhci_esdhc = {
.enable_dma = esdhc_enable_dma,
.get_max_clock = esdhc_get_max_clock,
.get_min_clock = esdhc_get_min_clock,
- .get_timeout_clock = esdhc_get_timeout_clock,
},
};
@@ -219,6 +216,15 @@ static int sdhci_of_resume(struct of_device *ofdev)
#endif
+static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
+{
+ if (of_get_property(np, "sdhci,wp-inverted", NULL))
+ return true;
+
+ /* Old device trees don't have the wp-inverted property. */
+ return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+}
+
static int __devinit sdhci_of_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
@@ -261,6 +267,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
if (of_get_property(np, "sdhci,1-bit-only", NULL))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+ if (sdhci_of_wp_inverted(np))
+ host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+
clk = of_get_property(np, "clock-frequency", &size);
if (clk && size == sizeof(*clk) && *clk)
of_host->clock = *clk;
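The rewritten divider search in esdhc_set_clock() is easier to follow in isolation; the sketch below repeats the same computation as a pure helper (illustrative only, the function name is not from the driver):

/* Illustrative only: mirrors the pre_div/div search in esdhc_set_clock(). */
static unsigned int esdhc_pick_dividers(unsigned int max_clk, unsigned int clock,
					int *pre_div_out, int *div_out)
{
	int pre_div = 2, div = 1;

	while (max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;
	while (max_clk / pre_div / div > clock && div < 16)
		div++;

	*pre_div_out = pre_div;		/* the register takes pre_div >> 1 */
	*div_out = div;			/* the register takes div - 1 */
	return max_clk / pre_div / div;	/* the SD clock actually achieved */
}

With a 133 MHz base clock and a 25 MHz request this settles on pre_div = 2 and div = 3, roughly 22.2 MHz, which the new dev_dbg() line reports.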
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 2f15cc17d88..e0356644d1a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -83,7 +83,8 @@ static int ricoh_probe(struct sdhci_pci_chip *chip)
if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET;
- if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)
+ if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
+ chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
return 0;
@@ -395,7 +396,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
- (host->flags & SDHCI_USE_DMA)) {
+ (host->flags & SDHCI_USE_SDMA)) {
dev_warn(&pdev->dev, "Will use DMA mode even though HW "
"doesn't fully claim to support it.\n");
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index fc96f8cb9c0..c279fbc4c2e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -591,6 +591,9 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
target_timeout = data->timeout_ns / 1000 +
data->timeout_clks / host->clock;
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ host->timeout_clk = host->clock / 1000;
+
/*
* Figure out needed cycles.
* We do this in steps in order to fit inside a 32 bit int.
@@ -652,7 +655,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
count = sdhci_calc_timeout(host, data);
sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
- if (host->flags & SDHCI_USE_DMA)
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
host->flags |= SDHCI_REQ_USE_DMA;
/*
@@ -991,8 +994,8 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
- /* Wait max 10 ms */
- timeout = 10;
+ /* Wait max 20 ms */
+ timeout = 20;
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
@@ -1597,7 +1600,7 @@ int sdhci_resume_host(struct sdhci_host *host)
{
int ret;
- if (host->flags & SDHCI_USE_DMA) {
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
}
@@ -1678,23 +1681,20 @@ int sdhci_add_host(struct sdhci_host *host)
caps = sdhci_readl(host, SDHCI_CAPABILITIES);
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
- host->flags |= SDHCI_USE_DMA;
- else if (!(caps & SDHCI_CAN_DO_DMA))
- DBG("Controller doesn't have DMA capability\n");
+ host->flags |= SDHCI_USE_SDMA;
+ else if (!(caps & SDHCI_CAN_DO_SDMA))
+ DBG("Controller doesn't have SDMA capability\n");
else
- host->flags |= SDHCI_USE_DMA;
+ host->flags |= SDHCI_USE_SDMA;
if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
- (host->flags & SDHCI_USE_DMA)) {
+ (host->flags & SDHCI_USE_SDMA)) {
DBG("Disabling DMA as it is marked broken\n");
- host->flags &= ~SDHCI_USE_DMA;
+ host->flags &= ~SDHCI_USE_SDMA;
}
- if (host->flags & SDHCI_USE_DMA) {
- if ((host->version >= SDHCI_SPEC_200) &&
- (caps & SDHCI_CAN_DO_ADMA2))
- host->flags |= SDHCI_USE_ADMA;
- }
+ if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
+ host->flags |= SDHCI_USE_ADMA;
if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
(host->flags & SDHCI_USE_ADMA)) {
@@ -1702,13 +1702,14 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_ADMA;
}
- if (host->flags & SDHCI_USE_DMA) {
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma) {
if (host->ops->enable_dma(host)) {
printk(KERN_WARNING "%s: No suitable DMA "
"available. Falling back to PIO.\n",
mmc_hostname(mmc));
- host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
+ host->flags &=
+ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
}
}
}
@@ -1736,7 +1737,7 @@ int sdhci_add_host(struct sdhci_host *host)
* mask, but PIO does not need the hw shim so we set a new
* mask here in that case.
*/
- if (!(host->flags & SDHCI_USE_DMA)) {
+ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
host->dma_mask = DMA_BIT_MASK(64);
mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
}
@@ -1757,13 +1758,15 @@ int sdhci_add_host(struct sdhci_host *host)
host->timeout_clk =
(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
if (host->timeout_clk == 0) {
- if (!host->ops->get_timeout_clock) {
+ if (host->ops->get_timeout_clock) {
+ host->timeout_clk = host->ops->get_timeout_clock(host);
+ } else if (!(host->quirks &
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
printk(KERN_ERR
"%s: Hardware doesn't specify timeout clock "
"frequency.\n", mmc_hostname(mmc));
return -ENODEV;
}
- host->timeout_clk = host->ops->get_timeout_clock(host);
}
if (caps & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
@@ -1772,7 +1775,8 @@ int sdhci_add_host(struct sdhci_host *host)
* Set host parameters.
*/
mmc->ops = &sdhci_ops;
- if (host->ops->get_min_clock)
+ if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
+ host->ops->set_clock && host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else
mmc->f_min = host->max_clk / 256;
@@ -1810,7 +1814,7 @@ int sdhci_add_host(struct sdhci_host *host)
*/
if (host->flags & SDHCI_USE_ADMA)
mmc->max_hw_segs = 128;
- else if (host->flags & SDHCI_USE_DMA)
+ else if (host->flags & SDHCI_USE_SDMA)
mmc->max_hw_segs = 1;
else /* PIO */
mmc->max_hw_segs = 128;
@@ -1893,10 +1897,10 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_add_host(mmc);
- printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
+ printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
- (host->flags & SDHCI_USE_ADMA)?"A":"",
- (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
+ (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
+ (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
sdhci_enable_card_detection(host);
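The new SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK quirk changes where the data-timeout reference clock comes from; a stand-alone sketch of that decision (illustrative only, not driver code):

/* Illustrative only: data-timeout reference clock selection after this change. */
static unsigned int pick_timeout_clk(unsigned int caps_timeout_clk,
				     unsigned int sd_clock_hz,
				     int uses_sdclk_quirk)
{
	if (uses_sdclk_quirk)
		return sd_clock_hz / 1000;	/* kHz, mirrors host->clock / 1000 per request */
	return caps_timeout_clk;		/* whatever the capabilities register reports */
}

sdhci_add_host() correspondingly stops treating a missing timeout-clock field as fatal when the quirk is set, and eSDHC selects the quirk instead of providing get_timeout_clock().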
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c77e9ff3022..ce5f1d73dc0 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -143,7 +143,7 @@
#define SDHCI_CAN_DO_ADMA2 0x00080000
#define SDHCI_CAN_DO_ADMA1 0x00100000
#define SDHCI_CAN_DO_HISPD 0x00200000
-#define SDHCI_CAN_DO_DMA 0x00400000
+#define SDHCI_CAN_DO_SDMA 0x00400000
#define SDHCI_CAN_VDD_330 0x01000000
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
@@ -232,6 +232,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
/* Controller needs 10ms delay between applying power and clock */
#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
+/* Controller uses SDCLK instead of TMCLK for data timeouts */
+#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
@@ -250,7 +252,7 @@ struct sdhci_host {
spinlock_t lock; /* Mutex */
int flags; /* Host attributes */
-#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */
+#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index b8e35a0b4d7..ecf90f5c97c 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -25,6 +25,14 @@ config MTD_DEBUG_VERBOSE
help
Determines the verbosity level of the MTD debugging messages.
+config MTD_TESTS
+ tristate "MTD tests support"
+ depends on m
+ help
+ This option includes various MTD tests into compilation. The tests
+ should normally be compiled as kernel modules. The modules perform
+ various checks and verifications when loaded.
+
config MTD_CONCAT
tristate "MTD concatenating support"
help
@@ -45,14 +53,6 @@ config MTD_PARTITIONS
devices. Partitioning on NFTL 'devices' is a different - that's the
'normal' form of partitioning used on a block device.
-config MTD_TESTS
- tristate "MTD tests support"
- depends on m
- help
- This option includes various MTD tests into compilation. The tests
- should normally be compiled as kernel modules. The modules perform
- various checks and verifications when loaded.
-
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
depends on MTD_PARTITIONS
@@ -159,7 +159,7 @@ config MTD_AFS_PARTS
config MTD_OF_PARTS
tristate "Flash partition map based on OF description"
- depends on PPC_OF && MTD_PARTITIONS
+ depends on (MICROBLAZE || PPC_OF) && MTD_PARTITIONS
help
This provides a partition parsing function which derives
the partition map from the children of the flash node,
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index d072ca5be68..cec7ab98b2a 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -239,7 +239,7 @@ static int parse_afs_partitions(struct mtd_info *mtd,
parts[idx].offset = img_ptr;
parts[idx].mask_flags = 0;
- printk(" mtd%d: at 0x%08x, %5dKB, %8u, %s\n",
+ printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n",
idx, img_ptr, parts[idx].size / 1024,
iis.imageNumber, str);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 61ea833e090..94bb61e1904 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -282,16 +282,6 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
}
}
-static void fixup_M29W128G_write_buffer(struct mtd_info *mtd, void *param)
-{
- struct map_info *map = mtd->priv;
- struct cfi_private *cfi = map->fldrv_priv;
- if (cfi->cfiq->BufWriteTimeoutTyp) {
- pr_warning("Don't use write buffer on ST flash M29W128G\n");
- cfi->cfiq->BufWriteTimeoutTyp = 0;
- }
-}
-
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
@@ -308,7 +298,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
- { CFI_MFR_ST, 0x227E, fixup_M29W128G_write_buffer, NULL, },
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 34d40e25d31..c5a84fda541 100644..100755
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -81,6 +81,10 @@ void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
{
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ /* M29W128G flashes require an additional reset command
+ when exiting QRY mode */
+ if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index ccc4cfc7e4b..736a3be265f 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -111,6 +111,11 @@
#define I28F320B3B 0x8897
#define I28F640B3T 0x8898
#define I28F640B3B 0x8899
+#define I28F640C3B 0x88CD
+#define I28F160F3T 0x88F3
+#define I28F160F3B 0x88F4
+#define I28F160C3T 0x88C2
+#define I28F160C3B 0x88C3
#define I82802AB 0x00ad
#define I82802AC 0x00ac
@@ -150,6 +155,7 @@
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
+#define PSD4256G6V 0x00e9
/* SST */
#define SST29EE020 0x0010
@@ -201,6 +207,7 @@ enum uaddr {
MTD_UADDR_0x0555_0x02AA,
MTD_UADDR_0x0555_0x0AAA,
MTD_UADDR_0x5555_0x2AAA,
+ MTD_UADDR_0x0AAA_0x0554,
MTD_UADDR_0x0AAA_0x0555,
MTD_UADDR_0xAAAA_0x5555,
MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
@@ -245,6 +252,11 @@ static const struct unlock_addr unlock_addrs[] = {
.addr2 = 0x2aaa
},
+ [MTD_UADDR_0x0AAA_0x0554] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0554
+ },
+
[MTD_UADDR_0x0AAA_0x0555] = {
.addr1 = 0x0AAA,
.addr2 = 0x0555
@@ -1103,6 +1115,19 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
.mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F640C3B,
+ .name = "Intel 28F640C3B",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 127),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1156,8 +1181,8 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
- .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
- .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 3,
@@ -1726,6 +1751,18 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
+ .mfr_id = 0xff00 | MANUFACTURER_ST,
+ .dev_id = 0xff00 | PSD4256G6V,
+ .name = "ST PSD4256G6V",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0x0AAA_0x0554,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 325fab92a62..c222514bb70 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -104,6 +104,16 @@ config M25PXX_USE_FAST_READ
help
This option enables FAST_READ access supported by ST M25Pxx.
+config MTD_SST25L
+ tristate "Support SST25L (non JEDEC) SPI Flash chips"
+ depends on SPI_MASTER
+ help
+ This enables access to the non JEDEC SST25L SPI flash chips, used
+ for program and data storage.
+
+ Set up your spi devices with the right board-specific platform data,
+ if you want to specify device partitioning.
+
config MTD_SLRAM
tristate "Uncached system RAM"
help
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0993d5cf392..ab5c9b92ac8 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_SST25L) += sst25l.o
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 578de1c67bf..f4359fe7150 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -393,7 +393,8 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
* erase range is aligned with the erase size which is in
* effect here.
*/
- if (instr->addr & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL);
+ if (i < 0 || (instr->addr & (mtd->eraseregions[i].erasesize - 1)))
+ return -EINVAL;
/* Remember the erase region we start on */
first = i;
@@ -409,7 +410,8 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
i--;
/* is the end aligned on a block boundary? */
- if ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL);
+ if (i < 0 || ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1)))
+ return -EINVAL;
addr = instr->addr;
len = instr->len;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 10ed195c0c1..379c316f329 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -44,6 +44,11 @@
#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
+/* Used for SST flashes only. */
+#define OPCODE_BP 0x02 /* Byte program */
+#define OPCODE_WRDI 0x04 /* Write disable */
+#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -132,6 +137,15 @@ static inline int write_enable(struct m25p *flash)
return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}
+/*
+ * Send write disable instruction to the chip.
+ */
+static inline int write_disable(struct m25p *flash)
+{
+ u8 code = OPCODE_WRDI;
+
+ return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+}
/*
* Service routine to read status register until ready, or timeout occurs.
@@ -454,6 +468,111 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
return 0;
}
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ struct spi_transfer t[2];
+ struct spi_message m;
+ size_t actual;
+ int cmd_sz, ret;
+
+ if (retlen)
+ *retlen = 0;
+
+ /* sanity checks */
+ if (!len)
+ return 0;
+
+ if (to + len > flash->mtd.size)
+ return -EINVAL;
+
+ spi_message_init(&m);
+ memset(t, 0, (sizeof t));
+
+ t[0].tx_buf = flash->command;
+ t[0].len = CMD_SIZE;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&flash->lock);
+
+ /* Wait until finished previous write command. */
+ ret = wait_till_ready(flash);
+ if (ret)
+ goto time_out;
+
+ write_enable(flash);
+
+ actual = to % 2;
+ /* Start write from odd address. */
+ if (actual) {
+ flash->command[0] = OPCODE_BP;
+ flash->command[1] = to >> 16;
+ flash->command[2] = to >> 8;
+ flash->command[3] = to;
+
+ /* write one byte. */
+ t[1].len = 1;
+ spi_sync(flash->spi, &m);
+ ret = wait_till_ready(flash);
+ if (ret)
+ goto time_out;
+ *retlen += m.actual_length - CMD_SIZE;
+ }
+ to += actual;
+
+ flash->command[0] = OPCODE_AAI_WP;
+ flash->command[1] = to >> 16;
+ flash->command[2] = to >> 8;
+ flash->command[3] = to;
+
+ /* Write out most of the data here. */
+ cmd_sz = CMD_SIZE;
+ for (; actual < len - 1; actual += 2) {
+ t[0].len = cmd_sz;
+ /* write two bytes. */
+ t[1].len = 2;
+ t[1].tx_buf = buf + actual;
+
+ spi_sync(flash->spi, &m);
+ ret = wait_till_ready(flash);
+ if (ret)
+ goto time_out;
+ *retlen += m.actual_length - cmd_sz;
+ cmd_sz = 1;
+ to += 2;
+ }
+ write_disable(flash);
+ ret = wait_till_ready(flash);
+ if (ret)
+ goto time_out;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ write_enable(flash);
+ flash->command[0] = OPCODE_BP;
+ flash->command[1] = to >> 16;
+ flash->command[2] = to >> 8;
+ flash->command[3] = to;
+ t[0].len = CMD_SIZE;
+ t[1].len = 1;
+ t[1].tx_buf = buf + actual;
+
+ spi_sync(flash->spi, &m);
+ ret = wait_till_ready(flash);
+ if (ret)
+ goto time_out;
+ *retlen += m.actual_length - CMD_SIZE;
+ write_disable(flash);
+ }
+
+time_out:
+ mutex_unlock(&flash->lock);
+ return ret;
+}
/****************************************************************************/
@@ -501,7 +620,10 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
/* Macronix */
+ { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, },
+ { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, },
{ "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
+ { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -511,14 +633,20 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "s25sl016a", 0x010214, 0, 64 * 1024, 32, },
{ "s25sl032a", 0x010215, 0, 64 * 1024, 64, },
{ "s25sl064a", 0x010216, 0, 64 * 1024, 128, },
- { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, },
+ { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, },
{ "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, },
+ { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, },
+ { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, },
{ "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, },
{ "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, },
{ "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, },
+ { "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, },
+ { "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, },
+ { "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, },
+ { "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", 0x202010, 0, 32 * 1024, 2, },
@@ -667,7 +795,12 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.size = info->sector_size * info->n_sectors;
flash->mtd.erase = m25p80_erase;
flash->mtd.read = m25p80_read;
- flash->mtd.write = m25p80_write;
+
+ /* SST flash chips use the AAI word-program command */
+ if (info->jedec_id >> 16 == 0xbf)
+ flash->mtd.write = sst_write;
+ else
+ flash->mtd.write = m25p80_write;
/* prefer "small sector" erase if possible */
if (info->flags & SECT_4K) {
@@ -776,13 +909,13 @@ static struct spi_driver m25p80_driver = {
};
-static int m25p80_init(void)
+static int __init m25p80_init(void)
{
return spi_register_driver(&m25p80_driver);
}
-static void m25p80_exit(void)
+static void __exit m25p80_exit(void)
{
spi_unregister_driver(&m25p80_driver);
}
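The AAI word-program sequence implemented by sst_write() reduces to a fixed command pattern; the sketch below captures just that pattern against a hypothetical send(cmd, cmdlen, data, datalen) hook, omitting the write-enable and busy-polling steps the real code performs between transfers:

/* Illustrative only: the SST AAI command pattern used by sst_write(). */
static void sst_aai_sequence(unsigned int to, const unsigned char *buf,
			     unsigned int len,
			     void (*send)(const unsigned char *cmd, int cmdlen,
					  const unsigned char *data, int datalen))
{
	unsigned char cmd[4];
	unsigned int i = 0;
	int first = 1;

	if (len && (to & 1)) {
		/* OPCODE_BP: program one byte to reach an even address */
		cmd[0] = 0x02;
		cmd[1] = to >> 16;
		cmd[2] = to >> 8;
		cmd[3] = to;
		send(cmd, 4, buf, 1);
		to++;
		i++;
	}

	/* OPCODE_AAI_WP: the address is sent once, then the chip auto-increments */
	cmd[0] = 0xad;
	cmd[1] = to >> 16;
	cmd[2] = to >> 8;
	cmd[3] = to;
	for (; i + 1 < len; i += 2) {
		send(cmd, first ? 4 : 1, buf + i, 2);
		first = 0;
		to += 2;
	}

	/* OPCODE_WRDI ends AAI mode; a trailing odd byte goes out as another BP */
	if (i != len) {
		cmd[0] = 0x02;
		cmd[1] = to >> 16;
		cmd[2] = to >> 8;
		cmd[3] = to;
		send(cmd, 4, buf + i, 1);
	}
}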
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 43976aa4dbb..93e3627be74 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -401,7 +401,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
(void) dataflash_waitready(priv->spi);
-#ifdef CONFIG_MTD_DATAFLASH_VERIFY_WRITE
+#ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY
/* (3) Compare to Buffer1 */
addr = pageaddr << priv->page_offset;
@@ -430,7 +430,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
} else
status = 0;
-#endif /* CONFIG_MTD_DATAFLASH_VERIFY_WRITE */
+#endif /* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */
remaining = remaining - writelen;
pageaddr++;
@@ -966,3 +966,4 @@ module_exit(dataflash_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrew Victor, David Brownell");
MODULE_DESCRIPTION("MTD DataFlash driver");
+MODULE_ALIAS("spi:mtd_dataflash");
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 088fbb7595b..1696bbecaa7 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -14,6 +14,9 @@
* Example:
* phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
*/
+
+#define pr_fmt(fmt) "phram: " fmt
+
#include <asm/io.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -23,8 +26,6 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
-#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args)
-
struct phram_mtd_list {
struct mtd_info mtd;
struct list_head list;
@@ -132,7 +133,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
ret = -EIO;
new->mtd.priv = ioremap(start, len);
if (!new->mtd.priv) {
- ERROR("ioremap failed\n");
+ pr_err("ioremap failed\n");
goto out1;
}
@@ -152,7 +153,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
ret = -EAGAIN;
if (add_mtd_device(&new->mtd)) {
- ERROR("Failed to register new device\n");
+ pr_err("Failed to register new device\n");
goto out2;
}
@@ -227,8 +228,8 @@ static inline void kill_final_newline(char *str)
#define parse_err(fmt, args...) do { \
- ERROR(fmt , ## args); \
- return 0; \
+ pr_err(fmt , ## args); \
+ return 1; \
} while (0)
static int phram_setup(const char *val, struct kernel_param *kp)
@@ -256,12 +257,8 @@ static int phram_setup(const char *val, struct kernel_param *kp)
parse_err("not enough arguments\n");
ret = parse_name(&name, token[0]);
- if (ret == -ENOMEM)
- parse_err("out of memory\n");
- if (ret == -ENOSPC)
- parse_err("name too long\n");
if (ret)
- return 0;
+ return ret;
ret = parse_num32(&start, token[1]);
if (ret) {
@@ -275,9 +272,11 @@ static int phram_setup(const char *val, struct kernel_param *kp)
parse_err("illegal device length\n");
}
- register_device(name, start, len);
+ ret = register_device(name, start, len);
+ if (!ret)
+ pr_info("%s device: %#x at %#x\n", name, len, start);
- return 0;
+ return ret;
}
module_param_call(phram, phram_setup, NULL, NULL, 000);
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 00248e81ecd..3aa05cd18ea 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -303,7 +303,7 @@ __setup("slram=", mtd_slram_setup);
#endif
-static int init_slram(void)
+static int __init init_slram(void)
{
char *devname;
int i;
@@ -341,7 +341,7 @@ static int init_slram(void)
#else
int count;
- for (count = 0; (map[count]) && (count < SLRAM_MAX_DEVICES_PARAMS);
+ for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
count++) {
}
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
new file mode 100644
index 00000000000..c2baf3353f8
--- /dev/null
+++ b/drivers/mtd/devices/sst25l.c
@@ -0,0 +1,512 @@
+/*
+ * sst25l.c
+ *
+ * Driver for SST25L SPI Flash chips
+ *
+ * Copyright © 2009 Bluewater Systems Ltd
+ * Author: Andre Renaud <andre@bluewatersys.com>
+ * Author: Ryan Mallon <ryan@bluewatersys.com>
+ *
+ * Based on m25p80.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+/* Erases can take up to 3 seconds! */
+#define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000)
+
+#define SST25L_CMD_WRSR 0x01 /* Write status register */
+#define SST25L_CMD_WRDI 0x04 /* Write disable */
+#define SST25L_CMD_RDSR 0x05 /* Read status register */
+#define SST25L_CMD_WREN 0x06 /* Write enable */
+#define SST25L_CMD_READ 0x03 /* High speed read */
+
+#define SST25L_CMD_EWSR 0x50 /* Enable write status register */
+#define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */
+#define SST25L_CMD_READ_ID 0x90 /* Read device ID */
+#define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */
+
+#define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */
+#define SST25L_STATUS_WREN (1 << 1) /* Write enabled */
+#define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */
+#define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */
+
+struct sst25l_flash {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct mtd_info mtd;
+
+ int partitioned;
+};
+
+struct flash_info {
+ const char *name;
+ uint16_t device_id;
+ unsigned page_size;
+ unsigned nr_pages;
+ unsigned erase_size;
+};
+
+#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
+
+static struct flash_info __initdata sst25l_flash_info[] = {
+ {"sst25lf020a", 0xbf43, 256, 1024, 4096},
+ {"sst25lf040a", 0xbf44, 256, 2048, 4096},
+};
+
+static int sst25l_status(struct sst25l_flash *flash, int *status)
+{
+ unsigned char command, response;
+ int err;
+
+ command = SST25L_CMD_RDSR;
+ err = spi_write_then_read(flash->spi, &command, 1, &response, 1);
+ if (err < 0)
+ return err;
+
+ *status = response;
+ return 0;
+}
+
+static int sst25l_write_enable(struct sst25l_flash *flash, int enable)
+{
+ unsigned char command[2];
+ int status, err;
+
+ command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI;
+ err = spi_write(flash->spi, command, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_EWSR;
+ err = spi_write(flash->spi, command, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_WRSR;
+ command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1;
+ err = spi_write(flash->spi, command, 2);
+ if (err)
+ return err;
+
+ if (enable) {
+ err = sst25l_status(flash, &status);
+ if (err)
+ return err;
+ if (!(status & SST25L_STATUS_WREN))
+ return -EROFS;
+ }
+
+ return 0;
+}
+
+static int sst25l_wait_till_ready(struct sst25l_flash *flash)
+{
+ unsigned long deadline;
+ int status, err;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+ do {
+ err = sst25l_status(flash, &status);
+ if (err)
+ return err;
+ if (!(status & SST25L_STATUS_BUSY))
+ return 0;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset)
+{
+ unsigned char command[4];
+ int err;
+
+ err = sst25l_write_enable(flash, 1);
+ if (err)
+ return err;
+
+ command[0] = SST25L_CMD_SECTOR_ERASE;
+ command[1] = offset >> 16;
+ command[2] = offset >> 8;
+ command[3] = offset;
+ err = spi_write(flash->spi, command, 4);
+ if (err)
+ return err;
+
+ err = sst25l_wait_till_ready(flash);
+ if (err)
+ return err;
+
+ return sst25l_write_enable(flash, 0);
+}
+
+static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ uint32_t addr, end;
+ int err;
+
+ /* Sanity checks */
+ if (instr->addr + instr->len > flash->mtd.size)
+ return -EINVAL;
+
+ if ((uint32_t)instr->len % mtd->erasesize)
+ return -EINVAL;
+
+ if ((uint32_t)instr->addr % mtd->erasesize)
+ return -EINVAL;
+
+ addr = instr->addr;
+ end = addr + instr->len;
+
+ mutex_lock(&flash->lock);
+
+ err = sst25l_wait_till_ready(flash);
+ if (err) {
+ mutex_unlock(&flash->lock);
+ return err;
+ }
+
+ while (addr < end) {
+ err = sst25l_erase_sector(flash, addr);
+ if (err) {
+ mutex_unlock(&flash->lock);
+ instr->state = MTD_ERASE_FAILED;
+ dev_err(&flash->spi->dev, "Erase failed\n");
+ return err;
+ }
+
+ addr += mtd->erasesize;
+ }
+
+ mutex_unlock(&flash->lock);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+}
+
+static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, unsigned char *buf)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ struct spi_transfer transfer[2];
+ struct spi_message message;
+ unsigned char command[4];
+ int ret;
+
+ /* Sanity checking */
+ if (len == 0)
+ return 0;
+
+ if (from + len > flash->mtd.size)
+ return -EINVAL;
+
+ if (retlen)
+ *retlen = 0;
+
+ spi_message_init(&message);
+ memset(&transfer, 0, sizeof(transfer));
+
+ command[0] = SST25L_CMD_READ;
+ command[1] = from >> 16;
+ command[2] = from >> 8;
+ command[3] = from;
+
+ transfer[0].tx_buf = command;
+ transfer[0].len = sizeof(command);
+ spi_message_add_tail(&transfer[0], &message);
+
+ transfer[1].rx_buf = buf;
+ transfer[1].len = len;
+ spi_message_add_tail(&transfer[1], &message);
+
+ mutex_lock(&flash->lock);
+
+ /* Wait for previous write/erase to complete */
+ ret = sst25l_wait_till_ready(flash);
+ if (ret) {
+ mutex_unlock(&flash->lock);
+ return ret;
+ }
+
+ spi_sync(flash->spi, &message);
+
+ if (retlen && message.actual_length > sizeof(command))
+ *retlen += message.actual_length - sizeof(command);
+
+ mutex_unlock(&flash->lock);
+ return 0;
+}
+
+static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const unsigned char *buf)
+{
+ struct sst25l_flash *flash = to_sst25l_flash(mtd);
+ int i, j, ret, bytes, copied = 0;
+ unsigned char command[5];
+
+ /* Sanity checks */
+ if (!len)
+ return 0;
+
+ if (to + len > flash->mtd.size)
+ return -EINVAL;
+
+ if ((uint32_t)to % mtd->writesize)
+ return -EINVAL;
+
+ mutex_lock(&flash->lock);
+
+ ret = sst25l_write_enable(flash, 1);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < len; i += mtd->writesize) {
+ ret = sst25l_wait_till_ready(flash);
+ if (ret)
+ goto out;
+
+ /* Write the first byte of the page */
+ command[0] = SST25L_CMD_AAI_PROGRAM;
+ command[1] = (to + i) >> 16;
+ command[2] = (to + i) >> 8;
+ command[3] = (to + i);
+ command[4] = buf[i];
+ ret = spi_write(flash->spi, command, 5);
+ if (ret < 0)
+ goto out;
+ copied++;
+
+ /*
+ * Write the remaining bytes using auto address
+ * increment mode
+ */
+ bytes = min_t(uint32_t, mtd->writesize, len - i);
+ for (j = 1; j < bytes; j++, copied++) {
+ ret = sst25l_wait_till_ready(flash);
+ if (ret)
+ goto out;
+
+ command[1] = buf[i + j];
+ ret = spi_write(flash->spi, command, 2);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ ret = sst25l_write_enable(flash, 0);
+
+ if (retlen)
+ *retlen = copied;
+
+ mutex_unlock(&flash->lock);
+ return ret;
+}
+
+static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
+{
+ struct flash_info *flash_info = NULL;
+ unsigned char command[4], response;
+ int i, err;
+ uint16_t id;
+
+ command[0] = SST25L_CMD_READ_ID;
+ command[1] = 0;
+ command[2] = 0;
+ command[3] = 0;
+ err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
+ if (err < 0) {
+ dev_err(&spi->dev, "error reading device id msb\n");
+ return NULL;
+ }
+
+ id = response << 8;
+
+ command[0] = SST25L_CMD_READ_ID;
+ command[1] = 0;
+ command[2] = 0;
+ command[3] = 1;
+ err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
+ if (err < 0) {
+ dev_err(&spi->dev, "error reading device id lsb\n");
+ return NULL;
+ }
+
+ id |= response;
+
+ for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
+ if (sst25l_flash_info[i].device_id == id)
+ flash_info = &sst25l_flash_info[i];
+
+ if (!flash_info)
+ dev_err(&spi->dev, "unknown id %.4x\n", id);
+
+ return flash_info;
+}
+
+static int __init sst25l_probe(struct spi_device *spi)
+{
+ struct flash_info *flash_info;
+ struct sst25l_flash *flash;
+ struct flash_platform_data *data;
+ int ret, i;
+
+ flash_info = sst25l_match_device(spi);
+ if (!flash_info)
+ return -ENODEV;
+
+ flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+ dev_set_drvdata(&spi->dev, flash);
+
+ data = spi->dev.platform_data;
+ if (data && data->name)
+ flash->mtd.name = data->name;
+ else
+ flash->mtd.name = dev_name(&spi->dev);
+
+ flash->mtd.type = MTD_NORFLASH;
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.erasesize = flash_info->erase_size;
+ flash->mtd.writesize = flash_info->page_size;
+ flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
+ flash->mtd.erase = sst25l_erase;
+ flash->mtd.read = sst25l_read;
+ flash->mtd.write = sst25l_write;
+
+ dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
+ (long long)flash->mtd.size >> 10);
+
+ DEBUG(MTD_DEBUG_LEVEL2,
+ "mtd .name = %s, .size = 0x%llx (%lldMiB) "
+ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+ flash->mtd.name,
+ (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
+ flash->mtd.erasesize, flash->mtd.erasesize / 1024,
+ flash->mtd.numeraseregions);
+
+ if (flash->mtd.numeraseregions)
+ for (i = 0; i < flash->mtd.numeraseregions; i++)
+ DEBUG(MTD_DEBUG_LEVEL2,
+ "mtd.eraseregions[%d] = { .offset = 0x%llx, "
+ ".erasesize = 0x%.8x (%uKiB), "
+ ".numblocks = %d }\n",
+ i, (long long)flash->mtd.eraseregions[i].offset,
+ flash->mtd.eraseregions[i].erasesize,
+ flash->mtd.eraseregions[i].erasesize / 1024,
+ flash->mtd.eraseregions[i].numblocks);
+
+ if (mtd_has_partitions()) {
+ struct mtd_partition *parts = NULL;
+ int nr_parts = 0;
+
+ if (mtd_has_cmdlinepart()) {
+ static const char *part_probes[] =
+ {"cmdlinepart", NULL};
+
+ nr_parts = parse_mtd_partitions(&flash->mtd,
+ part_probes,
+ &parts, 0);
+ }
+
+ if (nr_parts <= 0 && data && data->parts) {
+ parts = data->parts;
+ nr_parts = data->nr_parts;
+ }
+
+ if (nr_parts > 0) {
+ for (i = 0; i < nr_parts; i++) {
+ DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+ "{.name = %s, .offset = 0x%llx, "
+ ".size = 0x%llx (%lldKiB) }\n",
+ i, parts[i].name,
+ (long long)parts[i].offset,
+ (long long)parts[i].size,
+ (long long)(parts[i].size >> 10));
+ }
+
+ flash->partitioned = 1;
+ return add_mtd_partitions(&flash->mtd,
+ parts, nr_parts);
+ }
+
+ } else if (data && data->nr_parts) {
+ dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
+ data->nr_parts, data->name);
+ }
+
+ ret = add_mtd_device(&flash->mtd);
+ if (ret == 1) {
+ kfree(flash);
+ dev_set_drvdata(&spi->dev, NULL);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __exit sst25l_remove(struct spi_device *spi)
+{
+ struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
+ int ret;
+
+ if (mtd_has_partitions() && flash->partitioned)
+ ret = del_mtd_partitions(&flash->mtd);
+ else
+ ret = del_mtd_device(&flash->mtd);
+ if (ret == 0)
+ kfree(flash);
+ return ret;
+}
+
+static struct spi_driver sst25l_driver = {
+ .driver = {
+ .name = "sst25l",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = sst25l_probe,
+ .remove = __exit_p(sst25l_remove),
+};
+
+static int __init sst25l_init(void)
+{
+ return spi_register_driver(&sst25l_driver);
+}
+
+static void __exit sst25l_exit(void)
+{
+ spi_unregister_driver(&sst25l_driver);
+}
+
+module_init(sst25l_init);
+module_exit(sst25l_exit);
+
+MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
+MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
+ "Ryan Mallon <ryan@bluewatersys.com>");
+MODULE_LICENSE("GPL");
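For context, hooking the new driver up from board code only requires an SPI device whose modalias matches the driver name; a hypothetical sketch (bus number, chip select, clock rate and the partition layout are placeholders):

/* Hypothetical board hookup for the sst25l driver; numbers are placeholders. */
static struct mtd_partition board_sst25l_parts[] = {
	{ .name = "bootloader", .offset = 0,       .size = 0x10000 },
	{ .name = "data",       .offset = 0x10000, .size = MTDPART_SIZ_FULL },
};

static struct flash_platform_data board_sst25l_data = {
	.name		= "sst25l",
	.parts		= board_sst25l_parts,
	.nr_parts	= ARRAY_SIZE(board_sst25l_parts),
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "sst25l",	/* matches sst25l_driver.driver.name */
		.max_speed_hz	= 20 * 1000 * 1000,
		.bus_num	= 0,
		.chip_select	= 0,
		.platform_data	= &board_sst25l_data,
	},
};

The board registers this array with spi_register_board_info(); sst25l_probe() then identifies the chip by its device ID and falls back to these platform-data partitions when cmdlinepart yields none.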
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index a790c062af1..e56d6b42f02 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1099,7 +1099,7 @@ static struct mtd_blktrans_ops ftl_tr = {
.owner = THIS_MODULE,
};
-static int init_ftl(void)
+static int __init init_ftl(void)
{
return register_mtd_blktrans(&ftl_tr);
}
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index d8cf29c01cc..8aca5523a33 100644..100755
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -550,7 +550,7 @@ hitused:
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
- thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
+ thisEUN = INFTL_makefreeblock(inftl, block);
/*
* Hopefully we free something, lets try again.
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 7a58bd5522f..841e085ab74 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -74,7 +74,7 @@ config MTD_PHYSMAP_BANKWIDTH
config MTD_PHYSMAP_OF
tristate "Flash device in physical memory map based on OF description"
- depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
+ depends on (MICROBLAZE || PPC_OF) && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
help
This provides a 'mapping' driver which allows the NOR Flash and
ROM driver code to communicate with chips which are mapped
@@ -484,9 +484,19 @@ config MTD_BFIN_ASYNC
If compiled as a module, it will be called bfin-async-flash.
+config MTD_GPIO_ADDR
+ tristate "GPIO-assisted Flash Chip Support"
+ depends on MTD_COMPLEX_MAPPINGS
+ select MTD_PARTITIONS
+ help
+ Map driver which allows flashes to be partially physically addressed
+ and assisted by GPIOs.
+
+ If compiled as a module, it will be called gpio-addr-flash.
+
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_PARTITIONS && MTD_RAM && !MMU
+ depends on MTD_PARTITIONS && MTD_RAM=y && !MMU
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 5beb0662d72..1d5cf863672 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,5 +58,4 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
-obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
-obj-$(CONFIG_MTD_VMU) += vmu-flash.o
+obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
new file mode 100644
index 00000000000..44ef9a49a86
--- /dev/null
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -0,0 +1,311 @@
+/*
+ * drivers/mtd/maps/gpio-addr-flash.c
+ *
+ * Handle the case where a flash device is mostly addressed using physical
+ * lines and supplemented by GPIOs. This way you can hook up, say, an 8MiB
+ * flash to a 2MiB memory range and use the GPIOs to select a particular range.
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2005-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <asm/gpio.h>
+#include <asm/io.h>
+
+#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
+#define DRIVER_NAME "gpio-addr-flash"
+#define PFX DRIVER_NAME ": "
+
+/**
+ * struct async_state - keep GPIO flash state
+ * @mtd: MTD state for this mapping
+ * @map: MTD map state for this flash
+ * @gpio_count: number of GPIOs used to address
+ * @gpio_addrs: array of GPIOs to twiddle
+ * @gpio_values: cached GPIO values
+ * @win_size: dedicated memory size (if no GPIOs)
+ */
+struct async_state {
+ struct mtd_info *mtd;
+ struct map_info map;
+ size_t gpio_count;
+ unsigned *gpio_addrs;
+ int *gpio_values;
+ unsigned long win_size;
+};
+#define gf_map_info_to_state(mi) ((struct async_state *)(mi)->map_priv_1)
+
+/**
+ * gf_set_gpios() - set GPIO address lines to access specified flash offset
+ * @state: GPIO flash state
+ * @ofs: desired offset to access
+ *
+ * Rather than call the GPIO framework every time, cache the last-programmed
+ * value. This speeds up sequential accesses (which are by far the most common
+ * type). We rely on the GPIO framework to treat non-zero value as high so
+ * that we don't have to normalize the bits.
+ */
+static void gf_set_gpios(struct async_state *state, unsigned long ofs)
+{
+ size_t i = 0;
+ int value;
+ ofs /= state->win_size;
+ do {
+ value = ofs & (1 << i);
+ if (state->gpio_values[i] != value) {
+ gpio_set_value(state->gpio_addrs[i], value);
+ state->gpio_values[i] = value;
+ }
+ } while (++i < state->gpio_count);
+}
+
+/**
+ * gf_read() - read a word at the specified offset
+ * @map: MTD map state
+ * @ofs: desired offset to read
+ */
+static map_word gf_read(struct map_info *map, unsigned long ofs)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+ uint16_t word;
+ map_word test;
+
+ gf_set_gpios(state, ofs);
+
+ word = readw(map->virt + (ofs % state->win_size));
+ test.x[0] = word;
+ return test;
+}
+
+/**
+ * gf_copy_from() - copy a chunk of data from the flash
+ * @map: MTD map state
+ * @to: memory to copy to
+ * @from: flash offset to copy from
+ * @len: how much to copy
+ *
+ * We rely on the MTD layer to chunk up copies such that a single request here
+ * will not cross a window boundary. This allows us to wiggle the GPIOs only
+ * once before falling back to a normal memcpy. Reading the higher-layer code
+ * shows that this is indeed the case, but add a BUG_ON() to future-proof it.
+ */
+static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+
+ gf_set_gpios(state, from);
+
+ /* BUG if operation crosses the win_size */
+ BUG_ON(!((from + len) % state->win_size <= (from + len)));
+
+ /* operation does not cross the win_size, so one shot it */
+ memcpy_fromio(to, map->virt + (from % state->win_size), len);
+}
+
+/**
+ * gf_write() - write a word at the specified offset
+ * @map: MTD map state
+ * @ofs: desired offset to write
+ */
+static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+ uint16_t d;
+
+ gf_set_gpios(state, ofs);
+
+ d = d1.x[0];
+ writew(d, map->virt + (ofs % state->win_size));
+}
+
+/**
+ * gf_copy_to() - copy a chunk of data to the flash
+ * @map: MTD map state
+ * @to: flash offset to copy to
+ * @from: memory to copy from
+ * @len: how much to copy
+ *
+ * See gf_copy_from() caveat.
+ */
+static void gf_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ struct async_state *state = gf_map_info_to_state(map);
+
+ gf_set_gpios(state, to);
+
+ /* BUG if operation crosses the win_size */
+ BUG_ON(!((to + len) % state->win_size <= (to + len)));
+
+ /* operation does not cross the win_size, so one shot it */
+ memcpy_toio(map->virt + (to % state->win_size), from, len);
+}
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+#endif
+
+/**
+ * gpio_flash_probe() - setup a mapping for a GPIO assisted flash
+ * @pdev: platform device
+ *
+ * The platform resource layout expected looks something like:
+ * struct mtd_partition partitions[] = { ... };
+ * struct physmap_flash_data flash_data = { ... };
+ * unsigned flash_gpios[] = { GPIO_XX, GPIO_XX, ... };
+ * struct resource flash_resource[] = {
+ * {
+ * .name = "cfi_probe",
+ * .start = 0x20000000,
+ * .end = 0x201fffff,
+ * .flags = IORESOURCE_MEM,
+ * }, {
+ * .start = (unsigned long)flash_gpios,
+ * .end = ARRAY_SIZE(flash_gpios),
+ * .flags = IORESOURCE_IRQ,
+ * }
+ * };
+ * struct platform_device flash_device = {
+ * .name = "gpio-addr-flash",
+ * .dev = { .platform_data = &flash_data, },
+ * .num_resources = ARRAY_SIZE(flash_resource),
+ * .resource = flash_resource,
+ * ...
+ * };
+ */
+static int __devinit gpio_flash_probe(struct platform_device *pdev)
+{
+ int ret;
+ size_t i, arr_size;
+ struct physmap_flash_data *pdata;
+ struct resource *memory;
+ struct resource *gpios;
+ struct async_state *state;
+
+ pdata = pdev->dev.platform_data;
+ memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!memory || !gpios || !gpios->end)
+ return -EINVAL;
+
+ arr_size = sizeof(int) * gpios->end;
+ state = kzalloc(sizeof(*state) + arr_size, GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->gpio_count = gpios->end;
+ state->gpio_addrs = (void *)gpios->start;
+ state->gpio_values = (void *)(state + 1);
+ state->win_size = memory->end - memory->start + 1;
+ memset(state->gpio_values, 0xff, arr_size);
+
+ state->map.name = DRIVER_NAME;
+ state->map.read = gf_read;
+ state->map.copy_from = gf_copy_from;
+ state->map.write = gf_write;
+ state->map.copy_to = gf_copy_to;
+ state->map.bankwidth = pdata->width;
+ state->map.size = state->win_size * (1 << state->gpio_count);
+ state->map.virt = (void __iomem *)memory->start;
+ state->map.phys = NO_XIP;
+ state->map.map_priv_1 = (unsigned long)state;
+
+ platform_set_drvdata(pdev, state);
+
+ i = 0;
+ do {
+ if (gpio_request(state->gpio_addrs[i], DRIVER_NAME)) {
+ pr_devinit(KERN_ERR PFX "failed to request gpio %d\n",
+ state->gpio_addrs[i]);
+ while (i--)
+ gpio_free(state->gpio_addrs[i]);
+ kfree(state);
+ return -EBUSY;
+ }
+ gpio_direction_output(state->gpio_addrs[i], 0);
+ } while (++i < state->gpio_count);
+
+ pr_devinit(KERN_NOTICE PFX "probing %d-bit flash bus\n",
+ state->map.bankwidth * 8);
+ state->mtd = do_map_probe(memory->name, &state->map);
+ if (!state->mtd) {
+ for (i = 0; i < state->gpio_count; ++i)
+ gpio_free(state->gpio_addrs[i]);
+ kfree(state);
+ return -ENXIO;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
+ if (ret > 0) {
+ pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
+ add_mtd_partitions(state->mtd, pdata->parts, ret);
+ kfree(pdata->parts);
+
+ } else if (pdata->nr_parts) {
+ pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
+ add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
+
+ } else
+#endif
+ {
+ pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
+ add_mtd_device(state->mtd);
+ }
+
+ return 0;
+}
+
+static int __devexit gpio_flash_remove(struct platform_device *pdev)
+{
+ struct async_state *state = platform_get_drvdata(pdev);
+ size_t i = 0;
+ do {
+ gpio_free(state->gpio_addrs[i]);
+ } while (++i < state->gpio_count);
+#ifdef CONFIG_MTD_PARTITIONS
+ del_mtd_partitions(state->mtd);
+#endif
+ map_destroy(state->mtd);
+ kfree(state);
+ return 0;
+}
+
+static struct platform_driver gpio_flash_driver = {
+ .probe = gpio_flash_probe,
+ .remove = __devexit_p(gpio_flash_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init gpio_flash_init(void)
+{
+ return platform_driver_register(&gpio_flash_driver);
+}
+module_init(gpio_flash_init);
+
+static void __exit gpio_flash_exit(void)
+{
+ platform_driver_unregister(&gpio_flash_driver);
+}
+module_exit(gpio_flash_exit);
+
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
+MODULE_LICENSE("GPL");
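The addressing trick is easiest to see with concrete numbers; an illustrative helper (not part of the driver) showing how gf_set_gpios() decomposes a flash offset:

/* Illustrative only: mirrors the offset decomposition done by gf_set_gpios(). */
static void decompose_flash_offset(unsigned long ofs, unsigned long win_size,
				   size_t gpio_count, int *values)
{
	unsigned long bank = ofs / win_size;	/* which window the offset falls in */
	size_t i;

	for (i = 0; i < gpio_count; i++)
		values[i] = !!(bank & (1UL << i));	/* one GPIO per bank-select bit */
}

With a 2 MiB window and two address GPIOs, an 8 MiB flash maps offset 0x500000 to bank 2: GPIO0 low, GPIO1 high, and the remaining 0x100000 is addressed through the memory window itself.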
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index d4fb9a3ab4d..1bdf0ee6d0b 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -184,7 +184,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
info->map.bankwidth = 1;
/*
- * map_priv_2 is used to store a ptr to to the bank_setup routine
+ * map_priv_2 is used to store a ptr to the bank_setup routine
*/
info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 39d357b2eb4..61e4eb48bb2 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -190,6 +190,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
const u32 *p;
int reg_tuple_size;
struct mtd_info **mtd_list = NULL;
+ resource_size_t res_size;
reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
@@ -204,7 +205,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_err(&dev->dev, "Malformed reg property on %s\n",
dev->node->full_name);
err = -EINVAL;
- goto err_out;
+ goto err_flash_remove;
}
count /= reg_tuple_size;
@@ -212,14 +213,14 @@ static int __devinit of_flash_probe(struct of_device *dev,
info = kzalloc(sizeof(struct of_flash) +
sizeof(struct of_flash_list) * count, GFP_KERNEL);
if (!info)
- goto err_out;
-
- mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
- if (!info)
- goto err_out;
+ goto err_flash_remove;
dev_set_drvdata(&dev->dev, info);
+ mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ if (!mtd_list)
+ goto err_flash_remove;
+
for (i = 0; i < count; i++) {
err = -ENXIO;
if (of_address_to_resource(dp, i, &res)) {
@@ -233,8 +234,8 @@ static int __devinit of_flash_probe(struct of_device *dev,
(unsigned long long)res.end);
err = -EBUSY;
- info->list[i].res = request_mem_region(res.start, res.end -
- res.start + 1,
+ res_size = resource_size(&res);
+ info->list[i].res = request_mem_region(res.start, res_size,
dev_name(&dev->dev));
if (!info->list[i].res)
goto err_out;
@@ -249,7 +250,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
info->list[i].map.name = dev_name(&dev->dev);
info->list[i].map.phys = res.start;
- info->list[i].map.size = res.end - res.start + 1;
+ info->list[i].map.size = res_size;
info->list[i].map.bankwidth = *width;
err = -ENOMEM;
@@ -338,6 +339,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
err_out:
kfree(mtd_list);
+err_flash_remove:
of_flash_remove(dev);
return err;
@@ -360,6 +362,10 @@ static struct of_device_id of_flash_match[] = {
.data = (void *)"jedec_probe",
},
{
+ .compatible = "mtd-ram",
+ .data = (void *)"map_ram",
+ },
+ {
.type = "rom",
.compatible = "direct-mapped"
},
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 49c9ece7647..dafb91944e7 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -175,7 +175,7 @@ static int platram_probe(struct platform_device *pdev)
/* setup map parameters */
info->map.phys = res->start;
- info->map.size = (res->end - res->start) + 1;
+ info->map.size = resource_size(res);
info->map.name = pdata->mapname != NULL ?
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 4768bd5459d..c8fd8da4bc8 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -50,7 +50,7 @@ static int fcnt;
static int __init init_msp_flash(void)
{
- int i, j;
+ int i, j, ret = -ENOMEM;
int offset, coff;
char *env;
int pcnt;
@@ -75,14 +75,16 @@ static int __init init_msp_flash(void)
printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
+ if (!msp_flash)
+ return -ENOMEM;
+
msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
+ if (!msp_parts)
+ goto free_msp_flash;
+
msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
- if (!msp_flash || !msp_parts || !msp_maps) {
- kfree(msp_maps);
- kfree(msp_parts);
- kfree(msp_flash);
- return -ENOMEM;
- }
+ if (!msp_maps)
+ goto free_msp_parts;
/* loop over the flash devices, initializing each */
for (i = 0; i < fcnt; i++) {
@@ -100,13 +102,18 @@ static int __init init_msp_flash(void)
msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
GFP_KERNEL);
+ if (!msp_parts[i])
+ goto cleanup_loop;
/* now initialize the devices proper */
flash_name[5] = '0' + i;
env = prom_getenv(flash_name);
- if (sscanf(env, "%x:%x", &addr, &size) < 2)
- return -ENXIO;
+ if (sscanf(env, "%x:%x", &addr, &size) < 2) {
+ ret = -ENXIO;
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
addr = CPHYSADDR(addr);
printk(KERN_NOTICE
@@ -122,13 +129,23 @@ static int __init init_msp_flash(void)
*/
if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
size = CONFIG_MSP_FLASH_MAP_LIMIT;
+
msp_maps[i].virt = ioremap(addr, size);
+ if (msp_maps[i].virt == NULL) {
+ ret = -ENXIO;
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
+
msp_maps[i].bankwidth = 1;
- msp_maps[i].name = strncpy(kmalloc(7, GFP_KERNEL),
- flash_name, 7);
+ msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+ if (!msp_maps[i].name) {
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
- if (msp_maps[i].virt == NULL)
- return -ENXIO;
+ msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
for (j = 0; j < pcnt; j++) {
part_name[5] = '0' + i;
@@ -136,8 +153,14 @@ static int __init init_msp_flash(void)
env = prom_getenv(part_name);
- if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2)
- return -ENXIO;
+ if (sscanf(env, "%x:%x:%n", &offset, &size,
+ &coff) < 2) {
+ ret = -ENXIO;
+ kfree(msp_maps[i].name);
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
+ }
msp_parts[i][j].size = size;
msp_parts[i][j].offset = offset;
@@ -152,18 +175,37 @@ static int __init init_msp_flash(void)
add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt);
} else {
printk(KERN_ERR "map probe failed for flash\n");
- return -ENXIO;
+ ret = -ENXIO;
+ kfree(msp_maps[i].name);
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ goto cleanup_loop;
}
}
return 0;
+
+cleanup_loop:
+ while (i--) {
+ del_mtd_partitions(msp_flash[i]);
+ map_destroy(msp_flash[i]);
+ kfree(msp_maps[i].name);
+ iounmap(msp_maps[i].virt);
+ kfree(msp_parts[i]);
+ }
+ kfree(msp_maps);
+free_msp_parts:
+ kfree(msp_parts);
+free_msp_flash:
+ kfree(msp_flash);
+ return ret;
}
static void __exit cleanup_msp_flash(void)
{
int i;
- for (i = 0; i < sizeof(msp_flash) / sizeof(struct mtd_info **); i++) {
+ for (i = 0; i < fcnt; i++) {
del_mtd_partitions(msp_flash[i]);
map_destroy(msp_flash[i]);
iounmap((void *)msp_maps[i].virt);
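The reworked error handling in init_msp_flash() above follows the usual kernel unwind pattern: allocate in order, jump to a label that frees only what has been set up so far, and use a loop-local label to tear down partially initialized array entries. A condensed sketch of the shape, with illustrative names (setup_one/teardown_one are not part of the driver):

    int ret = -ENOMEM;
    int i;

    a = kmalloc(sizeof(*a) * n, GFP_KERNEL);
    if (!a)
            return -ENOMEM;
    b = kmalloc(sizeof(*b) * n, GFP_KERNEL);
    if (!b)
            goto free_a;

    for (i = 0; i < n; i++) {
            ret = setup_one(i);
            if (ret < 0)
                    goto cleanup_loop;
    }
    return 0;

    cleanup_loop:
            while (i--)             /* undo entries 0 .. i-1 */
                    teardown_one(i);
            kfree(b);
    free_a:
            kfree(a);
            return ret;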
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index d4314fb8821..35009294b43 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -89,7 +89,11 @@ static int __init uclinux_mtd_init(void)
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
+#ifdef CONFIG_MTD_PARTITIONS
add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
+#else
+ add_mtd_device(mtd);
+#endif
return(0);
}
@@ -99,7 +103,11 @@ static int __init uclinux_mtd_init(void)
static void __exit uclinux_mtd_cleanup(void)
{
if (uclinux_ram_mtdinfo) {
+#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(uclinux_ram_mtdinfo);
+#else
+ del_mtd_device(uclinux_ram_mtdinfo);
+#endif
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 7baba40c1ed..0acbf4f5be5 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -210,7 +210,7 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
}
}
-static struct block_device_operations mtd_blktrans_ops = {
+static const struct block_device_operations mtd_blktrans_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 2d70295a5fa..9f41b1a853c 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -84,7 +84,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
remove_wait_queue(&wait_q, &wait);
/*
- * Next, writhe data to flash.
+ * Next, write the data to flash.
*/
ret = mtd->write(mtd, pos, len, &retlen, buf);
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 792b547786b..db6de74082a 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -427,7 +427,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
* to-be-erased area begins. Verify that the starting
* offset is aligned to this region's erase size:
*/
- if (instr->addr & (erase_regions[i].erasesize - 1))
+ if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
return -EINVAL;
/*
@@ -440,8 +440,8 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
/*
* check if the ending offset is aligned to this region's erase size
*/
- if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
- 1))
+ if (i < 0 || ((instr->addr + instr->len) &
+ (erase_regions[i].erasesize - 1)))
return -EINVAL;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 69007a6eff5..467a4f177bf 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -213,11 +213,11 @@ static struct attribute *mtd_attrs[] = {
NULL,
};
-struct attribute_group mtd_group = {
+static struct attribute_group mtd_group = {
.attrs = mtd_attrs,
};
-const struct attribute_group *mtd_groups[] = {
+static const struct attribute_group *mtd_groups[] = {
&mtd_group,
NULL,
};
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 742504ea96f..b8043a9ba32 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -453,7 +453,8 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
;
/* The loop searched for the region _behind_ the first one */
- i--;
+ if (i > 0)
+ i--;
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
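The mtdconcat and mtdpart hunks above guard the same idiom: a linear scan over erase regions that deliberately overshoots by one entry, so the resulting index must be clamped before it is used. A minimal sketch of the guarded lookup (illustrative, not the mtd code itself):

    /* Find the index of the erase region containing 'ofs'; regions[] is
     * sorted by ->offset. Clamp at 0 so an address below the first region
     * cannot produce a negative index. */
    static int find_erase_region(const struct mtd_erase_region_info *regions,
                                 int max, uint64_t ofs)
    {
            int i;

            for (i = 0; i < max && regions[i].offset <= ofs; i++)
                    ;
            if (i > 0)              /* loop stops one region past the match */
                    i--;
            return i;
    }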
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index ce96c091f01..2fda0b61524 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -80,6 +80,23 @@ config MTD_NAND_OMAP2
help
Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+config MTD_NAND_OMAP_PREFETCH
+ bool "GPMC prefetch support for NAND Flash device"
+ depends on MTD_NAND && MTD_NAND_OMAP2
+ default y
+ help
+	  The NAND device can be accessed for reads and writes using the GPMC
+	  PREFETCH engine to improve performance.
+
+config MTD_NAND_OMAP_PREFETCH_DMA
+ depends on MTD_NAND_OMAP_PREFETCH
+ bool "DMA mode"
+ default n
+ help
+	  The GPMC PREFETCH engine can be configured either in MPU interrupt mode
+	  or in DMA interrupt mode.
+	  Say Y here to use DMA mode; otherwise MPU interrupt mode will be used.
+
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
@@ -426,6 +443,12 @@ config MTD_NAND_MXC
This enables the driver for the NAND flash controller on the
MXC processors.
+config MTD_NAND_NOMADIK
+ tristate "ST Nomadik 8815 NAND support"
+ depends on ARCH_NOMADIK
+ help
+ Driver for the NAND flash controller on the Nomadik, with ECC.
+
config MTD_NAND_SH_FLCTL
tristate "Support for NAND on Renesas SuperH FLCTL"
depends on MTD_NAND && SUPERH && CPU_SUBTYPE_SH7723
@@ -452,4 +475,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
+config MTD_NAND_W90P910
+ tristate "Support for NAND on w90p910 evaluation board."
+ depends on ARCH_W90X900 && MTD_PARTITIONS
+ help
+	  This enables the driver for the NAND Flash on the evaluation board
+	  based on the w90p910.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index f3a786b3cff..6950d3dabf1 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -40,5 +40,7 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
+obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 20c828ba940..f8e9975c86e 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -218,7 +218,7 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
* buf: buffer to store read data
*/
static int atmel_nand_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf)
+ struct nand_chip *chip, uint8_t *buf, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 29acd06b1c3..c828d9ac7bd 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -381,7 +381,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
* we need a special oob layout and handling.
*/
static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
struct cafe_priv *cafe = mtd->priv;
@@ -903,12 +903,12 @@ static struct pci_driver cafe_nand_pci_driver = {
.resume = cafe_nand_resume,
};
-static int cafe_nand_init(void)
+static int __init cafe_nand_init(void)
{
return pci_register_driver(&cafe_nand_pci_driver);
}
-static void cafe_nand_exit(void)
+static void __exit cafe_nand_exit(void)
{
pci_unregister_driver(&cafe_nand_pci_driver);
}
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 10081e656a6..826cacffcef 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -147,7 +147,7 @@ static int cmx270_device_ready(struct mtd_info *mtd)
/*
* Main initialization routine
*/
-static int cmx270_init(void)
+static int __init cmx270_init(void)
{
struct nand_chip *this;
const char *part_type;
@@ -261,7 +261,7 @@ module_init(cmx270_init);
/*
* Clean up routine
*/
-static void cmx270_cleanup(void)
+static void __exit cmx270_cleanup(void)
{
/* Release resources, unregister device */
nand_release(cmx270_nand_mtd);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 0fad6487e6f..f13f5b9afaf 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -348,6 +348,12 @@ compare:
if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
return 0;
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
+
/* Start address calculation, and wait for it to complete.
* We _could_ start reading more data while this is working,
* to speed up the overall page read.
@@ -359,8 +365,10 @@ compare:
switch ((fsr >> 8) & 0x0f) {
case 0: /* no error, should not happen */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
return 0;
case 1: /* five or more errors detected */
+ davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
return -EIO;
case 2: /* error addresses computed */
case 3:
@@ -500,6 +508,26 @@ static struct nand_ecclayout hwecc4_small __initconst = {
},
};
+/* An ECC layout for using 4-bit ECC with large-page (2048-byte) flash,
+ * storing ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_2048 __initconst = {
+ .eccbytes = 40,
+ .eccpos = {
+ /* at the end of spare sector */
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ /* 2 bytes at offset 0 hold manufacturer badblock markers */
+ {.offset = 2, .length = 22, },
+ /* 5 bytes at offset 8 hold BBT markers */
+ /* 8 bytes at offset 16 hold JFFS2 clean markers */
+ },
+};
static int __init nand_davinci_probe(struct platform_device *pdev)
{
@@ -690,15 +718,20 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mtd.oobsize - 16;
goto syndrome_done;
}
+ if (chunks == 4) {
+ info->ecclayout = hwecc4_2048;
+ info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ goto syndrome_done;
+ }
- /* For large page chips we'll be wanting to use a
- * not-yet-implemented mode that reads OOB data
- * before reading the body of the page, to avoid
- * the "infix OOB" model of NAND_ECC_HW_SYNDROME
- * (and preserve manufacturer badblock markings).
+	/* 4KiB page chips are not yet supported. The eccpos from
+	 * nand_ecclayout cannot hold 80 bytes, and changing eccpos[]
+	 * would break the userspace ioctl interface used by mtd-utils.
+	 * Once this is resolved, NAND_ECC_HW_OOB_FIRST mode can be used
+	 * for 4KiB-page chips.
*/
dev_warn(&pdev->dev, "no 4-bit ECC support yet "
- "for large page NAND\n");
+ "for 4KiB-page NAND\n");
ret = -EIO;
goto err_scan;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 1f6eb257871..ddd37d2554e 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -739,7 +739,8 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
static int fsl_elbc_read_page(struct mtd_info *mtd,
struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf,
+ int page)
{
fsl_elbc_read_buf(mtd, buf, mtd->writesize);
fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 76beea40d2c..65b26d5a5c0 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -857,6 +857,17 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
}
}
+/* Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks. */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr smallpage_memorybased = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 5,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
static int __init mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
@@ -973,7 +984,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto escan;
}
- host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
+ if (mtd->writesize == 2048) {
+ host->pagesize_2k = 1;
+ this->badblock_pattern = &smallpage_memorybased;
+ }
if (this->ecc.mode == NAND_ECC_HW) {
switch (mtd->oobsize) {
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8c21b89d2d0..22113865438 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -688,8 +688,7 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
retry:
spin_lock(lock);
- /* Hardware controller shared among independend devices */
- /* Hardware controller shared among independend devices */
+ /* Hardware controller shared among independent devices */
if (!chip->controller->active)
chip->controller->active = chip;
@@ -766,7 +765,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
* Not for syndrome calculating ecc controllers, which use a special oob layout
*/
static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
chip->read_buf(mtd, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -782,7 +781,7 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* We need a special oob layout and handling even when OOB isn't used.
*/
static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -821,7 +820,7 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *c
* @buf: buffer to store read data
*/
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -831,7 +830,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
- chip->ecc.read_page_raw(mtd, chip, buf);
+ chip->ecc.read_page_raw(mtd, chip, buf, page);
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -944,7 +943,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
* Not for syndrome calculating ecc controllers which need a special oob layout
*/
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -980,6 +979,54 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ *
+ * Hardware ECC for large page chips, which requires the OOB to be read first.
+ * For this ECC mode, the write_page method is re-used from ECC_HW.
+ * These methods read/write ECC from the OOB area, unlike the
+ * ECC_HW_SYNDROME support with multiple ECC steps, which follows the
+ * "infix ECC" scheme and reads/writes ECC from the data area,
+ * overwriting the NAND manufacturer's bad block markings.
+ */
+static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/**
* nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -989,7 +1036,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* we need a special oob layout and handling.
*/
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1131,11 +1178,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Now read the page into the buffer */
if (unlikely(ops->mode == MTD_OOB_RAW))
- ret = chip->ecc.read_page_raw(mtd, chip, bufpoi);
+ ret = chip->ecc.read_page_raw(mtd, chip,
+ bufpoi, page);
else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi);
else
- ret = chip->ecc.read_page(mtd, chip, bufpoi);
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ page);
if (ret < 0)
break;
@@ -1413,8 +1462,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
int len;
uint8_t *buf = ops->oobbuf;
- DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n",
- (unsigned long long)from, readlen);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
if (ops->mode == MTD_OOB_AUTO)
len = chip->ecc.layout->oobavail;
@@ -1422,8 +1471,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd->oobsize;
if (unlikely(ops->ooboffs >= len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt to start read outside oob\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
+ "outside oob\n", __func__);
return -EINVAL;
}
@@ -1431,8 +1480,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (unlikely(from >= mtd->size ||
ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
(from >> chip->page_shift)) * len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt read beyond end of device\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
+ "of device\n", __func__);
return -EINVAL;
}
@@ -1506,8 +1555,8 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
/* Do not allow reads past end of device */
if (ops->datbuf && (from + ops->len) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
- "Attempt read beyond end of device\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
+ "beyond end of device\n", __func__);
return -EINVAL;
}
@@ -1816,8 +1865,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
/* reject writes, which are not page aligned */
if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
- printk(KERN_NOTICE "nand_write: "
- "Attempt to write not page aligned data\n");
+ printk(KERN_NOTICE "%s: Attempt to write not "
+ "page aligned data\n", __func__);
return -EINVAL;
}
@@ -1944,8 +1993,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
int chipnr, page, status, len;
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n",
- (unsigned int)to, (int)ops->ooblen);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
if (ops->mode == MTD_OOB_AUTO)
len = chip->ecc.layout->oobavail;
@@ -1954,14 +2003,14 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
/* Do not allow write past end of page */
if ((ops->ooboffs + ops->ooblen) > len) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
- "Attempt to write past end of page\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
+ "past end of page\n", __func__);
return -EINVAL;
}
if (unlikely(ops->ooboffs >= len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_do_write_oob: "
- "Attempt to start write outside oob\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
+ "write outside oob\n", __func__);
return -EINVAL;
}
@@ -1970,8 +2019,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
ops->ooboffs + ops->ooblen >
((mtd->size >> chip->page_shift) -
(to >> chip->page_shift)) * len)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_do_write_oob: "
- "Attempt write beyond end of device\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
+ "end of device\n", __func__);
return -EINVAL;
}
@@ -2026,8 +2075,8 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
/* Do not allow writes past end of device */
if (ops->datbuf && (to + ops->len) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
- "Attempt write beyond end of device\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
+ "end of device\n", __func__);
return -EINVAL;
}
@@ -2117,26 +2166,27 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
unsigned int bbt_masked_page = 0xffffffff;
loff_t len;
- DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n",
- (unsigned long long)instr->addr, (unsigned long long)instr->len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
/* Start address must align on block boundary */
if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
return -EINVAL;
}
/* Length must align on block boundary */
if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Length not block aligned\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
+ __func__);
return -EINVAL;
}
/* Do not allow erase past end of device */
if ((instr->len + instr->addr) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Erase past end of device\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
+ __func__);
return -EINVAL;
}
@@ -2157,8 +2207,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Device is write protected!!!\n");
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
@@ -2183,8 +2233,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
*/
if (nand_block_checkbad(mtd, ((loff_t) page) <<
chip->page_shift, 0, allowbbt)) {
- printk(KERN_WARNING "nand_erase: attempt to erase a "
- "bad block at page 0x%08x\n", page);
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at page 0x%08x\n", __func__, page);
instr->state = MTD_ERASE_FAILED;
goto erase_exit;
}
@@ -2211,8 +2261,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* See if block erase succeeded */
if (status & NAND_STATUS_FAIL) {
- DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
- "Failed erase, page 0x%08x\n", page);
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
+ "page 0x%08x\n", __func__, page);
instr->state = MTD_ERASE_FAILED;
instr->fail_addr =
((loff_t)page << chip->page_shift);
@@ -2272,9 +2322,9 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
if (!rewrite_bbt[chipnr])
continue;
/* update the BBT for chip */
- DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
- "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
- chip->bbt_td->pages[chipnr]);
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
+ "(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
+ rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
nand_update_bbt(mtd, rewrite_bbt[chipnr]);
}
@@ -2292,7 +2342,7 @@ static void nand_sync(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
- DEBUG(MTD_DEBUG_LEVEL3, "nand_sync: called\n");
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
/* Grab the lock and see if the device is available */
nand_get_device(chip, mtd, FL_SYNCING);
@@ -2356,8 +2406,8 @@ static void nand_resume(struct mtd_info *mtd)
if (chip->state == FL_PM_SUSPENDED)
nand_release_device(mtd);
else
- printk(KERN_ERR "nand_resume() called for a chip which is not "
- "in suspended state\n");
+ printk(KERN_ERR "%s called for a chip which is not "
+ "in suspended state\n", __func__);
}
/*
@@ -2671,6 +2721,17 @@ int nand_scan_tail(struct mtd_info *mtd)
*/
switch (chip->ecc.mode) {
+ case NAND_ECC_HW_OOB_FIRST:
+	/* Similar to NAND_ECC_HW, but with a separate read_page handler */
+ if (!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) {
+ printk(KERN_WARNING "No ECC functions supplied; "
+ "Hardware ECC not possible\n");
+ BUG();
+ }
+ if (!chip->ecc.read_page)
+ chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
case NAND_ECC_HW:
/* Use standard hwecc read page function ? */
if (!chip->ecc.read_page)
@@ -2693,7 +2754,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.read_page == nand_read_page_hwecc ||
!chip->ecc.write_page ||
chip->ecc.write_page == nand_write_page_hwecc)) {
- printk(KERN_WARNING "No ECC functions supplied, "
+ printk(KERN_WARNING "No ECC functions supplied; "
"Hardware ECC not possible\n");
BUG();
}
@@ -2728,7 +2789,8 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.write_page_raw = nand_write_page_raw;
chip->ecc.read_oob = nand_read_oob_std;
chip->ecc.write_oob = nand_write_oob_std;
- chip->ecc.size = 256;
+ if (!chip->ecc.size)
+ chip->ecc.size = 256;
chip->ecc.bytes = 3;
break;
@@ -2858,7 +2920,8 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
/* Many callers got this wrong, so check for it for a while... */
if (!mtd->owner && caller_is_module()) {
- printk(KERN_CRIT "nand_scan() called with NULL mtd->owner!\n");
+ printk(KERN_CRIT "%s called with NULL mtd->owner!\n",
+ __func__);
BUG();
}
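The extra 'int page' argument threaded through this file (and through the atmel, cafe, fsl_elbc and sh_flctl read_page hooks elsewhere in the diff) is what allows an ecc.read_page implementation to issue its own commands, as the new nand_read_page_hwecc_oob_first() does. A minimal sketch of a driver hook using the new prototype; my_read_page is hypothetical:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>

    static int my_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                            uint8_t *buf, int page)
    {
            /* 'page' lets the hook re-issue commands itself, e.g. fetch the
             * OOB area before the data, as the OOB-first mode does. */
            chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
            chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
            chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
            chip->read_buf(mtd, buf, mtd->writesize);
            return 0;
    }

    /* wired up from the driver's probe path:
     *         chip->ecc.read_page = my_read_page;
     */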
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index c0cb87d6d16..db7ae9d6a29 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -417,22 +417,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
EXPORT_SYMBOL(nand_calculate_ecc);
/**
- * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
+ * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
+ * @eccsize: data bytes per ecc step (256 or 512)
*
- * Detect and correct a 1 bit error for 256/512 byte block
+ * Detect and correct a 1 bit error for eccsize byte block
*/
-int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
+int __nand_correct_data(unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc,
+ unsigned int eccsize)
{
unsigned char b0, b1, b2, bit_addr;
unsigned int byte_addr;
/* 256 or 512 bytes/ecc */
- const uint32_t eccsize_mult =
- (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
+ const uint32_t eccsize_mult = eccsize >> 8;
/*
* b0 to b2 indicate which bit is faulty (if any)
@@ -495,6 +495,23 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
printk(KERN_ERR "uncorrectable error : ");
return -1;
}
+EXPORT_SYMBOL(__nand_correct_data);
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ return __nand_correct_data(buf, read_ecc, calc_ecc,
+ ((struct nand_chip *)mtd->priv)->ecc.size);
+}
EXPORT_SYMBOL(nand_correct_data);
MODULE_LICENSE("GPL");
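Splitting __nand_correct_data() out of nand_correct_data() lets drivers correct an arbitrary step size without an mtd_info, which the tmio_nand and txx9ndfmc changes later in this diff use to handle 512-byte hardware ECC blocks as 256-byte sub-steps. A minimal usage sketch, assuming 3 ECC bytes per 256-byte step:

    #include <linux/mtd/nand_ecc.h>

    /* Correct a 512-byte block as two 256-byte halves; returns the number
     * of corrected bits, or a negative value on an uncorrectable error. */
    static int correct_512(unsigned char *buf, unsigned char *read_ecc,
                           unsigned char *calc_ecc)
    {
            int s0, s1;

            s0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
            if (s0 < 0)
                    return s0;
            s1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
            if (s1 < 0)
                    return s1;
            return s0 + s1;
    }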
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 89bf85af642..40b5658bdbe 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -102,8 +102,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
wmb();
ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
/* The NDFC uses Smart Media (SMC) bytes order */
- ecc_code[0] = p[2];
- ecc_code[1] = p[1];
+ ecc_code[0] = p[1];
+ ecc_code[1] = p[2];
ecc_code[2] = p[3];
return 0;
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
new file mode 100644
index 00000000000..7c302d55910
--- /dev/null
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -0,0 +1,250 @@
+/*
+ * drivers/mtd/nand/nomadik_nand.c
+ *
+ * Overview:
+ * Driver for on-board NAND flash on Nomadik Platforms
+ *
+ * Copyright © 2007 STMicroelectronics Pvt. Ltd.
+ * Author: Sachin Verma <sachin.verma@st.com>
+ *
+ * Copyright © 2009 Alessandro Rubini
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <mach/nand.h>
+#include <mach/fsmc.h>
+
+#include <mtd/mtd-abi.h>
+
+struct nomadik_nand_host {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ void __iomem *data_va;
+ void __iomem *cmd_va;
+ void __iomem *addr_va;
+ struct nand_bbt_descr *bbt_desc;
+};
+
+static struct nand_ecclayout nomadik_ecc_layout = {
+ .eccbytes = 3 * 4,
+ .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */
+ 0x02, 0x03, 0x04,
+ 0x12, 0x13, 0x14,
+ 0x22, 0x23, 0x24,
+ 0x32, 0x33, 0x34},
+ /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */
+ .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} },
+};
+
+static void nomadik_ecc_control(struct mtd_info *mtd, int mode)
+{
+ /* No need to enable hw ecc, it's on by default */
+}
+
+static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct nomadik_nand_host *host = nand->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, host->cmd_va);
+ else
+ writeb(cmd, host->addr_va);
+}
+
+static int nomadik_nand_probe(struct platform_device *pdev)
+{
+ struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
+ struct nomadik_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct resource *res;
+ int ret = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL);
+ if (!host) {
+ dev_err(&pdev->dev, "Failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Call the client's init function, if any */
+ if (pdata->init)
+ ret = pdata->init();
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Init function failed\n");
+ goto err;
+ }
+
+ /* ioremap three regions */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+ if (!res) {
+ ret = -EIO;
+ goto err_unmap;
+ }
+ host->addr_va = ioremap(res->start, res->end - res->start + 1);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+ if (!res) {
+ ret = -EIO;
+ goto err_unmap;
+ }
+ host->data_va = ioremap(res->start, res->end - res->start + 1);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+ if (!res) {
+ ret = -EIO;
+ goto err_unmap;
+ }
+ host->cmd_va = ioremap(res->start, res->end - res->start + 1);
+
+ if (!host->addr_va || !host->data_va || !host->cmd_va) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /* Link all private pointers */
+ mtd = &host->mtd;
+ nand = &host->nand;
+ mtd->priv = nand;
+ nand->priv = host;
+
+ host->mtd.owner = THIS_MODULE;
+ nand->IO_ADDR_R = host->data_va;
+ nand->IO_ADDR_W = host->data_va;
+ nand->cmd_ctrl = nomadik_cmd_ctrl;
+
+ /*
+	 * This stanza declares ECC_HW but uses soft routines, because the
+	 * hardware claims to make the calculation but not the correction.
+	 * However, so far I haven't managed to get the desired data out of it.
+ */
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->ecc.layout = &nomadik_ecc_layout;
+ nand->ecc.hwctl = nomadik_ecc_control;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+
+ nand->options = pdata->options;
+
+ /*
+	 * Scan to find existence of the device
+ */
+ if (nand_scan(&host->mtd, 1)) {
+ ret = -ENXIO;
+ goto err_unmap;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ add_mtd_partitions(&host->mtd, pdata->parts, pdata->nparts);
+#else
+ pr_info("Registering %s as whole device\n", mtd->name);
+ add_mtd_device(mtd);
+#endif
+
+ platform_set_drvdata(pdev, host);
+ return 0;
+
+ err_unmap:
+ if (host->cmd_va)
+ iounmap(host->cmd_va);
+ if (host->data_va)
+ iounmap(host->data_va);
+ if (host->addr_va)
+ iounmap(host->addr_va);
+ err:
+ kfree(host);
+ return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int nomadik_nand_remove(struct platform_device *pdev)
+{
+ struct nomadik_nand_host *host = platform_get_drvdata(pdev);
+ struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->exit)
+ pdata->exit();
+
+ if (host) {
+ iounmap(host->cmd_va);
+ iounmap(host->data_va);
+ iounmap(host->addr_va);
+ kfree(host);
+ }
+ return 0;
+}
+
+static int nomadik_nand_suspend(struct device *dev)
+{
+ struct nomadik_nand_host *host = dev_get_drvdata(dev);
+ int ret = 0;
+ if (host)
+ ret = host->mtd.suspend(&host->mtd);
+ return ret;
+}
+
+static int nomadik_nand_resume(struct device *dev)
+{
+ struct nomadik_nand_host *host = dev_get_drvdata(dev);
+ if (host)
+ host->mtd.resume(&host->mtd);
+ return 0;
+}
+
+static struct dev_pm_ops nomadik_nand_pm_ops = {
+ .suspend = nomadik_nand_suspend,
+ .resume = nomadik_nand_resume,
+};
+
+static struct platform_driver nomadik_nand_driver = {
+ .probe = nomadik_nand_probe,
+ .remove = nomadik_nand_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "nomadik_nand",
+ .pm = &nomadik_nand_pm_ops,
+ },
+};
+
+static int __init nand_nomadik_init(void)
+{
+ pr_info("Nomadik NAND driver\n");
+ return platform_driver_register(&nomadik_nand_driver);
+}
+
+static void __exit nand_nomadik_exit(void)
+{
+ platform_driver_unregister(&nomadik_nand_driver);
+}
+
+module_init(nand_nomadik_init);
+module_exit(nand_nomadik_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
+MODULE_DESCRIPTION("NAND driver for Nomadik Platform");
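For context, a hedged sketch of the board-side hookup this driver expects. The field names (init, exit, parts, nparts, options) and the resource names are taken from the probe code above; the actual struct nomadik_nand_platform_data lives in <mach/nand.h> and is not shown here, so treat the layout and the option flags as illustrative:

    #include <linux/mtd/partitions.h>
    #include <linux/mtd/nand.h>
    #include <mach/nand.h>

    static struct mtd_partition board_nand_parts[] = {
            { .name = "root", .offset = 0,                  .size = SZ_16M },
            { .name = "data", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
    };

    static struct nomadik_nand_platform_data board_nand_data = {
            .parts   = board_nand_parts,
            .nparts  = ARRAY_SIZE(board_nand_parts),
            .options = NAND_COPYBACK | NAND_CACHEPRG,       /* illustrative */
            /* .init / .exit may point at board-specific FSMC/pin setup */
    };

    /* The probe above looks up IORESOURCE_MEM resources named "nand_addr",
     * "nand_data" and "nand_cmd" on a platform device named "nomadik_nand". */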
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ebd07e95b81..090ab87086b 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -18,8 +18,7 @@
#include <linux/mtd/partitions.h>
#include <linux/io.h>
-#include <asm/dma.h>
-
+#include <mach/dma.h>
#include <mach/gpmc.h>
#include <mach/nand.h>
@@ -112,6 +111,27 @@
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
+#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
+static int use_prefetch = 1;
+
+/* "modprobe ... use_prefetch=0" etc */
+module_param(use_prefetch, bool, 0);
+MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");
+
+#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
+static int use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+#else
+const int use_dma;
+#endif
+#else
+const int use_prefetch;
+const int use_dma;
+#endif
+
struct omap_nand_info {
struct nand_hw_control controller;
struct omap_nand_platform_data *pdata;
@@ -124,6 +144,9 @@ struct omap_nand_info {
unsigned long phys_base;
void __iomem *gpmc_cs_baseaddr;
void __iomem *gpmc_baseaddr;
+ void __iomem *nand_pref_fifo_add;
+ struct completion comp;
+ int dma_ch;
};
/**
@@ -189,6 +212,38 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
}
/**
+ * omap_read_buf8 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ ioread8_rep(nand->IO_ADDR_R, buf, len);
+}
+
+/**
+ * omap_write_buf8 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u_char *p = (u_char *)buf;
+
+ while (len--) {
+ iowrite8(*p++, info->nand.IO_ADDR_W);
+ while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
+ GPMC_STATUS) & GPMC_BUF_FULL));
+ }
+}
+
+/**
* omap_read_buf16 - read data from NAND controller into buffer
* @mtd: MTD device structure
 * @buf: buffer to store data
@@ -198,7 +253,7 @@ static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *nand = mtd->priv;
- __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
+ ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}
/**
@@ -217,13 +272,242 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
len >>= 1;
while (len--) {
- writew(*p++, info->nand.IO_ADDR_W);
+ iowrite16(*p++, info->nand.IO_ADDR_W);
while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
GPMC_STATUS) & GPMC_BUF_FULL))
;
}
}
+
+/**
+ * omap_read_buf_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t pfpw_status = 0, r_count = 0;
+ int ret = 0;
+ u32 *p = (u32 *)buf;
+
+ /* take care of subpage reads */
+ for (; len % 4 != 0; ) {
+ *buf++ = __raw_readb(info->nand.IO_ADDR_R);
+ len--;
+ }
+ p = (u32 *) buf;
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len);
+ else
+ omap_read_buf8(mtd, buf, len);
+ } else {
+ do {
+ pfpw_status = gpmc_prefetch_status();
+ r_count = ((pfpw_status >> 24) & 0x7F) >> 2;
+ ioread32_rep(info->nand_pref_fifo_add, p, r_count);
+ p += r_count;
+ len -= r_count << 2;
+ } while (len);
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset();
+ }
+}
+
+/**
+ * omap_write_buf_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t pfpw_status = 0, w_count = 0;
+ int i = 0, ret = 0;
+ u16 *p = (u16 *) buf;
+
+ /* take care of subpage writes */
+ if (len % 2 != 0) {
+ writeb(*buf, info->nand.IO_ADDR_R);
+ p = (u16 *)(buf + 1);
+ len--;
+ }
+
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_write_buf16(mtd, buf, len);
+ else
+ omap_write_buf8(mtd, buf, len);
+ } else {
+ pfpw_status = gpmc_prefetch_status();
+ while (pfpw_status & 0x3FFF) {
+ w_count = ((pfpw_status >> 24) & 0x7F) >> 1;
+ for (i = 0; (i < w_count) && len; i++, len -= 2)
+ iowrite16(*p++, info->nand_pref_fifo_add);
+ pfpw_status = gpmc_prefetch_status();
+ }
+
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset();
+ }
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
+/*
+ * omap_nand_dma_cb: callback on the completion of dma transfer
+ * @lch: logical channel
+ * @ch_status: channel status
+ * @data: pointer to completion data structure
+ */
+static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+{
+ complete((struct completion *) data);
+}
+
+/*
+ * omap_nand_dma_transfer: configure and start dma transfer
+ * @mtd: MTD device structure
+ * @addr: virtual address in RAM of source/destination
+ * @len: number of data bytes to be transferred
+ * @is_write: flag for read/write operation
+ */
+static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
+ unsigned int len, int is_write)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ uint32_t prefetch_status = 0;
+ enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
+ dma_addr_t dma_addr;
+ int ret;
+
+	/* The FIFO depth is 64 bytes. We have a sync at each frame and the
+	 * frame length is 64 bytes.
+ */
+ int buf_len = len >> 6;
+
+ if (addr >= high_memory) {
+ struct page *p1;
+
+ if (((size_t)addr & PAGE_MASK) !=
+ ((size_t)(addr + len - 1) & PAGE_MASK))
+ goto out_copy;
+ p1 = vmalloc_to_page(addr);
+ if (!p1)
+ goto out_copy;
+ addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
+ }
+
+ dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
+ if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+ dev_err(&info->pdev->dev,
+ "Couldn't DMA map a %d byte buffer\n", len);
+ goto out_copy;
+ }
+
+ if (is_write) {
+ omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+ info->phys_base, 0, 0);
+ omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+ omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
+ OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
+ } else {
+ omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
+ info->phys_base, 0, 0);
+ omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+ omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
+ OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
+ }
+ /* configure and start prefetch transfer */
+ ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
+ if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+ goto out_copy;
+
+ init_completion(&info->comp);
+
+ omap_start_dma(info->dma_ch);
+
+ /* setup and start DMA using dma_addr */
+ wait_for_completion(&info->comp);
+
+ while (0x3fff & (prefetch_status = gpmc_prefetch_status()))
+ ;
+ /* disable and stop the PFPW engine */
+ gpmc_prefetch_reset();
+
+ dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ return 0;
+
+out_copy:
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
+ : omap_write_buf16(mtd, (u_char *) addr, len);
+ else
+ is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
+ : omap_write_buf8(mtd, (u_char *) addr, len);
+ return 0;
+}
+#else
+static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
+static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
+ unsigned int len, int is_write)
+{
+ return 0;
+}
+#endif
+
+/**
+ * omap_read_buf_dma_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+ if (len <= mtd->oobsize)
+ omap_read_buf_pref(mtd, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, buf, len, 0x0);
+}
+
+/**
+ * omap_write_buf_dma_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_dma_pref(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ if (len <= mtd->oobsize)
+ omap_write_buf_pref(mtd, buf, len);
+ else
+ /* start transfer in DMA mode */
+ omap_nand_dma_transfer(mtd, buf, len, 0x1);
+}
+
/**
* omap_verify_buf - Verify chip data against buffer
* @mtd: MTD device structure
@@ -658,17 +942,12 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
err = -ENOMEM;
goto out_release_mem_region;
}
+
info->nand.controller = &info->controller;
info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
info->nand.cmd_ctrl = omap_hwcontrol;
- /* REVISIT: only supports 16-bit NAND flash */
-
- info->nand.read_buf = omap_read_buf16;
- info->nand.write_buf = omap_write_buf16;
- info->nand.verify_buf = omap_verify_buf;
-
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
 * function and the generic nand_wait function which reads the status
@@ -689,6 +968,40 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
== 0x1000)
info->nand.options |= NAND_BUSWIDTH_16;
+ if (use_prefetch) {
+ /* copy the virtual address of nand base for fifo access */
+ info->nand_pref_fifo_add = info->nand.IO_ADDR_R;
+
+ info->nand.read_buf = omap_read_buf_pref;
+ info->nand.write_buf = omap_write_buf_pref;
+ if (use_dma) {
+ err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
+ omap_nand_dma_cb, &info->comp, &info->dma_ch);
+ if (err < 0) {
+ info->dma_ch = -1;
+ printk(KERN_WARNING "DMA request failed."
+ " Non-dma data transfer mode\n");
+ } else {
+ omap_set_dma_dest_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(info->dma_ch,
+ OMAP_DMA_DATA_BURST_16);
+
+ info->nand.read_buf = omap_read_buf_dma_pref;
+ info->nand.write_buf = omap_write_buf_dma_pref;
+ }
+ }
+ } else {
+ if (info->nand.options & NAND_BUSWIDTH_16) {
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ } else {
+ info->nand.read_buf = omap_read_buf8;
+ info->nand.write_buf = omap_write_buf8;
+ }
+ }
+ info->nand.verify_buf = omap_verify_buf;
+
#ifdef CONFIG_MTD_NAND_OMAP_HWECC
info->nand.ecc.bytes = 3;
info->nand.ecc.size = 512;
@@ -744,9 +1057,12 @@ static int omap_nand_remove(struct platform_device *pdev)
struct omap_nand_info *info = mtd->priv;
platform_set_drvdata(pdev, NULL);
+ if (use_dma)
+ omap_free_dma(info->dma_ch);
+
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
- iounmap(info->nand.IO_ADDR_R);
+ iounmap(info->nand_pref_fifo_add);
kfree(&info->mtd);
return 0;
}
@@ -763,6 +1079,15 @@ static struct platform_driver omap_nand_driver = {
static int __init omap_nand_init(void)
{
printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+
+	/* This check is required if the driver is being
+	 * loaded at run time as a module
+ */
+ if ((1 == use_dma) && (0 == use_prefetch)) {
+		printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
+				"without 'use_prefetch'. Prefetch will not be"
+				" used in either mode (mpu or dma)\n");
+ }
return platform_driver_register(&omap_nand_driver);
}
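A quick worked example of the frame arithmetic in omap_nand_dma_transfer() above: the GPMC FIFO is 64 bytes deep and each DMA frame carries one FIFO's worth of data, so a full 2048-byte page is programmed as 32 frames of sixteen 32-bit words:

    int len = 2048;             /* one large page */
    int buf_len = len >> 6;     /* 2048 / 64 = 32 frames */
    /* programmed as OMAP_DMA_DATA_TYPE_S32 with 0x10 elements per frame
     * (16 words = 64 bytes), buf_len frames, frame-synchronized on
     * OMAP24XX_DMA_GPMC */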
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 0d9d4bc9c76..f59c07427af 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -171,7 +171,6 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
}
static struct platform_driver orion_nand_driver = {
- .probe = orion_nand_probe,
.remove = __devexit_p(orion_nand_remove),
.driver = {
.name = "orion_nand",
@@ -181,7 +180,7 @@ static struct platform_driver orion_nand_driver = {
static int __init orion_nand_init(void)
{
- return platform_driver_register(&orion_nand_driver);
+ return platform_driver_probe(&orion_nand_driver, orion_nand_probe);
}
static void __exit orion_nand_exit(void)
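This conversion (repeated for sh_flctl below) swaps platform_driver_register() for platform_driver_probe(), which binds the driver exactly once at registration time; since the probe routine can never run again for a late-added device, it may stay in __init memory. A minimal sketch of the pattern, with illustrative names:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __init my_probe(struct platform_device *pdev)
    {
            return 0;               /* one-shot probe; may live in .init.text */
    }

    static int my_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver my_driver = {
            /* note: no .probe member */
            .remove = my_remove,
            .driver = {
                    .name  = "my-device",
                    .owner = THIS_MODULE,
            },
    };

    static int __init my_init(void)
    {
            return platform_driver_probe(&my_driver, my_probe);
    }
    module_init(my_init);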
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 30a8ce6d3e6..6ea520ae241 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -102,6 +102,7 @@ enum {
ERR_SENDCMD = -2,
ERR_DBERR = -3,
ERR_BBERR = -4,
+ ERR_SBERR = -5,
};
enum {
@@ -564,11 +565,13 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
status = nand_readl(info, NDSR);
- if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
+ if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) {
if (status & NDSR_DBERR)
info->retcode = ERR_DBERR;
+ else if (status & NDSR_SBERR)
+ info->retcode = ERR_SBERR;
- disable_int(info, NDSR_RDDREQ | NDSR_DBERR);
+ disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
if (info->use_dma) {
info->state = STATE_DMA_READING;
@@ -670,7 +673,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
break;
- pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
+ pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
/* We only are OOB, so if the data has error, does not matter */
if (info->retcode == ERR_DBERR)
@@ -687,7 +690,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
break;
- pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
+ pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
if (info->retcode == ERR_DBERR) {
/* for blank page (all 0xff), HW will calculate its ECC as
@@ -861,8 +864,12 @@ static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
* consider it as a ecc error which will tell the caller the
* read fail We have distinguish all the errors, but the
* nand_read_ecc only check this function return value
+ *
+ * Corrected (single-bit) errors must also be noted.
*/
- if (info->retcode != ERR_NONE)
+ if (info->retcode == ERR_SBERR)
+ return 1;
+ else if (info->retcode != ERR_NONE)
return -1;
return 0;
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 2bc896623e2..02bef21f2e4 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -329,7 +329,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
}
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf)
+ uint8_t *buf, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -857,7 +857,6 @@ static int __exit flctl_remove(struct platform_device *pdev)
}
static struct platform_driver flctl_driver = {
- .probe = flctl_probe,
.remove = flctl_remove,
.driver = {
.name = "sh_flctl",
@@ -867,7 +866,7 @@ static struct platform_driver flctl_driver = {
static int __init flctl_nand_init(void)
{
- return platform_driver_register(&flctl_driver);
+ return platform_driver_probe(&flctl_driver, flctl_probe);
}
static void __exit flctl_nand_cleanup(void)
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index daa6a4c3b8c..92c73344a66 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -301,6 +301,21 @@ static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
return 0;
}
+static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ int r0, r1;
+
+ /* assume ecc.size = 512 and ecc.bytes = 6 */
+ r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ if (r0 < 0)
+ return r0;
+ r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
+ if (r1 < 0)
+ return r1;
+ return r0 + r1;
+}
+
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
@@ -424,7 +439,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->ecc.bytes = 6;
nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
- nand_chip->ecc.correct = nand_correct_data;
+ nand_chip->ecc.correct = tmio_nand_correct_data;
if (data)
nand_chip->badblock_pattern = data->badblock_pattern;
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 488088eff2c..73af8324d0d 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -189,18 +189,43 @@ static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code)
{
struct platform_device *dev = mtd_to_platdev(mtd);
+ struct nand_chip *chip = mtd->priv;
+ int eccbytes;
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
mcr &= ~TXX9_NDFMCR_ECC_ALL;
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
- ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
- ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
- ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
+ ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+ ecc_code += 3;
+ }
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
return 0;
}
+static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ struct nand_chip *chip = mtd->priv;
+ int eccsize;
+ int corrected = 0;
+ int stat;
+
+ for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+ stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ if (stat < 0)
+ return stat;
+ corrected += stat;
+ buf += 256;
+ read_ecc += 3;
+ calc_ecc += 3;
+ }
+ return corrected;
+}
+
static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
struct platform_device *dev = mtd_to_platdev(mtd);
@@ -244,6 +269,22 @@ static void txx9ndfmc_initialize(struct platform_device *dev)
#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
+static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ ret = nand_scan_ident(mtd, 1);
+ if (!ret) {
+ if (mtd->writesize >= 512) {
+ chip->ecc.size = mtd->writesize;
+ chip->ecc.bytes = 3 * (mtd->writesize / 256);
+ }
+ ret = nand_scan_tail(mtd);
+ }
+ return ret;
+}
+
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
@@ -321,9 +362,10 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
chip->dev_ready = txx9ndfmc_dev_ready;
chip->ecc.calculate = txx9ndfmc_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
+ chip->ecc.correct = txx9ndfmc_correct_data;
chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
chip->ecc.mode = NAND_ECC_HW;
+ /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
chip->ecc.size = 256;
chip->ecc.bytes = 3;
chip->chip_delay = 100;
@@ -349,7 +391,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
- if (nand_scan(mtd, 1)) {
+ if (txx9ndfmc_nand_scan(mtd)) {
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/w90p910_nand.c
new file mode 100644
index 00000000000..7680e731348
--- /dev/null
+++ b/drivers/mtd/nand/w90p910_nand.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#define REG_FMICSR 0x00
+#define REG_SMCSR 0xa0
+#define REG_SMISR 0xac
+#define REG_SMCMD 0xb0
+#define REG_SMADDR 0xb4
+#define REG_SMDATA 0xb8
+
+#define RESET_FMI 0x01
+#define NAND_EN 0x08
+#define READYBUSY (0x01 << 18)
+
+#define SWRST 0x01
+#define PSIZE (0x01 << 3)
+#define DMARWEN (0x03 << 1)
+#define BUSWID (0x01 << 4)
+#define ECC4EN (0x01 << 5)
+#define WP (0x01 << 24)
+#define NANDCS (0x01 << 25)
+#define ENDADDR (0x01 << 31)
+
+#define read_data_reg(dev) \
+ __raw_readl((dev)->reg + REG_SMDATA)
+
+#define write_data_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMDATA)
+
+#define write_cmd_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMCMD)
+
+#define write_addr_reg(dev, val) \
+ __raw_writel((val), (dev)->reg + REG_SMADDR)
+
+struct w90p910_nand {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *reg;
+ struct clk *clk;
+ spinlock_t lock;
+};
+
+static const struct mtd_partition partitions[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ }
+};
+
+static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
+{
+ unsigned char ret;
+ struct w90p910_nand *nand;
+
+ nand = container_of(mtd, struct w90p910_nand, mtd);
+
+ ret = (unsigned char)read_data_reg(nand);
+
+ return ret;
+}
+
+static void w90p910_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
+{
+ int i;
+ struct w90p910_nand *nand;
+
+ nand = container_of(mtd, struct w90p910_nand, mtd);
+
+ for (i = 0; i < len; i++)
+ buf[i] = (unsigned char)read_data_reg(nand);
+}
+
+static void w90p910_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
+{
+ int i;
+ struct w90p910_nand *nand;
+
+ nand = container_of(mtd, struct w90p910_nand, mtd);
+
+ for (i = 0; i < len; i++)
+ write_data_reg(nand, buf[i]);
+}
+
+static int w90p910_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
+{
+ int i;
+ struct w90p910_nand *nand;
+
+ nand = container_of(mtd, struct w90p910_nand, mtd);
+
+ for (i = 0; i < len; i++) {
+ if (buf[i] != (unsigned char)read_data_reg(nand))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int w90p910_check_rb(struct w90p910_nand *nand)
+{
+ unsigned int val;
+ spin_lock(&nand->lock);
+	val = __raw_readl(nand->reg + REG_SMISR);
+ val &= READYBUSY;
+ spin_unlock(&nand->lock);
+
+ return val;
+}
+
+static int w90p910_nand_devready(struct mtd_info *mtd)
+{
+ struct w90p910_nand *nand;
+ int ready;
+
+ nand = container_of(mtd, struct w90p910_nand, mtd);
+
+ ready = (w90p910_check_rb(nand)) ? 1 : 0;
+ return ready;
+}
+
+static void w90p910_nand_command_lp(struct mtd_info *mtd,
+ unsigned int command, int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd->priv;
+ struct w90p910_nand *nand;
+
+ nand = container_of(mtd, struct w90p910_nand, mtd);
+
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ write_cmd_reg(nand, command & 0xff);
+
+ if (column != -1 || page_addr != -1) {
+
+ if (column != -1) {
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ write_addr_reg(nand, column);
+ write_addr_reg(nand, column >> 8 | ENDADDR);
+ }
+ if (page_addr != -1) {
+ write_addr_reg(nand, page_addr);
+
+ if (chip->chipsize > (128 << 20)) {
+ write_addr_reg(nand, page_addr >> 8);
+ write_addr_reg(nand, page_addr >> 16 | ENDADDR);
+ } else {
+ write_addr_reg(nand, page_addr >> 8 | ENDADDR);
+ }
+ }
+ }
+
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_DEPLETE1:
+ return;
+
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ case NAND_CMD_STATUS_ERROR1:
+ case NAND_CMD_STATUS_ERROR2:
+ case NAND_CMD_STATUS_ERROR3:
+ udelay(chip->chip_delay);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+
+ write_cmd_reg(nand, NAND_CMD_STATUS);
+ write_cmd_reg(nand, command);
+
+ while (!w90p910_check_rb(nand))
+ ;
+
+ return;
+
+ case NAND_CMD_RNDOUT:
+ write_cmd_reg(nand, NAND_CMD_RNDOUTSTART);
+ return;
+
+ case NAND_CMD_READ0:
+
+ write_cmd_reg(nand, NAND_CMD_READSTART);
+ default:
+
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay(100);
+
+ while (!chip->dev_ready(mtd))
+ ;
+}
+
+
+static void w90p910_nand_enable(struct w90p910_nand *nand)
+{
+ unsigned int val;
+ spin_lock(&nand->lock);
+ __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR));
+
+ val = __raw_readl(nand->reg + REG_FMICSR);
+
+ if (!(val & NAND_EN))
+		__raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
+
+ val = __raw_readl(nand->reg + REG_SMCSR);
+
+ val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS);
+ val |= WP;
+
+ __raw_writel(val, nand->reg + REG_SMCSR);
+
+ spin_unlock(&nand->lock);
+}
+
+static int __devinit w90p910_nand_probe(struct platform_device *pdev)
+{
+ struct w90p910_nand *w90p910_nand;
+ struct nand_chip *chip;
+ int retval;
+ struct resource *res;
+
+ retval = 0;
+
+ w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
+ if (!w90p910_nand)
+ return -ENOMEM;
+ chip = &(w90p910_nand->chip);
+
+ w90p910_nand->mtd.priv = chip;
+ w90p910_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&w90p910_nand->lock);
+
+ w90p910_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(w90p910_nand->clk)) {
+ retval = -ENOENT;
+ goto fail1;
+ }
+ clk_enable(w90p910_nand->clk);
+
+ chip->cmdfunc = w90p910_nand_command_lp;
+ chip->dev_ready = w90p910_nand_devready;
+ chip->read_byte = w90p910_nand_read_byte;
+ chip->write_buf = w90p910_nand_write_buf;
+ chip->read_buf = w90p910_nand_read_buf;
+ chip->verify_buf = w90p910_verify_buf;
+ chip->chip_delay = 50;
+ chip->options = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENXIO;
+ goto fail1;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ retval = -EBUSY;
+ goto fail1;
+ }
+
+ w90p910_nand->reg = ioremap(res->start, resource_size(res));
+ if (!w90p910_nand->reg) {
+ retval = -ENOMEM;
+ goto fail2;
+ }
+
+ w90p910_nand_enable(w90p910_nand);
+
+ if (nand_scan(&(w90p910_nand->mtd), 1)) {
+ retval = -ENXIO;
+ goto fail3;
+ }
+
+ add_mtd_partitions(&(w90p910_nand->mtd), partitions,
+ ARRAY_SIZE(partitions));
+
+ platform_set_drvdata(pdev, w90p910_nand);
+
+ return retval;
+
+fail3: iounmap(w90p910_nand->reg);
+fail2: release_mem_region(res->start, resource_size(res));
+fail1: kfree(w90p910_nand);
+ return retval;
+}
+
+static int __devexit w90p910_nand_remove(struct platform_device *pdev)
+{
+ struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ iounmap(w90p910_nand->reg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_disable(w90p910_nand->clk);
+ clk_put(w90p910_nand->clk);
+
+ kfree(w90p910_nand);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver w90p910_nand_driver = {
+ .probe = w90p910_nand_probe,
+ .remove = __devexit_p(w90p910_nand_remove),
+ .driver = {
+ .name = "w90p910-fmi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init w90p910_nand_init(void)
+{
+ return platform_driver_register(&w90p910_nand_driver);
+}
+
+static void __exit w90p910_nand_exit(void)
+{
+ platform_driver_unregister(&w90p910_nand_driver);
+}
+
+module_init(w90p910_nand_init);
+module_exit(w90p910_nand_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910 nand driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:w90p910-fmi");
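
A hedged sketch of the board-side glue this new driver expects to bind against; the FMI register base, window size and clock wiring below are placeholders (board specific), only the "w90p910-fmi" name and the single IORESOURCE_MEM resource follow from the probe code above:

/* Hypothetical board file fragment; base/size are placeholders, not real values. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>

#define BOARD_FMI_BASE	0x00000000	/* placeholder: SoC FMI register base */
#define BOARD_FMI_SIZE	0x00001000	/* placeholder: register window size */

static struct resource board_fmi_resources[] = {
	{
		.start	= BOARD_FMI_BASE,
		.end	= BOARD_FMI_BASE + BOARD_FMI_SIZE - 1,
		.flags	= IORESOURCE_MEM,	/* found via platform_get_resource() */
	},
};

static struct platform_device board_fmi_device = {
	.name		= "w90p910-fmi",	/* must match the driver name */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(board_fmi_resources),
	.resource	= board_fmi_resources,
};

/*
 * Registered from board init, e.g. platform_device_register(&board_fmi_device);
 * the driver also calls clk_get(&pdev->dev, NULL), so the platform clock code
 * must provide a matching clock for this device.
 */
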
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 3e164f0c929..62d6a78c4ee 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -46,21 +46,12 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
const u32 *reg;
int len;
- /* check if this is a partition node */
- partname = of_get_property(pp, "name", &len);
- if (strcmp(partname, "partition") != 0) {
+ reg = of_get_property(pp, "reg", &len);
+ if (!reg) {
nr_parts--;
continue;
}
- reg = of_get_property(pp, "reg", &len);
- if (!reg || (len != 2 * sizeof(u32))) {
- of_node_put(pp);
- dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
- kfree(*pparts);
- *pparts = NULL;
- return -EINVAL;
- }
(*pparts)[i].offset = reg[0];
(*pparts)[i].size = reg[1];
@@ -75,6 +66,14 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
i++;
}
+ if (!i) {
+ of_node_put(pp);
+ dev_err(dev, "No valid partition found on %s\n", node->full_name);
+ kfree(*pparts);
+ *pparts = NULL;
+ return -EINVAL;
+ }
+
return nr_parts;
}
EXPORT_SYMBOL(of_mtd_parse_partitions);
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 79fa79e8f8d..a38f580c2bb 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -5,6 +5,7 @@
menuconfig MTD_ONENAND
tristate "OneNAND Device Support"
depends on MTD
+ select MTD_PARTITIONS
help
This enables support for accessing all type of OneNAND flash
devices. For further information see
@@ -23,7 +24,6 @@ config MTD_ONENAND_VERIFY_WRITE
config MTD_ONENAND_GENERIC
tristate "OneNAND Flash device via platform device driver"
- depends on ARM
help
Support for OneNAND flash via platform device driver.
@@ -66,7 +66,6 @@ config MTD_ONENAND_2X_PROGRAM
config MTD_ONENAND_SIM
tristate "OneNAND simulator support"
- depends on MTD_PARTITIONS
help
The simulator may simulate various OneNAND flash chips for the
OneNAND MTD layer.
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 3a496c33fb5..e78914938c5 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -19,12 +19,16 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
-
#include <asm/io.h>
-#include <asm/mach/flash.h>
-
-#define DRIVER_NAME "onenand"
+/*
+ * Note: Driver name and platform data format have been updated!
+ *
+ * This version of the driver is named "onenand-flash" and takes struct
+ * onenand_platform_data as platform data. The old ARM-specific version
+ * with the name "onenand" used to take struct flash_platform_data.
+ */
+#define DRIVER_NAME "onenand-flash"
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
@@ -39,16 +43,16 @@ struct onenand_info {
static int __devinit generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
- struct flash_platform_data *pdata = pdev->dev.platform_data;
+ struct onenand_platform_data *pdata = pdev->dev.platform_data;
struct resource *res = pdev->resource;
- unsigned long size = res->end - res->start + 1;
+ unsigned long size = resource_size(res);
int err;
info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- if (!request_mem_region(res->start, size, pdev->dev.driver->name)) {
+ if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) {
err = -EBUSY;
goto out_free_info;
}
@@ -59,7 +63,7 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- info->onenand.mmcontrol = pdata->mmcontrol;
+ info->onenand.mmcontrol = pdata ? pdata->mmcontrol : 0;
info->onenand.irq = platform_get_irq(pdev, 0);
info->mtd.name = dev_name(&pdev->dev);
@@ -75,7 +79,7 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
if (err > 0)
add_mtd_partitions(&info->mtd, info->parts, err);
- else if (err <= 0 && pdata->parts)
+ else if (err <= 0 && pdata && pdata->parts)
add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
else
#endif
@@ -99,7 +103,7 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev)
{
struct onenand_info *info = platform_get_drvdata(pdev);
struct resource *res = pdev->resource;
- unsigned long size = res->end - res->start + 1;
+ unsigned long size = resource_size(res);
platform_set_drvdata(pdev, NULL);
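
To make the renamed-driver comment above concrete, a board might now register the device roughly as below. This is a sketch under the assumption that struct onenand_platform_data (with the mmcontrol/parts/nr_parts fields used by the probe code) comes from <linux/mtd/onenand.h>; the flash base, window size and partition layout are made-up placeholders:

/* Hypothetical board code for the "onenand-flash" platform driver. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/onenand.h>

static struct mtd_partition board_onenand_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = 1024 * 1024 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct onenand_platform_data board_onenand_data = {
	.parts		= board_onenand_parts,
	.nr_parts	= ARRAY_SIZE(board_onenand_parts),
	/* .mmcontrol can be set if the board needs sync-mode control */
};

static struct resource board_onenand_resource = {
	.start	= 0x00000000,			/* placeholder flash base */
	.end	= 0x00000000 + 0x20000 - 1,	/* placeholder window size */
	.flags	= IORESOURCE_MEM,
};

static struct platform_device board_onenand_device = {
	.name		= "onenand-flash",	/* matches the new DRIVER_NAME */
	.id		= -1,
	.dev		= { .platform_data = &board_onenand_data },
	.num_resources	= 1,
	.resource	= &board_onenand_resource,
};
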
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6e829095ea9..ff66e4330aa 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1191,7 +1191,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/*
* Chip boundary handling in DDP
* Now we issued chip 1 read and pointed chip 1
- * bufferam so we have to point chip 0 bufferam.
+ * bufferram so we have to point chip 0 bufferram.
*/
if (ONENAND_IS_DDP(this) &&
unlikely(from == (this->chipsize >> 1))) {
@@ -1867,8 +1867,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
ONENAND_SET_NEXT_BUFFERRAM(this);
/*
- * 2 PLANE, MLC, and Flex-OneNAND doesn't support
- * write-while-programe feature.
+ * 2 PLANE, MLC, and Flex-OneNAND do not support
+ * write-while-program feature.
*/
if (!ONENAND_IS_2PLANE(this) && !first) {
ONENAND_SET_PREV_BUFFERRAM(this);
@@ -1879,7 +1879,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
if (ret) {
written -= prevlen;
- printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret);
+ printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
break;
}
@@ -1905,7 +1905,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, to, !ret && !subpage);
if (ret) {
- printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret);
+ printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
break;
}
@@ -2201,7 +2201,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
- /* Loop throught the pages */
+ /* Loop through the blocks */
instr->state = MTD_ERASING;
while (len) {
@@ -2328,7 +2328,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
- /* We write two bytes, so we dont have to mess with 16 bit access */
+ /* We write two bytes, so we don't have to mess with 16-bit access */
ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
/* FIXME : What to do when marking SLC block in partition
* with MLC erasesize? For now, it is not advisable to
@@ -2557,7 +2557,7 @@ static void onenand_unlock_all(struct mtd_info *mtd)
#ifdef CONFIG_MTD_ONENAND_OTP
-/* Interal OTP operation */
+/* Internal OTP operation */
typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len,
size_t *retlen, u_char *buf);
@@ -2921,7 +2921,7 @@ static void onenand_check_features(struct mtd_info *mtd)
this->options |= ONENAND_HAS_2PLANE;
case ONENAND_DEVICE_DENSITY_2Gb:
- /* 2Gb DDP don't have 2 plane */
+ /* 2Gb DDP does not have 2 plane */
if (!ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
this->options |= ONENAND_HAS_UNLOCK_ALL;
@@ -3364,7 +3364,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* It's real page size */
this->writesize = mtd->writesize;
- /* REVIST: Multichip handling */
+ /* REVISIT: Multichip handling */
if (FLEXONENAND(this))
flexonenand_get_size(mtd);
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index a18e8d2f255..5553cd4eab2 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -512,7 +512,7 @@ static int __init mtd_oobtest_init(void)
goto out;
addr0 = 0;
- for (i = 0; bbt[i] && i < ebcnt; ++i)
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
/* Attempt to write off end of OOB */
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 9648818b9e2..103cac480fe 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -116,11 +116,11 @@ static int verify_eraseblock(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
addr0 = 0;
- for (i = 0; bbt[i] && i < ebcnt; ++i)
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
addrn = mtd->size;
- for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i)
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
addrn -= mtd->erasesize;
set_random_data(writebuf, mtd->erasesize);
@@ -219,11 +219,11 @@ static int crosstest(void)
memset(pp1, 0, pgsize * 4);
addr0 = 0;
- for (i = 0; bbt[i] && i < ebcnt; ++i)
+ for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
addrn = mtd->size;
- for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i)
+ for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
addrn -= mtd->erasesize;
/* Read 2nd-to-last page to pp1 */
@@ -317,7 +317,7 @@ static int erasecrosstest(void)
ebnum = 0;
addr0 = 0;
- for (i = 0; bbt[i] && i < ebcnt; ++i) {
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
addr0 += mtd->erasesize;
ebnum += 1;
}
@@ -413,7 +413,7 @@ static int erasetest(void)
ebnum = 0;
addr0 = 0;
- for (i = 0; bbt[i] && i < ebcnt; ++i) {
+ for (i = 0; i < ebcnt && bbt[i]; ++i) {
addr0 += mtd->erasesize;
ebnum += 1;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index e4d9ef0c965..9f87c99189a 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1065,7 +1065,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
/*
- * Now we have got to calculate how much data we have to to copy. In
+ * Now we have got to calculate how much data we have to copy. In
* case of a static volume it is fairly easy - the VID header contains
* the data size. In case of a dynamic volume it is more difficult - we
* have to read the contents, cut 0xFF bytes from the end and copy only
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c290f51dd17..1af08178def 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -570,7 +570,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
/*
* ubi_rb_for_each_entry - walk an RB-tree.
- * @rb: a pointer to type 'struct rb_node' to to use as a loop counter
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
* @pos: a pointer to RB-tree entry type to use as a loop counter
* @root: RB-tree's root
* @member: the name of the 'struct rb_node' within the RB-tree entry
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7adff4d0960..b9eeadf01b7 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -813,10 +813,10 @@ static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(dev)) {
netif_device_detach(dev);
vortex_down(dev, 1);
+ disable_irq(dev->irq);
}
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
- free_irq(dev->irq, dev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
@@ -839,18 +839,12 @@ static int vortex_resume(struct pci_dev *pdev)
return err;
}
pci_set_master(pdev);
- if (request_irq(dev->irq, vp->full_bus_master_rx ?
- &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
- pr_warning("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
- pci_disable_device(pdev);
- return -EBUSY;
- }
if (netif_running(dev)) {
err = vortex_up(dev);
if (err)
return err;
- else
- netif_device_attach(dev);
+ enable_irq(dev->irq);
+ netif_device_attach(dev);
}
}
return 0;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 462d9f59c53..83a1922e68e 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -87,7 +87,7 @@
/* These identify the driver base version and may not be removed. */
static char version[] =
-KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ed5741b2e70..2bea67c134f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1875,7 +1875,7 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 646dfc5f50c..8ea9c7545c1 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -123,7 +123,6 @@ static void rx(struct net_device *dev, int bufnum,
BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
skb->protocol = cpu_to_be16(ETH_P_ARCNET);
-;
netif_rx(skb);
}
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 083e21094b2..66bcbbb6bab 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -149,7 +149,6 @@ static void rx(struct net_device *dev, int bufnum,
BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");
skb->protocol = cpu_to_be16(ETH_P_ARCNET);
-;
netif_rx(skb);
}
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index be2c6cfe6e8..1372e9a99f5 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2296,7 +2296,7 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
u32 ctrl;
u32 mac_ctrl_data;
u32 master_ctrl_data;
- u32 wol_ctrl_data;
+ u32 wol_ctrl_data = 0;
u16 mii_bmsr_data;
u16 save_autoneg_advertised;
u16 mii_intr_status_data;
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 0695be14cf9..aa76cbada5e 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -3122,7 +3122,7 @@
The fields are:[4:0] - tail pointer; [10:5] - Link List size; 15:11] -
header pointer. */
#define TCM_REG_XX_TABLE 0x50240
-/* [RW 4] Load value for for cfc ac credit cnt. */
+/* [RW 4] Load value for cfc ac credit cnt. */
#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
/* [RW 4] Load value for cfc cld credit cnt. */
#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cea5cfe23b7..c3fa31c9f2a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1987,7 +1987,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// find new aggregator for the related port(s)
new_aggregator = __get_first_agg(port);
for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
- // if the new aggregator is empty, or it connected to to our port only
+ // if the new aggregator is empty, or it is connected to our port only
if (!new_aggregator->lag_ports || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator)) {
break;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 09007437246..df32c109b7a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -75,6 +75,13 @@ config CAN_EMS_PCI
CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
(http://www.ems-wuensche.de).
+config CAN_EMS_USB
+ tristate "EMS CPC-USB/ARM7 CAN/USB interface"
+ depends on USB && CAN_DEV
+ ---help---
+ This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
+	  from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+
config CAN_KVASER_PCI
tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
depends on PCI && CAN_SJA1000
@@ -82,6 +89,12 @@ config CAN_KVASER_PCI
 	  This driver is for the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
+config CAN_AT91
+ tristate "Atmel AT91 onchip CAN controller"
+ depends on CAN && CAN_DEV && ARCH_AT91SAM9263
+ ---help---
+ This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 523a941b358..0dea62721f2 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,6 +7,9 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
+obj-y += usb/
+
obj-$(CONFIG_CAN_SJA1000) += sja1000/
+obj-$(CONFIG_CAN_AT91) += at91_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 00000000000..f67ae285a35
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
+/*
+ * at91_can.c - CAN network driver for AT91 SoC CAN controller
+ *
+ * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2 as distributed in the 'COPYING'
+ * file from the main directory of the linux kernel source.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct at91_can_data ek_can_data = {
+ * transceiver_switch = sam9263ek_transceiver_switch,
+ * };
+ *
+ * at91_add_device_can(&ek_can_data);
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <mach/board.h>
+
+#define DRV_NAME "at91_can"
+#define AT91_NAPI_WEIGHT 12
+
+/*
+ * RX/TX Mailbox split
+ * don't dare to touch
+ */
+#define AT91_MB_RX_NUM 12
+#define AT91_MB_TX_SHIFT 2
+
+#define AT91_MB_RX_FIRST 0
+#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
+
+#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
+#define AT91_MB_RX_SPLIT 8
+#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
+#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+
+#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
+#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
+#define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
+
+#define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT)
+#define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT)
+#define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1)
+#define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
+
+/* Common registers */
+enum at91_reg {
+ AT91_MR = 0x000,
+ AT91_IER = 0x004,
+ AT91_IDR = 0x008,
+ AT91_IMR = 0x00C,
+ AT91_SR = 0x010,
+ AT91_BR = 0x014,
+ AT91_TIM = 0x018,
+ AT91_TIMESTP = 0x01C,
+ AT91_ECR = 0x020,
+ AT91_TCR = 0x024,
+ AT91_ACR = 0x028,
+};
+
+/* Mailbox registers (0 <= i <= 15) */
+#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
+#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
+#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
+#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
+#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
+#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
+#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
+#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
+
+/* Register bits */
+#define AT91_MR_CANEN BIT(0)
+#define AT91_MR_LPM BIT(1)
+#define AT91_MR_ABM BIT(2)
+#define AT91_MR_OVL BIT(3)
+#define AT91_MR_TEOF BIT(4)
+#define AT91_MR_TTM BIT(5)
+#define AT91_MR_TIMFRZ BIT(6)
+#define AT91_MR_DRPT BIT(7)
+
+#define AT91_SR_RBSY BIT(29)
+
+#define AT91_MMR_PRIO_SHIFT (16)
+
+#define AT91_MID_MIDE BIT(29)
+
+#define AT91_MSR_MRTR BIT(20)
+#define AT91_MSR_MABT BIT(22)
+#define AT91_MSR_MRDY BIT(23)
+#define AT91_MSR_MMI BIT(24)
+
+#define AT91_MCR_MRTR BIT(20)
+#define AT91_MCR_MTCR BIT(23)
+
+/* Mailbox Modes */
+enum at91_mb_mode {
+ AT91_MB_MODE_DISABLED = 0,
+ AT91_MB_MODE_RX = 1,
+ AT91_MB_MODE_RX_OVRWR = 2,
+ AT91_MB_MODE_TX = 3,
+ AT91_MB_MODE_CONSUMER = 4,
+ AT91_MB_MODE_PRODUCER = 5,
+};
+
+/* Interrupt mask bits */
+#define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \
+ - (1 << AT91_MB_RX_FIRST))
+#define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \
+ - (1 << AT91_MB_TX_FIRST))
+#define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
+
+#define AT91_IRQ_ERRA (1 << 16)
+#define AT91_IRQ_WARN (1 << 17)
+#define AT91_IRQ_ERRP (1 << 18)
+#define AT91_IRQ_BOFF (1 << 19)
+#define AT91_IRQ_SLEEP (1 << 20)
+#define AT91_IRQ_WAKEUP (1 << 21)
+#define AT91_IRQ_TOVF (1 << 22)
+#define AT91_IRQ_TSTP (1 << 23)
+#define AT91_IRQ_CERR (1 << 24)
+#define AT91_IRQ_SERR (1 << 25)
+#define AT91_IRQ_AERR (1 << 26)
+#define AT91_IRQ_FERR (1 << 27)
+#define AT91_IRQ_BERR (1 << 28)
+
+#define AT91_IRQ_ERR_ALL (0x1fff0000)
+#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
+ AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
+#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
+ AT91_IRQ_ERRP | AT91_IRQ_BOFF)
+
+#define AT91_IRQ_ALL (0x1fffffff)
+
+struct at91_priv {
+ struct can_priv can; /* must be the first member! */
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ void __iomem *reg_base;
+
+ u32 reg_sr;
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ unsigned int rx_next;
+
+ struct clk *clk;
+ struct at91_can_data *pdata;
+};
+
+static struct can_bittiming_const at91_bittiming_const = {
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 1,
+};
+
+static inline int get_tx_next_mb(const struct at91_priv *priv)
+{
+ return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline int get_tx_next_prio(const struct at91_priv *priv)
+{
+ return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+}
+
+static inline int get_tx_echo_mb(const struct at91_priv *priv)
+{
+ return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
+{
+ return readl(priv->reg_base + reg);
+}
+
+static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
+ u32 value)
+{
+ writel(value, priv->reg_base + reg);
+}
+
+static inline void set_mb_mode_prio(const struct at91_priv *priv,
+ unsigned int mb, enum at91_mb_mode mode, int prio)
+{
+ at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
+}
+
+static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
+ enum at91_mb_mode mode)
+{
+ set_mb_mode_prio(priv, mb, mode, 0);
+}
+
+static struct sk_buff *alloc_can_skb(struct net_device *dev,
+ struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+
+static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ memset(*cf, 0, sizeof(struct can_frame));
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->can_dlc = CAN_ERR_DLC;
+
+ return skb;
+}
+
+/*
+ * Switch the transceiver on or off
+ */
+static void at91_transceiver_switch(const struct at91_priv *priv, int on)
+{
+ if (priv->pdata && priv->pdata->transceiver_switch)
+ priv->pdata->transceiver_switch(on);
+}
+
+static void at91_setup_mailboxes(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ /*
+ * The first 12 mailboxes are used as a reception FIFO. The
+ * last mailbox is configured with overwrite option. The
+ * overwrite flag indicates a FIFO overflow.
+ */
+ for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+ set_mb_mode(priv, i, AT91_MB_MODE_RX);
+ set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+
+ /* The last 4 mailboxes are used for transmitting. */
+ for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+ set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
+
+ /* Reset tx and rx helper pointers */
+ priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+}
+
+static int at91_set_bittiming(struct net_device *dev)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 reg_br;
+
+ reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
+ ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+ ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
+ ((bt->phase_seg2 - 1) << 0);
+
+ dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+
+ at91_write(priv, AT91_BR, reg_br);
+
+ return 0;
+}
+
+static void at91_chip_start(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_mr, reg_ier;
+
+ /* disable interrupts */
+ at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+ /* disable chip */
+ reg_mr = at91_read(priv, AT91_MR);
+ at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+ at91_setup_mailboxes(dev);
+ at91_transceiver_switch(priv, 1);
+
+ /* enable chip */
+ at91_write(priv, AT91_MR, AT91_MR_CANEN);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Enable interrupts */
+ reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+ at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+ at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_chip_stop(struct net_device *dev, enum can_state state)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_mr;
+
+ /* disable interrupts */
+ at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+ reg_mr = at91_read(priv, AT91_MR);
+ at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+ at91_transceiver_switch(priv, 0);
+ priv->can.state = state;
+}
+
+/*
+ * theory of operation:
+ *
+ * According to the datasheet priority 0 is the highest priority, 15
+ * is the lowest. If two mailboxes have the same priority level the
+ * message of the mailbox with the lowest number is sent first.
+ *
+ * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
+ * the next mailbox with prio 0, and so on, until all mailboxes are
+ * used. Then we start from the beginning with mailbox
+ * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
+ * prio 1. When we reach the last mailbox with prio 15, we have to
+ * stop sending, waiting for all messages to be delivered, then start
+ * again with mailbox AT91_MB_TX_FIRST prio 0.
+ *
+ * We use the priv->tx_next as counter for the next transmission
+ * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
+ * encode the mailbox number, the upper 4 bits the mailbox priority:
+ *
+ *   priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) |
+ * (mb - AT91_MB_TX_FIRST);
+ *
+ */
+static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ unsigned int mb, prio;
+ u32 reg_mid, reg_mcr;
+
+ mb = get_tx_next_mb(priv);
+ prio = get_tx_next_prio(priv);
+
+ if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
+ netif_stop_queue(dev);
+
+ dev_err(dev->dev.parent,
+ "BUG! TX buffer full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+ else
+ reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
+
+ reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
+ (cf->can_dlc << 16) | AT91_MCR_MTCR;
+
+ /* disable MB while writing ID (see datasheet) */
+ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
+ at91_write(priv, AT91_MID(mb), reg_mid);
+ set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
+
+ at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
+ at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
+
+ /* This triggers transmission */
+ at91_write(priv, AT91_MCR(mb), reg_mcr);
+
+ stats->tx_bytes += cf->can_dlc;
+ dev->trans_start = jiffies;
+
+	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+ can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+
+ /*
+ * we have to stop the queue and deliver all messages in case
+ * of a prio+mb counter wrap around. This is the case if
+ * tx_next buffer prio and mailbox equals 0.
+ *
+ * also stop the queue if next buffer is still in use
+ * (== not ready)
+ */
+ priv->tx_next++;
+ if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
+ AT91_MSR_MRDY) ||
+ (priv->tx_next & AT91_NEXT_MASK) == 0)
+ netif_stop_queue(dev);
+
+ /* Enable interrupt for this mailbox */
+ at91_write(priv, AT91_IER, 1 << mb);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * at91_activate_rx_low - activate lower rx mailboxes
+ * @priv: a91 context
+ *
+ * Reenables the lower mailboxes for reception of new CAN messages
+ */
+static inline void at91_activate_rx_low(const struct at91_priv *priv)
+{
+ u32 mask = AT91_MB_RX_LOW_MASK;
+ at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_activate_rx_mb - reactivate a single rx mailbox
+ * @priv: a91 context
+ * @mb: mailbox to reactivate
+ *
+ * Reenables given mailbox for reception of new CAN messages
+ */
+static inline void at91_activate_rx_mb(const struct at91_priv *priv,
+ unsigned int mb)
+{
+ u32 mask = 1 << mb;
+ at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_rx_overflow_err - send error frame due to rx overflow
+ * @dev: net device
+ */
+static void at91_rx_overflow_err(struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
+ * @dev: net device
+ * @mb: mailbox number to read from
+ * @cf: can frame where to store message
+ *
+ * Reads a CAN message from the given mailbox and stores data into
+ * given can frame. "mb" and "cf" must be valid.
+ */
+static void at91_read_mb(struct net_device *dev, unsigned int mb,
+ struct can_frame *cf)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_msr, reg_mid;
+
+ reg_mid = at91_read(priv, AT91_MID(mb));
+ if (reg_mid & AT91_MID_MIDE)
+ cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
+
+ reg_msr = at91_read(priv, AT91_MSR(mb));
+ if (reg_msr & AT91_MSR_MRTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+
+ *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+ *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+
+ if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+ at91_rx_overflow_err(dev);
+}
+
+/**
+ * at91_read_msg - read CAN message from mailbox
+ * @dev: net device
+ * @mb: mail box to read from
+ *
+ * Reads a CAN message from the given mailbox, puts it into the Linux
+ * network RX queue and does all housekeeping chores (stats, ...)
+ */
+static void at91_read_msg(struct net_device *dev, unsigned int mb)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ at91_read_mb(dev, mb, cf);
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_poll_rx - read multiple CAN messages from mailboxes
+ * @dev: net device
+ * @quota: max number of pkgs we're allowed to receive
+ *
+ * Theory of Operation:
+ *
+ * 12 of the 16 mailboxes on the chip are reserved for RX. We split
+ * them into 2 groups: the lower group holds 8 mailboxes, the upper 4.
+ *
+ * Like it or not, the chip always saves a received CAN message
+ * into the first free mailbox it finds (starting with the
+ * lowest). This makes it very difficult to read the messages in the
+ * right order from the chip. This is how we work around that problem:
+ *
+ * The first message goes into mb nr. 0 and issues an interrupt. All
+ * rx ints are disabled in the interrupt handler and a napi poll is
+ * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * receive another message).
+ *
+ * lower mbxs upper
+ * ______^______ __^__
+ * / \ / \
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ * |x|x|x|x|x|x|x|x|| | | | |
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
+ * 0 1 2 3 4 5 6 7 8 9 0 1 / box
+ *
+ * The variable priv->rx_next points to the next mailbox to read a
+ * message from. As long as we're in the lower mailboxes we just read
+ * each mailbox but do not reenable it.
+ *
+ * With completion of the last of the lower mailboxes, we reenable the
+ * whole first group, but continue to look for filled mailboxes in the
+ * upper mailboxes. Think of the second group as overflow mailboxes,
+ * which take CAN messages if the lower group is full. While in the
+ * upper group we reenable each mailbox right after reading it, giving
+ * the chip more room to store messages.
+ *
+ * After finishing, we look again in the lower group if we still have
+ * quota left.
+ *
+ */
+static int at91_poll_rx(struct net_device *dev, int quota)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_sr = at91_read(priv, AT91_SR);
+ const unsigned long *addr = (unsigned long *)&reg_sr;
+ unsigned int mb;
+ int received = 0;
+
+ if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+ reg_sr & AT91_MB_RX_LOW_MASK)
+ dev_info(dev->dev.parent,
+ "order of incoming frames cannot be guaranteed\n");
+
+ again:
+ for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
+ mb < AT91_MB_RX_NUM && quota > 0;
+ reg_sr = at91_read(priv, AT91_SR),
+ mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+ at91_read_msg(dev, mb);
+
+ /* reactivate mailboxes */
+ if (mb == AT91_MB_RX_LOW_LAST)
+			/* all lower mailboxes, if we just finished the last one */
+ at91_activate_rx_low(priv);
+ else if (mb > AT91_MB_RX_LOW_LAST)
+ /* only the mailbox we read */
+ at91_activate_rx_mb(priv, mb);
+
+ received++;
+ quota--;
+ }
+
+ /* upper group completed, look again in lower */
+ if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+ quota > 0 && mb >= AT91_MB_RX_NUM) {
+ priv->rx_next = 0;
+ goto again;
+ }
+
+ return received;
+}
+
+static void at91_poll_err_frame(struct net_device *dev,
+ struct can_frame *cf, u32 reg_sr)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+
+ /* CRC error */
+ if (reg_sr & AT91_IRQ_CERR) {
+ dev_dbg(dev->dev.parent, "CERR irq\n");
+ dev->stats.rx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ }
+
+ /* Stuffing Error */
+ if (reg_sr & AT91_IRQ_SERR) {
+ dev_dbg(dev->dev.parent, "SERR irq\n");
+ dev->stats.rx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ /* Acknowledgement Error */
+ if (reg_sr & AT91_IRQ_AERR) {
+ dev_dbg(dev->dev.parent, "AERR irq\n");
+ dev->stats.tx_errors++;
+ cf->can_id |= CAN_ERR_ACK;
+ }
+
+ /* Form error */
+ if (reg_sr & AT91_IRQ_FERR) {
+ dev_dbg(dev->dev.parent, "FERR irq\n");
+ dev->stats.rx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+
+ /* Bit Error */
+ if (reg_sr & AT91_IRQ_BERR) {
+ dev_dbg(dev->dev.parent, "BERR irq\n");
+ dev->stats.tx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ }
+}
+
+static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ if (quota == 0)
+ return 0;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ at91_poll_err_frame(dev, cf, reg_sr);
+ netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int at91_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *dev = napi->dev;
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_sr = at91_read(priv, AT91_SR);
+ int work_done = 0;
+
+ if (reg_sr & AT91_IRQ_MB_RX)
+ work_done += at91_poll_rx(dev, quota - work_done);
+
+ /*
+ * The error bits are clear on read,
+ * so use saved value from irq handler.
+ */
+ reg_sr |= priv->reg_sr;
+ if (reg_sr & AT91_IRQ_ERR_FRAME)
+ work_done += at91_poll_err(dev, quota - work_done, reg_sr);
+
+ if (work_done < quota) {
+ /* enable IRQs for frame errors and all mailboxes >= rx_next */
+ u32 reg_ier = AT91_IRQ_ERR_FRAME;
+ reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
+
+ napi_complete(napi);
+ at91_write(priv, AT91_IER, reg_ier);
+ }
+
+ return work_done;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next; if a packet has been
+ * transmitted, we echo it back to the CAN framework. If we discover a
+ * not yet transmitted packet, we stop looking for more.
+ *
+ */
+static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_msr;
+ unsigned int mb;
+
+ /* masking of reg_sr not needed, already done by at91_irq */
+
+ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ mb = get_tx_echo_mb(priv);
+
+ /* no event in mailbox? */
+ if (!(reg_sr & (1 << mb)))
+ break;
+
+ /* Disable irq for this TX mailbox */
+ at91_write(priv, AT91_IDR, 1 << mb);
+
+ /*
+ * only echo if mailbox signals us a transfer
+		 * complete (MSR_MRDY). Otherwise it's a transfer
+		 * abort. "can_bus_off()" takes care of the skbs
+ * parked in the echo queue.
+ */
+ reg_msr = at91_read(priv, AT91_MSR(mb));
+ if (likely(reg_msr & AT91_MSR_MRDY &&
+ ~reg_msr & AT91_MSR_MABT)) {
+			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+ can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+ dev->stats.tx_packets++;
+ }
+ }
+
+ /*
+	 * restart the queue if we don't have a wrap around, but also
+	 * restart it if we get a TX int for the last can frame directly
+	 * before a wrap around.
+ */
+ if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
+ (priv->tx_echo & AT91_NEXT_MASK) == 0)
+ netif_wake_queue(dev);
+}
+
+static void at91_irq_err_state(struct net_device *dev,
+ struct can_frame *cf, enum can_state new_state)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_idr, reg_ier, reg_ecr;
+ u8 tec, rec;
+
+ reg_ecr = at91_read(priv, AT91_ECR);
+ rec = reg_ecr & 0xff;
+ tec = reg_ecr >> 16;
+
+ switch (priv->can.state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ /*
+ * from: ERROR_ACTIVE
+ * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+ * => : there was a warning int
+ */
+ if (new_state >= CAN_STATE_ERROR_WARNING &&
+ new_state <= CAN_STATE_BUS_OFF) {
+ dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ priv->can.can_stats.error_warning++;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (tec > rec) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ }
+ case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ /*
+ * from: ERROR_ACTIVE, ERROR_WARNING
+ * to : ERROR_PASSIVE, BUS_OFF
+ * => : error passive int
+ */
+ if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+ new_state <= CAN_STATE_BUS_OFF) {
+ dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ priv->can.can_stats.error_passive++;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (tec > rec) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ break;
+ case CAN_STATE_BUS_OFF:
+ /*
+ * from: BUS_OFF
+ * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
+ */
+ if (new_state <= CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ dev_dbg(dev->dev.parent, "restarted\n");
+ priv->can.can_stats.restarts++;
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+
+ /* process state changes depending on the new state */
+ switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ /*
+ * actually we want to enable AT91_IRQ_WARN here, but
+ * it screws up the system under certain
+		 * circumstances. So just enable AT91_IRQ_ERRP, thus
+ * the "fallthrough"
+ */
+ dev_dbg(dev->dev.parent, "Error Active\n");
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
+ reg_ier = AT91_IRQ_ERRP;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
+ reg_ier = AT91_IRQ_BOFF;
+ break;
+ case CAN_STATE_BUS_OFF:
+ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
+ AT91_IRQ_WARN | AT91_IRQ_BOFF;
+ reg_ier = 0;
+
+ cf->can_id |= CAN_ERR_BUSOFF;
+
+ dev_dbg(dev->dev.parent, "bus-off\n");
+ netif_carrier_off(dev);
+ priv->can.can_stats.bus_off++;
+
+ /* turn off chip, if restart is disabled */
+ if (!priv->can.restart_ms) {
+ at91_chip_stop(dev, CAN_STATE_BUS_OFF);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ at91_write(priv, AT91_IDR, reg_idr);
+ at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_irq_err(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ enum can_state new_state;
+ u32 reg_sr;
+
+ reg_sr = at91_read(priv, AT91_SR);
+
+ /* we need to look at the unmasked reg_sr */
+ if (unlikely(reg_sr & AT91_IRQ_BOFF))
+ new_state = CAN_STATE_BUS_OFF;
+ else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (unlikely(reg_sr & AT91_IRQ_WARN))
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (likely(reg_sr & AT91_IRQ_ERRA))
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ else {
+ dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+ return;
+ }
+
+ /* state hasn't changed */
+ if (likely(new_state == priv->can.state))
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return;
+
+ at91_irq_err_state(dev, cf, new_state);
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += cf->can_dlc;
+
+ priv->can.state = new_state;
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t at91_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct at91_priv *priv = netdev_priv(dev);
+ irqreturn_t handled = IRQ_NONE;
+ u32 reg_sr, reg_imr;
+
+ reg_sr = at91_read(priv, AT91_SR);
+ reg_imr = at91_read(priv, AT91_IMR);
+
+ /* Ignore masked interrupts */
+ reg_sr &= reg_imr;
+ if (!reg_sr)
+ goto exit;
+
+ handled = IRQ_HANDLED;
+
+ /* Receive or error interrupt? -> napi */
+ if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+ /*
+ * The error bits are clear on read,
+ * save for later use.
+ */
+ priv->reg_sr = reg_sr;
+ at91_write(priv, AT91_IDR,
+ AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+ napi_schedule(&priv->napi);
+ }
+
+ /* Transmission complete interrupt */
+ if (reg_sr & AT91_IRQ_MB_TX)
+ at91_irq_tx(dev, reg_sr);
+
+ at91_irq_err(dev);
+
+ exit:
+ return handled;
+}
+
+static int at91_open(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ int err;
+
+ clk_enable(priv->clk);
+
+ /* check or determine and set bittime */
+ err = open_candev(dev);
+ if (err)
+ goto out;
+
+ /* register interrupt handler */
+ if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
+ dev->name, dev)) {
+ err = -EAGAIN;
+ goto out_close;
+ }
+
+ /* start chip and queuing */
+ at91_chip_start(dev);
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+ out_close:
+ close_candev(dev);
+ out:
+ clk_disable(priv->clk);
+
+ return err;
+}
+
+/*
+ * stop CAN bus activity
+ */
+static int at91_close(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+ at91_chip_stop(dev, CAN_STATE_STOPPED);
+
+ free_irq(dev->irq, dev);
+ clk_disable(priv->clk);
+
+ close_candev(dev);
+
+ return 0;
+}
+
+static int at91_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ at91_chip_start(dev);
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops at91_netdev_ops = {
+ .ndo_open = at91_open,
+ .ndo_stop = at91_close,
+ .ndo_start_xmit = at91_start_xmit,
+};
+
+static int __init at91_can_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct at91_priv *priv;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *addr;
+ int err, irq;
+
+ clk = clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || !irq) {
+ err = -ENODEV;
+ goto exit_put;
+ }
+
+ if (!request_mem_region(res->start,
+ resource_size(res),
+ pdev->name)) {
+ err = -EBUSY;
+ goto exit_put;
+ }
+
+ addr = ioremap_nocache(res->start, resource_size(res));
+ if (!addr) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+
+ dev = alloc_candev(sizeof(struct at91_priv));
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_iounmap;
+ }
+
+ dev->netdev_ops = &at91_netdev_ops;
+ dev->irq = irq;
+ dev->flags |= IFF_ECHO;
+
+ priv = netdev_priv(dev);
+ priv->can.clock.freq = clk_get_rate(clk);
+ priv->can.bittiming_const = &at91_bittiming_const;
+ priv->can.do_set_bittiming = at91_set_bittiming;
+ priv->can.do_set_mode = at91_set_mode;
+ priv->reg_base = addr;
+ priv->dev = dev;
+ priv->clk = clk;
+ priv->pdata = pdev->dev.platform_data;
+
+ netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_candev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "registering netdev failed\n");
+ goto exit_free;
+ }
+
+ dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+ priv->reg_base, dev->irq);
+
+ return 0;
+
+ exit_free:
+ free_netdev(dev);
+ exit_iounmap:
+ iounmap(addr);
+ exit_release:
+ release_mem_region(res->start, resource_size(res));
+ exit_put:
+ clk_put(clk);
+ exit:
+ return err;
+}
+
+static int __devexit at91_can_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct at91_priv *priv = netdev_priv(dev);
+ struct resource *res;
+
+ unregister_netdev(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ free_netdev(dev);
+
+ iounmap(priv->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(priv->clk);
+
+ return 0;
+}
+
+static struct platform_driver at91_can_driver = {
+ .probe = at91_can_probe,
+ .remove = __devexit_p(at91_can_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at91_can_module_init(void)
+{
+ printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
+ return platform_driver_register(&at91_can_driver);
+}
+
+static void __exit at91_can_module_exit(void)
+{
+ platform_driver_unregister(&at91_can_driver);
+ printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
+}
+
+module_init(at91_can_module_init);
+module_exit(at91_can_module_exit);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
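
A small standalone sketch (userspace, illustration only) of the tx_next encoding described in the TX theory-of-operation comment above, using the same constants as the driver (4 TX mailboxes starting at mailbox 12, 4-bit priority field):

/* Mirrors get_tx_next_mb()/get_tx_next_prio(); not kernel code. */
#include <stdio.h>

#define MB_TX_SHIFT	2
#define MB_TX_NUM	(1 << MB_TX_SHIFT)	/* 4 TX mailboxes */
#define MB_TX_FIRST	12			/* TX area starts after 12 RX mailboxes */
#define NEXT_PRIO_SHIFT	MB_TX_SHIFT
#define NEXT_MB_MASK	(MB_TX_NUM - 1)
#define NEXT_MASK	((MB_TX_NUM - 1) | (0xf << MB_TX_SHIFT))

int main(void)
{
	unsigned int tx_next;

	/*
	 * The mailbox cycles 12..15 while the priority increases every
	 * 4 frames; when (tx_next & NEXT_MASK) wraps back to 0 the
	 * driver stops the queue until all echo skbs have returned.
	 */
	for (tx_next = 0; tx_next <= NEXT_MASK; tx_next++) {
		unsigned int mb   = (tx_next & NEXT_MB_MASK) + MB_TX_FIRST;
		unsigned int prio = (tx_next >> NEXT_PRIO_SHIFT) & 0xf;

		printf("tx_next=%2u -> mailbox=%2u prio=%2u\n", tx_next, mb, prio);
	}
	return 0;
}
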
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 7d84b8ac9c1..fd04789d337 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -94,12 +94,14 @@ struct ems_pci_card {
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_MEM_SIZE 4096
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_MEM_SIZE 128
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+
static struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -224,7 +226,7 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, mem_size, base_bar;
+ int max_chan, conf_size, base_bar;
int err, i;
/* Enabling PCI device */
@@ -251,22 +253,22 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
- mem_size = EMS_PCI_V2_MEM_SIZE;
+ conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
- mem_size = EMS_PCI_V1_MEM_SIZE;
+ conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, mem_size);
+ card->conf_addr = pci_iomap(pdev, 0, conf_size);
if (card->conf_addr == NULL) {
err = -ENOMEM;
goto failure_cleanup;
}
- card->base_addr = pci_iomap(pdev, base_bar, mem_size);
+ card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
if (card->base_addr == NULL) {
err = -ENOMEM;
goto failure_cleanup;
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
new file mode 100644
index 00000000000..c3f75ba701b
--- /dev/null
+++ b/drivers/net/can/usb/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Linux Controller Area Network USB drivers.
+#
+
+obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
new file mode 100644
index 00000000000..9012e0abc62
--- /dev/null
+++ b/drivers/net/can/usb/ems_usb.c
@@ -0,0 +1,1155 @@
+/*
+ * CAN driver for EMS Dr. Thomas Wuensche CPC-USB/ARM7
+ *
+ * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+MODULE_AUTHOR("Sebastian Haas <haas@ems-wuensche.com>");
+MODULE_DESCRIPTION("CAN driver for EMS Dr. Thomas Wuensche CAN/USB interfaces");
+MODULE_LICENSE("GPL v2");
+
+/* Control-Values for CPC_Control() Command Subject Selection */
+#define CONTR_CAN_MESSAGE 0x04
+#define CONTR_CAN_STATE 0x0C
+#define CONTR_BUS_ERROR 0x1C
+
+/* Control Command Actions */
+#define CONTR_CONT_OFF 0
+#define CONTR_CONT_ON 1
+#define CONTR_ONCE 2
+
+/* Messages from CPC to PC */
+#define CPC_MSG_TYPE_CAN_FRAME 1 /* CAN data frame */
+#define CPC_MSG_TYPE_RTR_FRAME 8 /* CAN remote frame */
+#define CPC_MSG_TYPE_CAN_PARAMS 12 /* Actual CAN parameters */
+#define CPC_MSG_TYPE_CAN_STATE 14 /* CAN state message */
+#define CPC_MSG_TYPE_EXT_CAN_FRAME 16 /* Extended CAN data frame */
+#define CPC_MSG_TYPE_EXT_RTR_FRAME 17 /* Extended remote frame */
+#define CPC_MSG_TYPE_CONTROL 19 /* change interface behavior */
+#define CPC_MSG_TYPE_CONFIRM 20 /* command processed confirmation */
+#define CPC_MSG_TYPE_OVERRUN 21 /* overrun events */
+#define CPC_MSG_TYPE_CAN_FRAME_ERROR 23 /* detected bus errors */
+#define CPC_MSG_TYPE_ERR_COUNTER 25 /* RX/TX error counter */
+
+/* Messages from the PC to the CPC interface */
+#define CPC_CMD_TYPE_CAN_FRAME 1 /* CAN data frame */
+#define CPC_CMD_TYPE_CONTROL 3 /* control of interface behavior */
+#define CPC_CMD_TYPE_CAN_PARAMS 6 /* set CAN parameters */
+#define CPC_CMD_TYPE_RTR_FRAME 13 /* CAN remote frame */
+#define CPC_CMD_TYPE_CAN_STATE 14 /* CAN state message */
+#define CPC_CMD_TYPE_EXT_CAN_FRAME 15 /* Extended CAN data frame */
+#define CPC_CMD_TYPE_EXT_RTR_FRAME 16 /* Extended CAN remote frame */
+#define CPC_CMD_TYPE_CAN_EXIT 200 /* exit the CAN */
+
+#define CPC_CMD_TYPE_INQ_ERR_COUNTER 25 /* request the CAN error counters */
+#define CPC_CMD_TYPE_CLEAR_MSG_QUEUE 8 /* clear CPC_MSG queue */
+#define CPC_CMD_TYPE_CLEAR_CMD_QUEUE 28 /* clear CPC_CMD queue */
+
+#define CPC_CC_TYPE_SJA1000 2 /* Philips basic CAN controller */
+
+#define CPC_CAN_ECODE_ERRFRAME 0x01 /* Ecode type */
+
+/* Overrun types */
+#define CPC_OVR_EVENT_CAN 0x01
+#define CPC_OVR_EVENT_CANSTATE 0x02
+#define CPC_OVR_EVENT_BUSERROR 0x04
+
+/*
+ * If the CAN controller lost a message we indicate it with the highest bit
+ * set in the count field.
+ */
+#define CPC_OVR_HW 0x80
+
+/* Size of the "struct ems_cpc_msg" without the union */
+#define CPC_MSG_HEADER_LEN 11
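+/* minimum size of a CAN frame message: 4 byte id plus 1 byte length */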
+#define CPC_CAN_MSG_MIN_SIZE 5
+
+/* Define these values to match your devices */
+#define USB_CPCUSB_VENDOR_ID 0x12D6
+
+#define USB_CPCUSB_ARM7_PRODUCT_ID 0x0444
+
+/* Mode register NXP LPC2119/SJA1000 CAN Controller */
+#define SJA1000_MOD_NORMAL 0x00
+#define SJA1000_MOD_RM 0x01
+
+/* ECC register NXP LPC2119/SJA1000 CAN Controller */
+#define SJA1000_ECC_SEG 0x1F
+#define SJA1000_ECC_DIR 0x20
+#define SJA1000_ECC_ERR 0x06
+#define SJA1000_ECC_BIT 0x00
+#define SJA1000_ECC_FORM 0x40
+#define SJA1000_ECC_STUFF 0x80
+#define SJA1000_ECC_MASK 0xc0
+
+/* Status register content */
+#define SJA1000_SR_BS 0x80
+#define SJA1000_SR_ES 0x40
+
+#define SJA1000_DEFAULT_OUTPUT_CONTROL 0xDA
+
+/*
+ * The device actually uses a 16MHz clock to generate the CAN clock
+ * but it expects SJA1000 bit timing settings based on 8 MHz (the
+ * conversion is done internally).
+ */
+#define EMS_USB_ARM7_CLOCK 8000000
+
+/*
+ * CAN-Message representation in a CPC_MSG. Message object type is
+ * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+ * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
+ */
+struct cpc_can_msg {
+ u32 id;
+ u8 length;
+ u8 msg[8];
+};
+
+/* Representation of the CAN parameters for the SJA1000 controller */
+struct cpc_sja1000_params {
+ u8 mode;
+ u8 acc_code0;
+ u8 acc_code1;
+ u8 acc_code2;
+ u8 acc_code3;
+ u8 acc_mask0;
+ u8 acc_mask1;
+ u8 acc_mask2;
+ u8 acc_mask3;
+ u8 btr0;
+ u8 btr1;
+ u8 outp_contr;
+};
+
+/* CAN params message representation */
+struct cpc_can_params {
+ u8 cc_type;
+
+ /* Will support M16C CAN controller in the future */
+ union {
+ struct cpc_sja1000_params sja1000;
+ } cc_params;
+};
+
+/* Structure for confirmed message handling */
+struct cpc_confirm {
+ u8 error; /* error code */
+};
+
+/* Structure for overrun conditions */
+struct cpc_overrun {
+ u8 event;
+ u8 count;
+};
+
+/* SJA1000 CAN errors (compatible to NXP LPC2119) */
+struct cpc_sja1000_can_error {
+ u8 ecc;
+ u8 rxerr;
+ u8 txerr;
+};
+
+/* structure for CAN error conditions */
+struct cpc_can_error {
+ u8 ecode;
+
+ struct {
+ u8 cc_type;
+
+ /* Other controllers may also provide error code capture regs */
+ union {
+ struct cpc_sja1000_can_error sja1000;
+ } regs;
+ } cc;
+};
+
+/*
+ * Structure containing RX/TX error counter. This structure is used to request
+ * the values of the CAN controllers TX and RX error counter.
+ */
+struct cpc_can_err_counter {
+ u8 rx;
+ u8 tx;
+};
+
+/* Main message type used between library and application */
+struct __attribute__ ((packed)) ems_cpc_msg {
+ u8 type; /* type of message */
+ u8 length; /* length of data within union 'msg' */
+ u8 msgid; /* confirmation handle */
+ u32 ts_sec; /* timestamp in seconds */
+ u32 ts_nsec; /* timestamp in nano seconds */
+
+ union {
+ u8 generic[64];
+ struct cpc_can_msg can_msg;
+ struct cpc_can_params can_params;
+ struct cpc_confirm confirmation;
+ struct cpc_overrun overrun;
+ struct cpc_can_error error;
+ struct cpc_can_err_counter err_counter;
+ u8 can_state;
+ } msg;
+};
+
+/*
+ * Table of devices that work with this driver
+ * NOTE: This driver currently supports only the CPC-USB/ARM7 (LPC2119).
+ */
+static struct usb_device_id ems_usb_table[] = {
+ {USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_ARM7_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ems_usb_table);
+
+#define RX_BUFFER_SIZE 64
+#define CPC_HEADER_SIZE 4
+#define INTR_IN_BUFFER_SIZE 4
+
+#define MAX_RX_URBS 10
+#define MAX_TX_URBS CAN_ECHO_SKB_MAX
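+/* a tx context whose echo_index equals MAX_TX_URBS is treated as free */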
+
+struct ems_usb;
+
+struct ems_tx_urb_context {
+ struct ems_usb *dev;
+
+ u32 echo_index;
+ u8 dlc;
+};
+
+struct ems_usb {
+ struct can_priv can; /* must be the first member */
+ int open_time;
+
+ struct sk_buff *echo_skb[MAX_TX_URBS];
+
+ struct usb_device *udev;
+ struct net_device *netdev;
+
+ atomic_t active_tx_urbs;
+ struct usb_anchor tx_submitted;
+ struct ems_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+ struct usb_anchor rx_submitted;
+
+ struct urb *intr_urb;
+
+ u8 *tx_msg_buffer;
+
+ u8 *intr_in_buffer;
+ unsigned int free_slots; /* remember number of available slots */
+
+ struct ems_cpc_msg active_params; /* active controller parameters */
+};
+
+static void ems_usb_read_interrupt_callback(struct urb *urb)
+{
+ struct ems_usb *dev = urb->context;
+ struct net_device *netdev = dev->netdev;
+ int err;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0:
+ dev->free_slots = dev->intr_in_buffer[1];
+ break;
+
+ case -ECONNRESET: /* unlink */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ dev_info(netdev->dev.parent, "Rx interrupt aborted %d\n",
+ urb->status);
+ break;
+ }
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else if (err)
+ dev_err(netdev->dev.parent,
+ "failed resubmitting intr urb: %d\n", err);
+
+ return;
+}
+
+static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ int i;
+ struct net_device_stats *stats = &dev->netdev->stats;
+
+ skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+ if (skb == NULL)
+ return;
+
+ skb->protocol = htons(ETH_P_CAN);
+
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+
+ cf->can_id = msg->msg.can_msg.id;
+ cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
+
+ if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME
+ || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (msg->type == CPC_MSG_TYPE_RTR_FRAME
+ || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = msg->msg.can_msg.msg[i];
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &dev->netdev->stats;
+
+ skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+ if (skb == NULL)
+ return;
+
+ skb->protocol = htons(ETH_P_CAN);
+
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ memset(cf, 0, sizeof(struct can_frame));
+
+ cf->can_id = CAN_ERR_FLAG;
+ cf->can_dlc = CAN_ERR_DLC;
+
+ if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
+ u8 state = msg->msg.can_state;
+
+ if (state & SJA1000_SR_BS) {
+ dev->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+
+ can_bus_off(dev->netdev);
+ } else if (state & SJA1000_SR_ES) {
+ dev->can.state = CAN_STATE_ERROR_WARNING;
+ dev->can.can_stats.error_warning++;
+ } else {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dev->can.can_stats.error_passive++;
+ }
+ } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) {
+ u8 ecc = msg->msg.error.cc.regs.sja1000.ecc;
+ u8 txerr = msg->msg.error.cc.regs.sja1000.txerr;
+ u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr;
+
+ /* bus error interrupt */
+ dev->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & SJA1000_ECC_MASK) {
+ case SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+ break;
+ }
+
+		/* Error occurred during transmission? */
+ if ((ecc & SJA1000_ECC_DIR) == 0)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ if (dev->can.state == CAN_STATE_ERROR_WARNING ||
+ dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ } else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/*
+ * callback for bulk IN urb
+ */
+static void ems_usb_read_bulk_callback(struct urb *urb)
+{
+ struct ems_usb *dev = urb->context;
+ struct net_device *netdev;
+ int retval;
+
+ netdev = dev->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ return;
+
+ default:
+ dev_info(netdev->dev.parent, "Rx URB aborted (%d)\n",
+ urb->status);
+ goto resubmit_urb;
+ }
+
+ if (urb->actual_length > CPC_HEADER_SIZE) {
+ struct ems_cpc_msg *msg;
+ u8 *ibuf = urb->transfer_buffer;
+ u8 msg_count, again, start;
+
+ msg_count = ibuf[0] & ~0x80;
+ again = ibuf[0] & 0x80;
+
+ start = CPC_HEADER_SIZE;
+
+ while (msg_count) {
+ msg = (struct ems_cpc_msg *)&ibuf[start];
+
+ switch (msg->type) {
+ case CPC_MSG_TYPE_CAN_STATE:
+ /* Process CAN state changes */
+ ems_usb_rx_err(dev, msg);
+ break;
+
+ case CPC_MSG_TYPE_CAN_FRAME:
+ case CPC_MSG_TYPE_EXT_CAN_FRAME:
+ case CPC_MSG_TYPE_RTR_FRAME:
+ case CPC_MSG_TYPE_EXT_RTR_FRAME:
+ ems_usb_rx_can_msg(dev, msg);
+ break;
+
+ case CPC_MSG_TYPE_CAN_FRAME_ERROR:
+ /* Process errorframe */
+ ems_usb_rx_err(dev, msg);
+ break;
+
+ case CPC_MSG_TYPE_OVERRUN:
+ /* Message lost while receiving */
+ ems_usb_rx_err(dev, msg);
+ break;
+ }
+
+ start += CPC_MSG_HEADER_LEN + msg->length;
+ msg_count--;
+
+ if (start > urb->transfer_buffer_length) {
+ dev_err(netdev->dev.parent, "format error\n");
+ break;
+ }
+ }
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
+ urb->transfer_buffer, RX_BUFFER_SIZE,
+ ems_usb_read_bulk_callback, dev);
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (retval == -ENODEV)
+ netif_device_detach(netdev);
+ else if (retval)
+ dev_err(netdev->dev.parent,
+ "failed resubmitting read bulk urb: %d\n", retval);
+
+ return;
+}
+
+/*
+ * callback for bulk OUT urb
+ */
+static void ems_usb_write_bulk_callback(struct urb *urb)
+{
+ struct ems_tx_urb_context *context = urb->context;
+ struct ems_usb *dev;
+ struct net_device *netdev;
+
+ BUG_ON(!context);
+
+ dev = context->dev;
+ netdev = dev->netdev;
+
+ /* free up our allocated buffer */
+ usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
+ urb->status);
+
+ netdev->trans_start = jiffies;
+
+ /* transmission complete interrupt */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += context->dlc;
+
+ can_get_echo_skb(netdev, context->echo_index);
+
+ /* Release context */
+ context->echo_index = MAX_TX_URBS;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+/*
+ * Send the given CPC command synchronously
+ */
+static int ems_usb_command_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
+{
+ int actual_length;
+
+ /* Copy payload */
+ memcpy(&dev->tx_msg_buffer[CPC_HEADER_SIZE], msg,
+ msg->length + CPC_MSG_HEADER_LEN);
+
+ /* Clear header */
+ memset(&dev->tx_msg_buffer[0], 0, CPC_HEADER_SIZE);
+
+ return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+ &dev->tx_msg_buffer[0],
+ msg->length + CPC_MSG_HEADER_LEN + CPC_HEADER_SIZE,
+ &actual_length, 1000);
+}
+
+/*
+ * Change CAN controllers' mode register
+ */
+static int ems_usb_write_mode(struct ems_usb *dev, u8 mode)
+{
+ dev->active_params.msg.can_params.cc_params.sja1000.mode = mode;
+
+ return ems_usb_command_msg(dev, &dev->active_params);
+}
+
+/*
+ * Send a CPC_Control command to change the interface's behaviour when it
+ * receives CAN messages, bus errors or CAN state change notifications.
+ */
+static int ems_usb_control_cmd(struct ems_usb *dev, u8 val)
+{
+ struct ems_cpc_msg cmd;
+
+ cmd.type = CPC_CMD_TYPE_CONTROL;
+ cmd.length = CPC_MSG_HEADER_LEN + 1;
+
+ cmd.msgid = 0;
+
+ cmd.msg.generic[0] = val;
+
+ return ems_usb_command_msg(dev, &cmd);
+}
+
+/*
+ * Start interface
+ */
+static int ems_usb_start(struct ems_usb *dev)
+{
+ struct net_device *netdev = dev->netdev;
+ int err, i;
+
+ dev->intr_in_buffer[0] = 0;
+ dev->free_slots = 15; /* initial size */
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(netdev->dev.parent,
+ "No memory left for URBs\n");
+ return -ENOMEM;
+ }
+
+ buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ dev_err(netdev->dev.parent,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
+ buf, RX_BUFFER_SIZE,
+ ems_usb_read_bulk_callback, dev);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ usb_unanchor_urb(urb);
+ usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
+			usb_free_urb(urb);
+			break;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+
+	/* Did we submit any URBs? */
+ if (i == 0) {
+ dev_warn(netdev->dev.parent, "couldn't setup read URBs\n");
+ return err;
+ }
+
+	/* Warn if we couldn't submit all the URBs */
+ if (i < MAX_RX_URBS)
+ dev_warn(netdev->dev.parent, "rx performance may be slow\n");
+
+ /* Setup and start interrupt URB */
+ usb_fill_int_urb(dev->intr_urb, dev->udev,
+ usb_rcvintpipe(dev->udev, 1),
+ dev->intr_in_buffer,
+ INTR_IN_BUFFER_SIZE,
+ ems_usb_read_interrupt_callback, dev, 1);
+
+ err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
+ err);
+
+ return err;
+ }
+
+ /* CPC-USB will transfer received message to host */
+ err = ems_usb_control_cmd(dev, CONTR_CAN_MESSAGE | CONTR_CONT_ON);
+ if (err)
+ goto failed;
+
+ /* CPC-USB will transfer CAN state changes to host */
+ err = ems_usb_control_cmd(dev, CONTR_CAN_STATE | CONTR_CONT_ON);
+ if (err)
+ goto failed;
+
+ /* CPC-USB will transfer bus errors to host */
+ err = ems_usb_control_cmd(dev, CONTR_BUS_ERROR | CONTR_CONT_ON);
+ if (err)
+ goto failed;
+
+ err = ems_usb_write_mode(dev, SJA1000_MOD_NORMAL);
+ if (err)
+ goto failed;
+
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+failed:
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
+
+ return err;
+}
+
+static void unlink_all_urbs(struct ems_usb *dev)
+{
+ int i;
+
+ usb_unlink_urb(dev->intr_urb);
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ dev->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+static int ems_usb_open(struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+ int err;
+
+ err = ems_usb_write_mode(dev, SJA1000_MOD_RM);
+ if (err)
+ return err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ /* finally start device */
+ err = ems_usb_start(dev);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ dev_warn(netdev->dev.parent, "couldn't start device: %d\n",
+ err);
+
+ close_candev(netdev);
+
+ return err;
+ }
+
+ dev->open_time = jiffies;
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+ struct ems_tx_urb_context *context = NULL;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct ems_cpc_msg *msg;
+ struct urb *urb;
+ u8 *buf;
+ int i, err;
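+	/* buffer covers the largest possible frame: CPC header, message
+	 * header and a full cpc_can_msg (id, length and 8 data bytes) */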
+ size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ + sizeof(struct cpc_can_msg);
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ dev_err(netdev->dev.parent, "No memory left for URBs\n");
+ goto nomem;
+ }
+
+ buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
+ if (!buf) {
+ dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ goto nomem;
+ }
+
+ msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
+
+ msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
+ msg->msg.can_msg.length = cf->can_dlc;
+
+ if (cf->can_id & CAN_RTR_FLAG) {
+ msg->type = cf->can_id & CAN_EFF_FLAG ?
+ CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;
+
+ msg->length = CPC_CAN_MSG_MIN_SIZE;
+ } else {
+ msg->type = cf->can_id & CAN_EFF_FLAG ?
+ CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;
+
+ for (i = 0; i < cf->can_dlc; i++)
+ msg->msg.can_msg.msg[i] = cf->data[i];
+
+ msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
+ }
+
+ for (i = 0; i < MAX_TX_URBS; i++) {
+ if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ context = &dev->tx_contexts[i];
+ break;
+ }
+ }
+
+ /*
+	 * This should never happen: it would mean we have more URBs in flight
+	 * than allowed (MAX_TX_URBS).
+ */
+ if (!context) {
+		usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+		usb_free_urb(urb);
+
+ dev_warn(netdev->dev.parent, "couldn't find free context\n");
+
+ return NETDEV_TX_BUSY;
+ }
+
+ context->dev = dev;
+ context->echo_index = i;
+ context->dlc = cf->can_dlc;
+
+ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
+ size, ems_usb_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
+ atomic_inc(&dev->active_tx_urbs);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index);
+
+ usb_unanchor_urb(urb);
+ usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ dev_kfree_skb(skb);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (err == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);
+
+ stats->tx_dropped++;
+ }
+ } else {
+ netdev->trans_start = jiffies;
+
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+ dev->free_slots < 5) {
+ netif_stop_queue(netdev);
+ }
+ }
+
+ /*
+ * Release our reference to this URB, the USB core will eventually free
+ * it entirely.
+ */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+nomem:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static int ems_usb_close(struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+
+ /* Stop polling */
+ unlink_all_urbs(dev);
+
+ netif_stop_queue(netdev);
+
+ /* Set CAN controller to reset mode */
+ if (ems_usb_write_mode(dev, SJA1000_MOD_RM))
+		dev_warn(netdev->dev.parent, "couldn't stop device\n");
+
+ close_candev(netdev);
+
+ dev->open_time = 0;
+
+ return 0;
+}
+
+static const struct net_device_ops ems_usb_netdev_ops = {
+ .ndo_open = ems_usb_open,
+ .ndo_stop = ems_usb_close,
+ .ndo_start_xmit = ems_usb_start_xmit,
+};
+
+static struct can_bittiming_const ems_usb_bittiming_const = {
+ .name = "ems_usb",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+
+ if (!dev->open_time)
+ return -EINVAL;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL))
+			dev_warn(netdev->dev.parent, "couldn't start device\n");
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ems_usb_set_bittiming(struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+ u8 btr0, btr1;
+
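+	/*
+	 * SJA1000 bit timing registers: BTR0 = SJW[7:6] | BRP[5:0],
+	 * BTR1 = SAM[7] | TSEG2[6:4] | TSEG1[3:0]; all fields hold the
+	 * configured value minus one.
+	 */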
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ dev_info(netdev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
+ btr0, btr1);
+
+ dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0;
+ dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1;
+
+ return ems_usb_command_msg(dev, &dev->active_params);
+}
+
+static void init_params_sja1000(struct ems_cpc_msg *msg)
+{
+ struct cpc_sja1000_params *sja1000 =
+ &msg->msg.can_params.cc_params.sja1000;
+
+ msg->type = CPC_CMD_TYPE_CAN_PARAMS;
+ msg->length = sizeof(struct cpc_can_params);
+ msg->msgid = 0;
+
+ msg->msg.can_params.cc_type = CPC_CC_TYPE_SJA1000;
+
+ /* Acceptance filter open */
+ sja1000->acc_code0 = 0x00;
+ sja1000->acc_code1 = 0x00;
+ sja1000->acc_code2 = 0x00;
+ sja1000->acc_code3 = 0x00;
+
+ /* Acceptance filter open */
+ sja1000->acc_mask0 = 0xFF;
+ sja1000->acc_mask1 = 0xFF;
+ sja1000->acc_mask2 = 0xFF;
+ sja1000->acc_mask3 = 0xFF;
+
+ sja1000->btr0 = 0;
+ sja1000->btr1 = 0;
+
+ sja1000->outp_contr = SJA1000_DEFAULT_OUTPUT_CONTROL;
+ sja1000->mode = SJA1000_MOD_RM;
+}
+
+/*
+ * probe function for new CPC-USB devices
+ */
+static int ems_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *netdev;
+ struct ems_usb *dev;
+ int i, err = -ENOMEM;
+
+ netdev = alloc_candev(sizeof(struct ems_usb));
+ if (!netdev) {
+		dev_err(&intf->dev, "Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ dev = netdev_priv(netdev);
+
+ dev->udev = interface_to_usbdev(intf);
+ dev->netdev = netdev;
+
+ dev->can.state = CAN_STATE_STOPPED;
+ dev->can.clock.freq = EMS_USB_ARM7_CLOCK;
+ dev->can.bittiming_const = &ems_usb_bittiming_const;
+ dev->can.do_set_bittiming = ems_usb_set_bittiming;
+ dev->can.do_set_mode = ems_usb_set_mode;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ netdev->netdev_ops = &ems_usb_netdev_ops;
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ init_usb_anchor(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ dev->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+ dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->intr_urb) {
+ dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n");
+ goto cleanup_candev;
+ }
+
+ dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
+ if (!dev->intr_in_buffer) {
+ dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n");
+ goto cleanup_intr_urb;
+ }
+
+ dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
+ sizeof(struct ems_cpc_msg), GFP_KERNEL);
+ if (!dev->tx_msg_buffer) {
+ dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n");
+ goto cleanup_intr_in_buffer;
+ }
+
+ usb_set_intfdata(intf, dev);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ init_params_sja1000(&dev->active_params);
+
+ err = ems_usb_command_msg(dev, &dev->active_params);
+ if (err) {
+ dev_err(netdev->dev.parent,
+ "couldn't initialize controller: %d\n", err);
+ goto cleanup_tx_msg_buffer;
+ }
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(netdev->dev.parent,
+ "couldn't register CAN device: %d\n", err);
+ goto cleanup_tx_msg_buffer;
+ }
+
+ return 0;
+
+cleanup_tx_msg_buffer:
+ kfree(dev->tx_msg_buffer);
+
+cleanup_intr_in_buffer:
+ kfree(dev->intr_in_buffer);
+
+cleanup_intr_urb:
+ usb_free_urb(dev->intr_urb);
+
+cleanup_candev:
+ free_candev(netdev);
+
+ return err;
+}
+
+/*
+ * called by the usb core when the device is removed from the system
+ */
+static void ems_usb_disconnect(struct usb_interface *intf)
+{
+ struct ems_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (dev) {
+		unregister_netdev(dev->netdev);
+
+		unlink_all_urbs(dev);
+
+		usb_free_urb(dev->intr_urb);
+
+		kfree(dev->intr_in_buffer);
+		kfree(dev->tx_msg_buffer);
+
+		free_candev(dev->netdev);
+ }
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver ems_usb_driver = {
+ .name = "ems_usb",
+ .probe = ems_usb_probe,
+ .disconnect = ems_usb_disconnect,
+ .id_table = ems_usb_table,
+};
+
+static int __init ems_usb_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "CPC-USB kernel driver loaded\n");
+
+ /* register this driver with the USB subsystem */
+ err = usb_register(&ems_usb_driver);
+
+ if (err) {
+ err("usb_register failed. Error number %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit ems_usb_exit(void)
+{
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&ems_usb_driver);
+}
+
+module_init(ems_usb_init);
+module_exit(ems_usb_exit);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index d45eacb7670..211c8e9182f 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -85,8 +85,6 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
cp->uio_dev = iminor(inode);
- cnic_shutdown_bnx2_rx_ring(dev);
-
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
@@ -98,6 +96,8 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
struct cnic_dev *dev = uinfo->priv;
struct cnic_local *cp = dev->cnic_priv;
+ cnic_shutdown_bnx2_rx_ring(dev);
+
cp->uio_dev = -1;
return 0;
}
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 3e3fab8afb1..61f9da2b494 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1109,7 +1109,7 @@ static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
int rc, phy_id;
- char mdio_bus_id[BUS_ID_SIZE];
+ char mdio_bus_id[MII_BUS_ID_SIZE];
struct resource *mem;
struct cpmac_priv *priv;
struct net_device *dev;
@@ -1118,7 +1118,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (external_switch || dumb_switch) {
- strncpy(mdio_bus_id, "0", BUS_ID_SIZE); /* fixed phys bus */
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
} else {
for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
@@ -1126,7 +1126,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
continue;
if (!cpmac_mii->phy_map[phy_id])
continue;
- strncpy(mdio_bus_id, cpmac_mii->id, BUS_ID_SIZE);
+ strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
break;
}
}
@@ -1167,7 +1167,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
- snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 15c0195ebd3..a24be34a3f7 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev)
e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
- /* Renegotiate with link partner */
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
if (autoneg_normal) {
- data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
- data |= BMCR_ANENABLE | BMCR_ANRESTART;
+ /* Renegotiate with link partner */
+ data |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ /* Don't negotiate speed or duplex */
+ data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+
+ /* Set speed and duplex static */
+ if (current_speed_selection == 10)
+ data &= ~BMCR_SPEED100;
+ else
+ data |= BMCR_SPEED100;
+
+ if (current_duplex != full)
+ data &= ~BMCR_FULLDPLX;
+ else
+ data |= BMCR_FULLDPLX;
}
e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d465eaa796c..65a2d0ba64e 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -200,6 +200,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/** NOTE:: For DM646x the IN_VECTOR has changed */
#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
+
/* CPPI bit positions */
#define EMAC_CPPI_SOP_BIT BIT(31)
@@ -2167,7 +2170,11 @@ static int emac_poll(struct napi_struct *napi, int budget)
emac_int_enable(priv);
}
- if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) {
+ mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+ if (unlikely(status & mask)) {
u32 ch, cause;
dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
netif_stop_queue(ndev);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index cda6b397550..45ac225a7aa 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -3035,7 +3035,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
/* If TBI compatibility is was previously off, turn it on. For
* compatibility with a TBI link partner, we will store bad
* packets. Some frames have an additional byte on the end and
- * will look like CRC errors to to the hardware.
+ * will look like CRC errors to the hardware.
*/
if (!hw->tbi_compatibility_on) {
hw->tbi_compatibility_on = true;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 977c3d35827..41bd7aeafd8 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3083,7 +3083,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_poll_controller = ehea_netpoll,
#endif
.ndo_get_stats = ehea_get_stats,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = ehea_set_multicast_list,
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 3747457f5e6..bc7c5b7abb8 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -751,7 +751,7 @@ int ehea_create_busmap(void)
mutex_lock(&ehea_busmap_mutex);
ehea_mr_len = 0;
- ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+ ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
ehea_create_busmap_callback);
mutex_unlock(&ehea_busmap_mutex);
return ret;
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 117fc6c12e3..66813c91a72 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1666,3 +1666,4 @@ MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
MODULE_LICENSE("GPL");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 2234118eedb..6c144b525b4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -293,7 +293,7 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
rxtime = get_ictt_value(priv->rxic);
rxcount = get_icft_value(priv->rxic);
txtime = get_ictt_value(priv->txic);
- txcount = get_icft_value(priv->txic);;
+ txcount = get_icft_value(priv->txic);
cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
cvals->rx_max_coalesced_frames = rxcount;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 1d7d7fef414..89c82c5e63e 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2556,13 +2556,13 @@ static int __devinit emac_init_config(struct emac_instance *dev)
if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
dev->mdio_ph = 0;
if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
- dev->zmii_ph = 0;;
+ dev->zmii_ph = 0;
if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
- dev->zmii_port = 0xffffffff;;
+ dev->zmii_port = 0xffffffff;
if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
- dev->rgmii_ph = 0;;
+ dev->rgmii_ph = 0;
if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
- dev->rgmii_port = 0xffffffff;;
+ dev->rgmii_port = 0xffffffff;
if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
dev->fifo_entry_size = 16;
if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index a0231cd079f..7d76bb085e1 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -286,41 +286,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
}
/**
- * igb_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- *
- * Updates entire Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
-{
- u32 hash_value, hash_bit, hash_reg;
- int i;
-
- /* clear mta_shadow */
- memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
-
- /* update mta_shadow from mc_addr_list */
- for (i = 0; (u32) i < mc_addr_count; i++) {
- hash_value = igb_hash_mc_addr(hw, mc_addr_list);
-
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
- mc_addr_list += (ETH_ALEN);
- }
-
- /* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
- wrfl();
-}
-
-/**
* igb_hash_mc_addr - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
@@ -329,7 +294,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
* the multicast filter table array address and new table value. See
* igb_mta_set()
**/
-u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
@@ -392,6 +357,41 @@ u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
}
/**
+ * igb_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+}
+
+/**
* igb_clear_hw_cntrs_base - Clear base hardware counters
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index 7518af8cbbf..bca17d88241 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -88,6 +88,5 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
-extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
#endif
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index d2639c4a086..5d6c1530a8c 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3966,7 +3966,7 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
/* VFs are limited to using the MTA hash table for their multicast
* addresses */
for (i = 0; i < n; i++)
- vf_data->vf_mc_hashes[i] = hash_list[i];;
+ vf_data->vf_mc_hashes[i] = hash_list[i];
/* Flush and reset the mta with the new values */
igb_set_rx_mode(adapter->netdev);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1445e586519..84db145d2b5 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -163,6 +164,22 @@ inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
}
/*
+ * Set the IrDA communications mode.
+ */
+static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
+{
+ if (si->pdata->transceiver_mode)
+ si->pdata->transceiver_mode(si->dev, mode);
+ else {
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_set_value(si->pdata->gpio_pwdown,
+ !(mode & IR_OFF) ^
+ !si->pdata->gpio_pwdown_inverted);
+ pxa2xx_transceiver_mode(si->dev, mode);
+ }
+}
+
+/*
* Set the IrDA communications speed.
*/
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
@@ -188,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
pxa_irda_disable_clk(si);
/* set board transceiver to SIR mode */
- si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
+ pxa_irda_set_mode(si, IR_SIRMODE);
/* enable the STUART clock */
pxa_irda_enable_sirclk(si);
@@ -222,7 +239,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
ICCR0 = 0;
/* set board transceiver to FIR mode */
- si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
+ pxa_irda_set_mode(si, IR_FIRMODE);
/* enable the FICP clock */
pxa_irda_enable_firclk(si);
@@ -641,7 +658,7 @@ static void pxa_irda_shutdown(struct pxa_irda *si)
local_irq_restore(flags);
/* power off board transceiver */
- si->pdata->transceiver_mode(si->dev, IR_OFF);
+ pxa_irda_set_mode(si, IR_OFF);
printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
@@ -849,10 +866,26 @@ static int pxa_irda_probe(struct platform_device *pdev)
if (err)
goto err_mem_5;
- if (si->pdata->startup)
+ if (gpio_is_valid(si->pdata->gpio_pwdown)) {
+ err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
+ if (err)
+ goto err_startup;
+ err = gpio_direction_output(si->pdata->gpio_pwdown,
+ !si->pdata->gpio_pwdown_inverted);
+ if (err) {
+ gpio_free(si->pdata->gpio_pwdown);
+ goto err_startup;
+ }
+ }
+
+ if (si->pdata->startup) {
err = si->pdata->startup(si->dev);
- if (err)
- goto err_startup;
+ if (err)
+ goto err_startup;
+ }
+
+ if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
+ dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
dev->netdev_ops = &pxa_irda_netdev_ops;
@@ -903,6 +936,8 @@ static int pxa_irda_remove(struct platform_device *_dev)
if (dev) {
struct pxa_irda *si = netdev_priv(dev);
unregister_netdev(dev);
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_free(si->pdata->gpio_pwdown);
if (si->pdata->shutdown)
si->pdata->shutdown(si->dev);
kfree(si->tx_buff.head);
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index dd688d45e9c..385be601666 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -267,7 +267,8 @@ struct ixgbe_adapter {
enum ixgbe_fc_mode last_lfc_mode;
/* Interrupt Throttle Rate */
- u32 itr_setting;
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
u16 eitr_low;
u16 eitr_high;
@@ -351,7 +352,8 @@ struct ixgbe_adapter {
struct ixgbe_hw_stats stats;
/* Interrupt Throttle Rate */
- u32 eitr_param;
+ u32 rx_eitr_param;
+ u32 tx_eitr_param;
unsigned long state;
u64 tx_busy;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 026e94a9984..53b0a668025 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1929,7 +1929,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
/* only valid if in constant ITR mode */
- switch (adapter->itr_setting) {
+ switch (adapter->rx_itr_setting) {
case 0:
/* throttling disabled */
ec->rx_coalesce_usecs = 0;
@@ -1940,9 +1940,25 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
break;
default:
/* fixed interrupt rate mode */
- ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
+ ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
break;
}
+
+ /* only valid if in constant ITR mode */
+ switch (adapter->tx_itr_setting) {
+ case 0:
+ /* throttling disabled */
+ ec->tx_coalesce_usecs = 0;
+ break;
+ case 1:
+ /* dynamic ITR mode */
+ ec->tx_coalesce_usecs = 1;
+ break;
+ default:
+ ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
+ break;
+ }
+
return 0;
}
@@ -1953,6 +1969,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_q_vector *q_vector;
int i;
+ /*
+ * don't accept tx specific changes if we've got mixed RxTx vectors
+ * test and jump out here if needed before changing the rx numbers
+ */
+ if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
+ adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ return -EINVAL;
+
if (ec->tx_max_coalesced_frames_irq)
adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
@@ -1963,26 +1987,49 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
/* store the value in ints/second */
- adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
+ adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
/* static value of interrupt rate */
- adapter->itr_setting = adapter->eitr_param;
+ adapter->rx_itr_setting = adapter->rx_eitr_param;
/* clear the lower bit as its used for dynamic state */
- adapter->itr_setting &= ~1;
+ adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
/* 1 means dynamic mode */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->rx_itr_setting = 1;
} else {
/*
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
else
- adapter->eitr_param = IXGBE_MAX_INT_RATE;
- adapter->itr_setting = 0;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_itr_setting = 0;
+ }
+
+ if (ec->tx_coalesce_usecs > 1) {
+ /* check the limits */
+ if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
+ return -EINVAL;
+
+ /* store the value in ints/second */
+ adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
+
+ /* static value of interrupt rate */
+ adapter->tx_itr_setting = adapter->tx_eitr_param;
+
+ /* clear the lower bit as its used for dynamic state */
+ adapter->tx_itr_setting &= ~1;
+ } else if (ec->tx_coalesce_usecs == 1) {
+ /* 1 means dynamic mode */
+ adapter->tx_eitr_param = 10000;
+ adapter->tx_itr_setting = 1;
+ } else {
+ adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->tx_itr_setting = 0;
}
/* MSI/MSIx Interrupt Mode */
@@ -1992,17 +2039,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count)
- /* tx vector gets half the rate */
- q_vector->eitr = (adapter->eitr_param >> 1);
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
else
/* rx only or mixed */
- q_vector->eitr = adapter->eitr_param;
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
/* Legacy Interrupt Mode */
} else {
q_vector = adapter->q_vector[0];
- q_vector->eitr = adapter->eitr_param;
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 59ad9590e70..c407bd9de0d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -926,12 +926,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
r_idx + 1);
}
- /* if this is a tx only vector halve the interrupt rate */
if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* rx or mixed */
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
@@ -1359,7 +1359,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
@@ -1420,7 +1420,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
@@ -1458,10 +1458,10 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
- /* If all Rx work done, exit the polling mode */
+ /* If all Tx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->tx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
@@ -1848,7 +1848,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
- EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
+ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -1970,6 +1970,50 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @index: index of ring to set
+ * @rx_buf_len: rx buffer length
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
+ int rx_buf_len)
+{
+ struct ixgbe_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j;
+ u32 rscctrl;
+
+ rx_ring = &adapter->rx_ring[index];
+ j = rx_ring->reg_idx;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * we must limit the number of descriptors so that the
+ * total size of max desc * buf_len is not greater
+ * than 65535
+ */
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+ } else {
+ if (rx_buf_len < IXGBE_RXBUFFER_4096)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+}
+
+/**
* ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
* @adapter: board private structure
*
@@ -1990,7 +2034,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 fctrl, hlreg0;
u32 reta = 0, mrqc = 0;
u32 rdrxctl;
- u32 rscctrl;
int rx_buf_len;
/* Decide whether to use packet split mode or not */
@@ -2148,36 +2191,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
- j = rx_ring->reg_idx;
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
- rscctrl |= IXGBE_RSCCTL_RSCEN;
- /*
- * we must limit the number of descriptors so that the
- * total size of max desc * buf_len is not greater
- * than 65535
- */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
-#if (MAX_SKB_FRAGS > 16)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-#endif
- } else {
- if (rx_buf_len < IXGBE_RXBUFFER_4096)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8192)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- }
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rscctl(adapter, i, rx_buf_len);
+
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -2926,6 +2942,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
+ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+ del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
@@ -2989,7 +3007,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
@@ -3599,7 +3617,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- q_vector->eitr = adapter->eitr_param;
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = adapter->tx_eitr_param;
+ else
+ q_vector->eitr = adapter->rx_eitr_param;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
adapter->q_vector[q_idx] = q_vector;
@@ -3868,8 +3889,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.disable_fc_autoneg = false;
/* enable itr by default in dynamic mode */
- adapter->itr_setting = 1;
- adapter->eitr_param = 20000;
+ adapter->rx_itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->tx_itr_setting = 1;
+ adapter->tx_eitr_param = 10000;
/* set defaults for eitr in MegaBytes */
adapter->eitr_low = 10;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 547ac7c7479..23783586435 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1321,3 +1321,4 @@ MODULE_LICENSE("GPL");
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+MODULE_ALIAS("spi:ks8851");
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index da8d0a0ca94..f2a197fd47a 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -865,7 +865,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
dcrs = dcr_resource_start(np, 0);
if (dcrs == 0) {
dev_err(&op->dev, "could not get DMA register address\n");
- goto nodev;;
+ goto nodev;
}
lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index fb65b427c69..1d0d4d9ab62 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -241,7 +241,7 @@ static int macb_mii_init(struct macb *bp)
struct eth_platform_data *pdata;
int err = -ENXIO, i;
- /* Enable managment port */
+ /* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
bp->mii_bus = mdiobus_alloc();
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index cee199ceba2..3c16602172f 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -33,6 +33,7 @@
*/
#include <linux/mlx4/cmd.h>
+#include <linux/cache.h>
#include "fw.h"
#include "icm.h"
@@ -698,6 +699,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
+#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
@@ -735,6 +737,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
+ *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
+ (ilog2(cache_line_size()) - 4) << 5;
+
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
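
The mlx4 change above writes the host cache-line size into the INIT_HCA mailbox as (ilog2(cache_line_size()) - 4) << 5, i.e. log2(line/16) packed into the top bits of that byte (my reading of the shift, not stated in the diff). A quick userspace check of the encoding:

#include <stdio.h>

/* Integer log2, standing in for the kernel's ilog2(). */
static unsigned int ilog2_u(unsigned int v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    unsigned int lines[] = { 32, 64, 128 };

    for (int i = 0; i < 3; i++) {
        unsigned int enc = (ilog2_u(lines[i]) - 4) << 5;
        printf("cache line %3u bytes -> field 0x%02x\n", lines[i], enc);
    }
    return 0;   /* 64-byte lines encode as (6 - 4) << 5 = 0x40 */
}
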
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f7bdde111df..b5aa974827e 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1469,6 +1469,7 @@ netxen_nic_resume(struct pci_dev *pdev)
}
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+ return 0;
err_out_detach:
netxen_nic_detach(adapter);
@@ -1903,12 +1904,13 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
- goto done;
+ clear_bit(__NX_RESETTING, &adapter->state);
} else {
+ clear_bit(__NX_RESETTING, &adapter->state);
if (!netxen_nic_reset_context(adapter)) {
adapter->netdev->trans_start = jiffies;
- goto done;
+ return;
}
/* context reset failed, fall through for fw reset */
@@ -1916,8 +1918,6 @@ static void netxen_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
-done:
- clear_bit(__NX_RESETTING, &adapter->state);
}
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index bd0ac690d12..aad3b370c56 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -615,10 +615,10 @@ static int init586(struct net_device *dev)
/* addr_len |!src_insert |pre-len |loopback */
writeb(0x2e, &cfg_cmd->adr_len);
writeb(0x00, &cfg_cmd->priority);
- writeb(0x60, &cfg_cmd->ifs);;
+ writeb(0x60, &cfg_cmd->ifs);
writeb(0x00, &cfg_cmd->time_low);
writeb(0xf2, &cfg_cmd->time_high);
- writeb(0x00, &cfg_cmd->promisc);;
+ writeb(0x00, &cfg_cmd->promisc);
if (dev->flags & IFF_ALLMULTI) {
int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
if (num_addrs > len) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 76cc2614f48..f9364d0678f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -5615,7 +5615,7 @@ static void niu_init_tx_mac(struct niu *np)
/* The XMAC_MIN register only accepts values for TX min which
* have the low 3 bits cleared.
*/
- BUILD_BUG_ON(min & 0x7);
+ BUG_ON(min & 0x7);
if (np->flags & NIU_FLAGS_XMAC)
niu_init_tx_xmac(np, min, max);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 97db1c73234..474876c879c 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -340,12 +340,11 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
base = &virt[hw_info[i].offset & (req.Size-1)];
if ((readb(base+0) == hw_info[i].a0) &&
(readb(base+2) == hw_info[i].a1) &&
- (readb(base+4) == hw_info[i].a2))
- break;
- }
- if (i < NR_INFO) {
- for (j = 0; j < 6; j++)
- dev->dev_addr[j] = readb(base + (j<<1));
+ (readb(base+4) == hw_info[i].a2)) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = readb(base + (j<<1));
+ break;
+ }
}
iounmap(virt);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 22052925782..7783c5db81d 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2630,7 +2630,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
FLAGS_LI; /* Load irq delay values */
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq_base_dma;;
+ tmp = (u64)rx_ring->lbq_base_dma;
base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
page_entries = 0;
do {
@@ -2654,7 +2654,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq_base_dma;;
+ tmp = (u64)rx_ring->sbq_base_dma;
base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
page_entries = 0;
do {
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index bc98e7f69ee..ede937ee50c 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -72,7 +72,7 @@ static int rionet_check = 0;
static int rionet_capable = 1;
/*
- * This is a fast lookup table for for translating TX
+ * This is a fast lookup table for translating TX
* Ethernet packets into a destination RIO device. It
* could be made into a hash table to save memory depending
* on system trade-offs.
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8f..cc4b2f99989 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
int count;
int cpu;
- if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+ if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
return 1;
}
- cpumask_clear(core_mask);
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index f1df2ec8ad4..e6b33ee05ed 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -960,7 +960,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
/*PC88b*/
if (!phy->cf_join) {
phy->cf_join = TRUE ;
- queue_event(smc,EVENT_CFM,CF_JOIN+np) ; ;
+ queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
}
if (cmd == PC_JOIN)
GO_STATE(PC8_ACTIVE) ;
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index 79e665e0853..a320fdb3727 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -807,9 +807,9 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
mib_p->fddiPORTLerFlag ;
sp->p4050_pad = 0 ;
sp->p4050_cutoff =
- mib_p->fddiPORTLer_Cutoff ; ;
+ mib_p->fddiPORTLer_Cutoff ;
sp->p4050_alarm =
- mib_p->fddiPORTLer_Alarm ; ;
+ mib_p->fddiPORTLer_Alarm ;
sp->p4050_estimate =
mib_p->fddiPORTLer_Estimate ;
sp->p4050_reject_ct =
@@ -829,7 +829,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
sp->p4051_porttype =
mib_p->fddiPORTMy_Type ;
sp->p4051_connectstate =
- mib_p->fddiPORTConnectState ; ;
+ mib_p->fddiPORTConnectState ;
sp->p4051_pc_neighbor =
mib_p->fddiPORTNeighborType ;
sp->p4051_pc_withhold =
@@ -853,7 +853,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
struct smt_p_4053 *sp ;
sp = (struct smt_p_4053 *) to ;
sp->p4053_multiple =
- mib_p->fddiPORTMultiple_P ; ;
+ mib_p->fddiPORTMultiple_P ;
sp->p4053_availablepaths =
mib_p->fddiPORTAvailablePaths ;
sp->p4053_currentpath =
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 62e852e21ab..55bad408196 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -215,7 +215,7 @@ static void skge_wol_init(struct skge_port *skge)
if (skge->wol & WAKE_MAGIC)
ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
else
- ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 4bb52e9cd37..ef1165718dd 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -765,7 +765,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
if (sky2->wol & WAKE_MAGIC)
ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
else
- ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;;
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
@@ -1497,7 +1497,6 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
- hw->flags |= SKY2_HW_RAM_BUFFER;
pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
@@ -2926,6 +2925,9 @@ static int __devinit sky2_init(struct sky2_hw *hw)
++hw->ports;
}
+ if (sky2_read8(hw, B2_E_0))
+ hw->flags |= SKY2_HW_RAM_BUFFER;
+
return 0;
}
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index f1e5e4542c2..bc74db0d12f 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1016,7 +1016,6 @@ static const struct net_device_ops vnet_ops = {
.ndo_open = vnet_open,
.ndo_stop = vnet_close,
.ndo_set_multicast_list = vnet_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = vnet_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = vnet_tx_timeout,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d3ee1994b02..4fdfa2ae541 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -946,8 +946,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
char *name;
unsigned long flags = 0;
- err = -EINVAL;
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = security_tun_dev_create();
@@ -964,7 +962,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
flags |= TUN_TAP_DEV;
name = "tap%d";
} else
- goto failed;
+ return -EINVAL;
if (*ifr->ifr_name)
name = ifr->ifr_name;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 45cebfb302c..23300656c26 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -300,20 +300,23 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
}
- crc = get_unaligned_le32(skb2->data
- + len - ETH_FCS_LEN);
- skb_trim(skb2, len - ETH_FCS_LEN);
-
/*
* The bmCRC helps to denote when the CRC field in
* the Ethernet frame contains a calculated CRC:
* bmCRC = 1 : CRC is calculated
* bmCRC = 0 : CRC = 0xDEADBEEF
*/
- if (header & BIT(14))
- crc2 = ~crc32_le(~0, skb2->data, skb2->len);
- else
+ if (header & BIT(14)) {
+ crc = get_unaligned_le32(skb2->data
+ + len - ETH_FCS_LEN);
+ crc2 = ~crc32_le(~0, skb2->data, skb2->len
+ - ETH_FCS_LEN);
+ } else {
+ crc = get_unaligned_be32(skb2->data
+ + len - ETH_FCS_LEN);
crc2 = 0xdeadbeef;
+ }
+ skb_trim(skb2, len - ETH_FCS_LEN);
if (is_last)
return crc == crc2;
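
The cdc_eem fix above stops validating a CRC that was never computed: with bmCRC clear, the FCS field only carries the 0xdeadbeef sentinel (read big-endian), and only with bmCRC set is a real little-endian CRC-32 over the frame (minus the FCS) expected. A self-contained sketch of that policy, using a plain bit-by-bit CRC-32 in place of the kernel's crc32_le helpers:

#include <stdint.h>
#include <stdio.h>

/* Standard reflected CRC-32 (IEEE), equivalent in result to the
 * kernel's ~crc32_le(~0, buf, len) for this polynomial. */
static uint32_t crc32_ieee(const uint8_t *p, size_t len)
{
    uint32_t crc = 0xffffffffu;

    while (len--) {
        crc ^= *p++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ (0xedb88320u & (uint32_t)-(int32_t)(crc & 1));
    }
    return ~crc;
}

/* Returns 1 when the trailing 4 bytes are acceptable for this frame. */
static int eem_crc_ok(const uint8_t *f, size_t len, int bmcrc_set)
{
    const uint8_t *t;

    if (len < 4)
        return 0;
    t = f + len - 4;
    if (!bmcrc_set) {
        /* sentinel carried big-endian on the wire: de ad be ef */
        uint32_t be = ((uint32_t)t[0] << 24) | (t[1] << 16) | (t[2] << 8) | t[3];
        return be == 0xdeadbeefu;
    } else {
        /* real FCS is little-endian, computed over the frame minus FCS */
        uint32_t le = t[0] | (t[1] << 8) | ((uint32_t)t[2] << 16) | ((uint32_t)t[3] << 24);
        return le == crc32_ieee(f, len - 4);
    }
}

int main(void)
{
    uint8_t frame[64] = { 0 };
    uint32_t crc = crc32_ieee(frame, 60);

    frame[60] = crc & 0xff;
    frame[61] = (crc >> 8) & 0xff;
    frame[62] = (crc >> 16) & 0xff;
    frame[63] = (crc >> 24) & 0xff;
    printf("bmCRC=1 frame checks out: %d\n", eem_crc_ok(frame, 64, 1));
    return 0;
}
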
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e2a39b9be96..e391ef969c2 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -263,6 +263,7 @@ static int kaweth_control(struct kaweth_device *kaweth,
int timeout)
{
struct usb_ctrlrequest *dr;
+ int retval;
dbg("kaweth_control()");
@@ -278,18 +279,21 @@ static int kaweth_control(struct kaweth_device *kaweth,
return -ENOMEM;
}
- dr->bRequestType= requesttype;
+ dr->bRequestType = requesttype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(size);
- return kaweth_internal_control_msg(kaweth->dev,
- pipe,
- dr,
- data,
- size,
- timeout);
+ retval = kaweth_internal_control_msg(kaweth->dev,
+ pipe,
+ dr,
+ data,
+ size,
+ timeout);
+
+ kfree(dr);
+ return retval;
}
/****************************************************************
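
The kaweth hunk above is a leak fix: the usb_ctrlrequest was allocated per call and then abandoned when the function returned the result of the synchronous control message directly. A minimal userspace sketch of the same capture-then-free shape, with a made-up do_request() standing in for the USB call:

#include <stdlib.h>

struct request { int type, value; };

static int do_request(const struct request *r) { return r->value; } /* stand-in */

static int send_request(int type, int value)
{
    struct request *r = malloc(sizeof(*r));
    int ret;

    if (!r)
        return -1;
    r->type = type;
    r->value = value;
    ret = do_request(r);   /* previously the allocation escaped via "return do_request(r);" */
    free(r);               /* freed on the exit path, so nothing leaks per call */
    return ret;
}

int main(void) { return send_request(0, 0) == 0 ? 0 : 1; }
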
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 938fb3530a7..c6c922247d0 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1227,7 +1227,7 @@ static const struct driver_info smsc95xx_info = {
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_SEND_ZLP,
};
static const struct usb_device_id products[] = {
@@ -1237,10 +1237,75 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SMSC9505 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9505),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E00),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505A USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E01),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
/* SMSC9512/9514 USB Hub & Ethernet Device */
USB_DEVICE(0x0424, 0xec00),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* SMSC9500 USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9900),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505 USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9901),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9902),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505A USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9903),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9512/9514 USB Hub & Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9904),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device (HAL) */
+ USB_DEVICE(0x0424, 0x9905),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505A USB Ethernet Device (HAL) */
+ USB_DEVICE(0x0424, 0x9906),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500 USB Ethernet Device (Alternate ID) */
+ USB_DEVICE(0x0424, 0x9907),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device (Alternate ID) */
+ USB_DEVICE(0x0424, 0x9908),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9512/9514 USB Hub & Ethernet Device (Alternate ID) */
+ USB_DEVICE(0x0424, 0x9909),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 24b36f79515..ca5ca5ae061 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1049,7 +1049,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
*/
- if ((length % dev->maxpacket) == 0) {
+ if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
urb->transfer_buffer_length++;
if (skb_tailroom(skb)) {
skb->data[skb->len] = 0;
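
The usbnet change above gates the long-standing pad-byte trick behind the new FLAG_SEND_ZLP: devices that want a genuine zero-length packet (such as smsc95xx, flagged in the previous hunk) keep the exact length, while everything else still gets one pad byte when the length divides evenly by the endpoint packet size. Assumed semantics, sketched:

#include <stdbool.h>
#include <stdio.h>

static size_t tx_len(size_t len, size_t maxpacket, bool send_zlp)
{
    if (!send_zlp && (len % maxpacket) == 0)
        return len + 1;   /* pad byte forces a short packet on the wire */
    return len;           /* exact length kept; a zero-length packet terminates
                           * the transfer (arranged elsewhere in the driver) */
}

int main(void)
{
    printf("512-byte frame, no ZLP flag : send %zu bytes\n", tx_len(512, 512, false));
    printf("512-byte frame, ZLP flag set: send %zu bytes\n", tx_len(512, 512, true));
    return 0;
}
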
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 32266fb89c2..d445845f277 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
-/* A simple network driver using virtio.
+/* A network driver using virtio.
*
* Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*
@@ -22,6 +22,7 @@
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
@@ -47,19 +48,9 @@ struct virtnet_info
struct napi_struct napi;
unsigned int status;
- /* The skb we couldn't send because buffers were full. */
- struct sk_buff *last_xmit_skb;
-
- /* If we need to free in a timer, this is it. */
- struct timer_list xmit_free_timer;
-
/* Number of input buffers, and max we've ever had. */
unsigned int num, max;
- /* For cleaning up after transmission. */
- struct tasklet_struct tasklet;
- bool free_in_tasklet;
-
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -77,9 +68,17 @@ struct virtnet_info
struct page *pages;
};
-static inline void *skb_vnet_hdr(struct sk_buff *skb)
+struct skb_vnet_hdr {
+ union {
+ struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+ };
+ unsigned int num_sg;
+};
+
+static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
- return (struct virtio_net_hdr *)skb->cb;
+ return (struct skb_vnet_hdr *)skb->cb;
}
static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -118,17 +117,13 @@ static void skb_xmit_done(struct virtqueue *svq)
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
-
- /* Make sure we re-xmit last_xmit_skb: if there are no more packets
- * queued, start_xmit won't be called. */
- tasklet_schedule(&vi->tasklet);
}
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
int err;
int i;
@@ -139,7 +134,6 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
}
if (vi->mergeable_rx_bufs) {
- struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
unsigned int copy;
char *p = page_address(skb_shinfo(skb)->frags[0].page);
@@ -147,8 +141,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
len = PAGE_SIZE;
len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
- memcpy(hdr, p, sizeof(*mhdr));
- p += sizeof(*mhdr);
+ memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
+ p += sizeof(hdr->mhdr);
copy = len;
if (copy > skb_tailroom(skb))
@@ -163,13 +157,13 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
skb_shinfo(skb)->nr_frags--;
} else {
skb_shinfo(skb)->frags[0].page_offset +=
- sizeof(*mhdr) + copy;
+ sizeof(hdr->mhdr) + copy;
skb_shinfo(skb)->frags[0].size = len;
skb->data_len += len;
skb->len += len;
}
- while (--mhdr->num_buffers) {
+ while (--hdr->mhdr.num_buffers) {
struct sk_buff *nskb;
i = skb_shinfo(skb)->nr_frags;
@@ -183,7 +177,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
if (!nskb) {
pr_debug("%s: rx error: %d buffers missing\n",
- dev->name, mhdr->num_buffers);
+ dev->name, hdr->mhdr.num_buffers);
dev->stats.rx_length_errors++;
goto drop;
}
@@ -204,7 +198,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
skb->len += len;
}
} else {
- len -= sizeof(struct virtio_net_hdr);
+ len -= sizeof(hdr->hdr);
if (len <= MAX_PACKET_LEN)
trim_pages(vi, skb);
@@ -222,9 +216,11 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
- if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
- if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
+ if (!skb_partial_csum_set(skb,
+ hdr->hdr.csum_start,
+ hdr->hdr.csum_offset))
goto frame_err;
}
@@ -232,9 +228,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
- switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
@@ -247,14 +243,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
default:
if (net_ratelimit())
printk(KERN_WARNING "%s: bad gso type %u.\n",
- dev->name, hdr->gso_type);
+ dev->name, hdr->hdr.gso_type);
goto frame_err;
}
- if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb_shinfo(skb)->gso_size = hdr->gso_size;
+ skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
if (net_ratelimit())
printk(KERN_WARNING "%s: zero gso size.\n",
@@ -284,8 +280,8 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
bool oom = false;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
- for (;;) {
- struct virtio_net_hdr *hdr;
+ do {
+ struct skb_vnet_hdr *hdr;
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
if (unlikely(!skb)) {
@@ -297,7 +293,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, hdr, sizeof(*hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -320,14 +316,14 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
skb_queue_head(&vi->recv, skb);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
- if (err) {
+ if (err < 0) {
skb_unlink(skb, &vi->recv);
trim_pages(vi, skb);
kfree_skb(skb);
break;
}
vi->num++;
- }
+ } while (err >= num);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
@@ -345,7 +341,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
if (!vi->mergeable_rx_bufs)
return try_fill_recv_maxbufs(vi, gfp);
- for (;;) {
+ do {
skb_frag_t *f;
skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -373,13 +369,13 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
skb_queue_head(&vi->recv, skb);
err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
- if (err) {
+ if (err < 0) {
skb_unlink(skb, &vi->recv);
kfree_skb(skb);
break;
}
vi->num++;
- }
+ } while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
@@ -447,42 +443,26 @@ again:
return received;
}
-static void free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
struct sk_buff *skb;
- unsigned int len;
+ unsigned int len, tot_sgs = 0;
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
__skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
+ tot_sgs += skb_vnet_hdr(skb)->num_sg;
kfree_skb(skb);
}
-}
-
-/* If the virtio transport doesn't always notify us when all in-flight packets
- * are consumed, we fall back to using this function on a timer to free them. */
-static void xmit_free(unsigned long data)
-{
- struct virtnet_info *vi = (void *)data;
-
- netif_tx_lock(vi->dev);
-
- free_old_xmit_skbs(vi);
-
- if (!skb_queue_empty(&vi->send))
- mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
- netif_tx_unlock(vi->dev);
+ return tot_sgs;
}
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- int num, err;
struct scatterlist sg[2+MAX_SKB_FRAGS];
- struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
- struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -490,108 +470,89 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->csum_start = skb->csum_start - skb_headroom(skb);
- hdr->csum_offset = skb->csum_offset;
+ hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+ hdr->hdr.csum_offset = skb->csum_offset;
} else {
- hdr->flags = 0;
- hdr->csum_offset = hdr->csum_start = 0;
+ hdr->hdr.flags = 0;
+ hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
}
if (skb_is_gso(skb)) {
- hdr->hdr_len = skb_headlen(skb);
- hdr->gso_size = skb_shinfo(skb)->gso_size;
+ hdr->hdr.hdr_len = skb_headlen(skb);
+ hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
- hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
} else {
- hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
- hdr->gso_size = hdr->hdr_len = 0;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+ hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
}
- mhdr->num_buffers = 0;
+ hdr->mhdr.num_buffers = 0;
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, mhdr, sizeof(*mhdr));
+ sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
else
- sg_set_buf(sg, hdr, sizeof(*hdr));
-
- num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-
- err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
- if (!err && !vi->free_in_tasklet)
- mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
+ sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
- return err;
-}
-
-static void xmit_tasklet(unsigned long data)
-{
- struct virtnet_info *vi = (void *)data;
-
- netif_tx_lock_bh(vi->dev);
- if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
- vi->svq->vq_ops->kick(vi->svq);
- vi->last_xmit_skb = NULL;
- }
- if (vi->free_in_tasklet)
- free_old_xmit_skbs(vi);
- netif_tx_unlock_bh(vi->dev);
+ hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+ return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ int capacity;
again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
- /* If we has a buffer left over from last time, send it now. */
- if (unlikely(vi->last_xmit_skb) &&
- xmit_skb(vi, vi->last_xmit_skb) != 0)
- goto stop_queue;
-
- vi->last_xmit_skb = NULL;
-
/* Put new one in send queue and do transmit */
- if (likely(skb)) {
- __skb_queue_head(&vi->send, skb);
- if (xmit_skb(vi, skb) != 0) {
- vi->last_xmit_skb = skb;
- skb = NULL;
- goto stop_queue;
+ __skb_queue_head(&vi->send, skb);
+ capacity = xmit_skb(vi, skb);
+
+ /* This can happen with OOM and indirect buffers. */
+ if (unlikely(capacity < 0)) {
+ netif_stop_queue(dev);
+ dev_warn(&dev->dev, "Unexpected full queue\n");
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ netif_start_queue(dev);
+ goto again;
}
+ return NETDEV_TX_BUSY;
}
-done:
- vi->svq->vq_ops->kick(vi->svq);
- return NETDEV_TX_OK;
-stop_queue:
- pr_debug("%s: virtio not prepared to send\n", dev->name);
- netif_stop_queue(dev);
-
- /* Activate callback for using skbs: if this returns false it
- * means some were used in the meantime. */
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
- netif_start_queue(dev);
- goto again;
- }
- if (skb) {
- /* Drop this skb: we only queue one. */
- vi->dev->stats.tx_dropped++;
- kfree_skb(skb);
+ vi->svq->vq_ops->kick(vi->svq);
+ /* Don't wait up for transmitted skbs to be freed. */
+ skb_orphan(skb);
+ nf_reset(skb);
+
+ /* Apparently nice girls don't return TX_BUSY; stop the queue
+ * before it gets out of hand. Naturally, this wastes entries. */
+ if (capacity < 2+MAX_SKB_FRAGS) {
+ netif_stop_queue(dev);
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ /* More just got used, free them then recheck. */
+ capacity += free_old_xmit_skbs(vi);
+ if (capacity >= 2+MAX_SKB_FRAGS) {
+ netif_start_queue(dev);
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ }
+ }
}
- goto done;
+
+ return NETDEV_TX_OK;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -668,7 +629,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi));
+ BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
vi->cvq->vq_ops->kick(vi->cvq);
@@ -924,10 +885,6 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
- /* If they give us a callback when all buffers are done, we don't need
- * the timer. */
- vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
-
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
|| virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -959,11 +916,6 @@ static int virtnet_probe(struct virtio_device *vdev)
skb_queue_head_init(&vi->recv);
skb_queue_head_init(&vi->send);
- tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
-
- if (!vi->free_in_tasklet)
- setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -1004,9 +956,6 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- if (!vi->free_in_tasklet)
- del_timer_sync(&vi->xmit_free_timer);
-
/* Free our skbs in send and recv queues, if any. */
while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
kfree_skb(skb);
@@ -1040,7 +989,6 @@ static unsigned int features[] = {
VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
- VIRTIO_F_NOTIFY_ON_EMPTY,
};
static struct virtio_driver virtio_net = {
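
The virtio_net rework above drops the last_xmit_skb/tasklet/timer machinery: add_buf() now reports remaining ring capacity, skbs are orphaned after the kick, and the queue is stopped as soon as fewer than 2 + MAX_SKB_FRAGS slots remain, so the driver never has to hand back TX_BUSY for a predictably full ring. A toy model of that capacity logic (names and numbers are illustrative, not virtio code):

#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 16
#define MAX_FRAGS  4

static int ring_free = RING_SLOTS;
static bool queue_stopped;

static int add_buf(int nslots)          /* returns capacity left, <0 if it won't fit */
{
    if (nslots > ring_free)
        return -1;
    ring_free -= nslots;
    return ring_free;
}

static int free_old_skbs(void)          /* pretend the device consumed everything */
{
    int reclaimed = RING_SLOTS - ring_free;

    ring_free = RING_SLOTS;
    return reclaimed;
}

static void start_xmit(int nslots)
{
    int capacity = add_buf(nslots);

    if (capacity < 0) {                 /* unexpected full ring: stop and bail */
        queue_stopped = true;
        return;
    }
    if (capacity < 2 + MAX_FRAGS) {     /* stop early, before a worst-case frame can't fit */
        queue_stopped = true;
        capacity += free_old_skbs();    /* recheck after reclaiming completed buffers */
        if (capacity >= 2 + MAX_FRAGS)
            queue_stopped = false;
    }
}

int main(void)
{
    for (int i = 0; i < 6; i++) {
        start_xmit(3);
        printf("after frame %d: free=%2d stopped=%d\n", i, ring_free, queue_stopped);
    }
    return 0;
}
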
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 62779a520ca..3e94f0ce090 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -1541,7 +1541,7 @@ void vxge_hw_ring_rxd_1b_info_get(
rxd_info->l4_cksum_valid =
(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
rxd_info->l4_cksum =
- (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);;
+ (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
rxd_info->frame =
(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
rxd_info->proto =
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b378037a29b..068d7a9d3e3 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -2350,7 +2350,7 @@ static int vxge_enable_msix(struct vxgedev *vdev)
enum vxge_hw_status status;
/* 0 - Tx, 1 - Rx */
int tim_msix_id[4];
- int alarm_msix_id = 0, msix_intr_vect = 0;;
+ int alarm_msix_id = 0, msix_intr_vect = 0;
vdev->intr_cnt = 0;
/* allocate msix vectors */
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c
index 2ab1d59870f..a8b689635a3 100644
--- a/drivers/net/wireless/arlan-proc.c
+++ b/drivers/net/wireless/arlan-proc.c
@@ -402,7 +402,7 @@ static int arlan_setup_card_by_book(struct net_device *dev)
static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
-static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_info(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -629,7 +629,7 @@ final:
*lenp = pos;
if (!write)
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
else
{
*lenp = 0;
@@ -639,7 +639,7 @@ final:
}
-static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_info161719(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -669,11 +669,11 @@ static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
-static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_infotxRing(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -698,11 +698,11 @@ static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp
SARLBNpln(u_char, txBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
-static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_inforxRing(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -726,11 +726,11 @@ static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp
SARLBNpln(u_char, rxBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
-static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_info18(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -756,7 +756,7 @@ static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
@@ -766,7 +766,7 @@ final:
static char conf_reset_result[200];
-static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
+static int arlan_configure(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int pos = 0;
@@ -788,10 +788,10 @@ static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
return -1;
*lenp = pos;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, buffer, lenp, ppos);
}
-static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_reset(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int pos = 0;
@@ -811,7 +811,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
} else
return -1;
*lenp = pos + 3;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, buffer, lenp, ppos);
}
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0138ac8bf5..e974e5829e1 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -64,6 +64,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9170) },
/* Atheros TG121N */
{ USB_DEVICE(0x0cf3, 0x1001) },
+ /* TP-Link TL-WN821N v2 */
+ { USB_DEVICE(0x0cf3, 0x1002) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160A */
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index debad07d990..c63ea6afd96 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -982,7 +982,7 @@
#define AR5K_5414_CBCFG_BUF_DIS 0x10 /* Disable buffer */
/*
- * PCI-E Power managment configuration
+ * PCI-E Power management configuration
* and status register [5424+]
*/
#define AR5K_PCIE_PM_CTL 0x4068 /* Register address */
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 3234995e888..0ad6d0b76e9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -609,14 +609,24 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
AR_PHY_CH1_EXT_CCA,
AR_PHY_CH2_EXT_CCA
};
- u8 chainmask;
+ u8 chainmask, rx_chain_status;
+ rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
if (AR_SREV_9285(ah))
chainmask = 0x9;
- else if (AR_SREV_9280(ah) || AR_SREV_9287(ah))
- chainmask = 0x1B;
- else
- chainmask = 0x3F;
+ else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
+ if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ } else {
+ if (rx_chain_status & 0x4)
+ chainmask = 0x3F;
+ else if (rx_chain_status & 0x2)
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ }
h = ah->nfCalHist;
@@ -697,6 +707,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
else if (AR_SREV_9285(ah))
noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
+ else if (AR_SREV_9287(ah))
+ noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
else
noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE;
@@ -924,6 +936,7 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
regVal |= (1 << (19 + i));
REG_WRITE(ah, 0x7834, regVal);
udelay(1);
+ regVal = REG_READ(ah, 0x7834);
regVal &= (~(0x1 << (19 + i)));
reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
regVal |= (reg_field << (19 + i));
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 019bcbba40e..9028ab193e4 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -28,6 +28,7 @@ extern const struct ath9k_percal_data adc_init_dc_cal;
#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
+#define AR_PHY_CCA_MAX_AR9287_GOOD_VALUE -118
#define AR_PHY_CCA_MAX_HIGH_VALUE -62
#define AR_PHY_CCA_MIN_BAD_VALUE -140
#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index ae7fb5dcb26..4071fc91da0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -509,6 +509,8 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
eep->baseEepHeader.dacLpMode);
+ udelay(100);
+
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
pModal->miscBits >> 2);
@@ -902,7 +904,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
u16 powerLimit)
{
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
-#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */
+#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
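
The one-character eeprom_def.c change above tightens a rounded constant: reading the comment 10*log10(3)*2 as the three-chain power reduction expressed in 0.5 dB steps (an assumption), the exact value is about 9.54, so 9 truncates where 10 over-reduced. Checking the arithmetic:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double half_db_steps = 10.0 * log10(3.0) * 2.0;   /* ~9.54 */

    printf("3-chain reduction: %.2f half-dB steps -> %d\n",
           half_db_steps, (int)half_db_steps);
    return 0;
}
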
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index b6c6cca0781..ca7694caf36 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -842,7 +842,7 @@ static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
{
- if (AR_SREV_9287_11(ah))
+ if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9287Modes_rx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
@@ -853,7 +853,7 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
else if (AR_SREV_9280_20(ah))
ath9k_hw_init_rxgain_ini(ah);
- if (AR_SREV_9287_11(ah)) {
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9287Modes_tx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
@@ -965,7 +965,7 @@ int ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_mode_regs(ah);
if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, 0);
+ ath9k_hw_configpcipowersave(ah, 0, 0);
else
ath9k_hw_disablepcie(ah);
@@ -1273,6 +1273,15 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
*/
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) &
+ (~AR_PCU_MISC_MODE2_HWWAR1);
+
+ if (AR_SREV_9287_10_OR_LATER(ah))
+ val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
if (!AR_SREV_5416_20_OR_LATER(ah) ||
AR_SREV_9280_10_OR_LATER(ah))
@@ -1784,7 +1793,7 @@ static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- if (OLC_FOR_AR9280_20_LATER) {
+ if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
return false;
} else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
@@ -2338,6 +2347,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
u32 macStaId1;
+ u64 tsf = 0;
int i, rx_chainmask, r;
ah->extprotspacing = sc->ht_extprotspacing;
@@ -2347,7 +2357,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
- if (curchan)
+ if (curchan && !ah->chip_fullsleep)
ath9k_hw_getnf(ah, curchan);
if (bChannelChange &&
@@ -2356,8 +2366,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
(chan->channel != ah->curchan->channel) &&
((chan->channelFlags & CHANNEL_ALL) ==
(ah->curchan->channelFlags & CHANNEL_ALL)) &&
- (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
- !IS_CHAN_A_5MHZ_SPACED(ah->curchan)))) {
+ !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
+ IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) {
ath9k_hw_loadnf(ah, ah->curchan);
@@ -2372,6 +2382,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
+ /* For chips on which RTC reset is done, save TSF before it gets cleared */
+ if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ tsf = ath9k_hw_gettsf64(ah);
+
saveLedState = REG_READ(ah, AR_CFG_LED) &
(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
@@ -2398,6 +2412,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
udelay(50);
}
+ /* Restore TSF */
+ if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ ath9k_hw_settsf64(ah, tsf);
+
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
@@ -3005,9 +3023,10 @@ void ath9k_ps_restore(struct ath_softc *sc)
* Programming the SerDes must go through the same 288 bit serial shift
* register as the other analog registers. Hence the 9 writes.
*/
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore)
+void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
{
u8 i;
+ u32 val;
if (ah->is_pciexpress != true)
return;
@@ -3017,84 +3036,113 @@ void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore)
return;
/* Nothing to do on restore for 11N */
- if (restore)
- return;
+ if (!restore) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * AR9280 2.0 or later chips use SerDes values from the
+ * initvals.h initialized depending on chipset during
+ * ath9k_hw_init()
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else if (AR_SREV_9280(ah) &&
+ (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ /* Shut off CLKREQ active in L1 */
+ if (ah->config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /*
- * AR9280 2.0 or later chips use SerDes values from the
- * initvals.h initialized depending on chipset during
- * ath9k_hw_init()
- */
- for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
- INI_RA(&ah->iniPcieSerdes, i, 1));
- }
- } else if (AR_SREV_9280(ah) &&
- (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- /* Shut off CLKREQ active in L1 */
- if (ah->config.pcie_clock_req)
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
- else
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+ } else {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
- } else {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ /*
+ * Ignore ah->ah_config.pcie_clock_req setting for
+ * pre-AR9280 11n
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
- /*
- * Ignore ah->ah_config.pcie_clock_req setting for
- * pre-AR9280 11n
- */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+ }
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+ udelay(1000);
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- }
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
- udelay(1000);
+ /* Several PCIe massages to ensure proper behaviour */
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+ * On AR9280 chips bit 22 of 0x4004 needs to be
+ * set otherwise card may disappear.
+ */
+ val = AR9280_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else
+ val = AR_WA_DEFAULT;
+ }
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_WRITE(ah, AR_WA, val);
+ }
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen) {
- REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah))
- REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT);
+ if (power_off) {
/*
- * On AR9280 chips bit 22 of 0x4004 needs to be set to
- * otherwise card may disappear.
+ * Set PCIe workaround bits
+ * bit 14 in WA register (disable L1) should only
+ * be set when device enters D3 and be cleared
+ * when device comes back to D0.
*/
- else if (AR_SREV_9280(ah))
- REG_WRITE(ah, AR_WA, AR9280_WA_DEFAULT);
- else
- REG_WRITE(ah, AR_WA, AR_WA_DEFAULT);
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ } else {
+ if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ }
+ }
}
}
@@ -3652,15 +3700,7 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
#endif
- if ((ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9160) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9100) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9280) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9285))
- pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
- else
- pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9106a0b537d..b8923457182 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -106,7 +106,7 @@
#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */
#define AH_TIME_QUANTUM 10
#define AR_KEYTABLE_SIZE 128
-#define POWER_UP_TIME 200000
+#define POWER_UP_TIME 10000
#define SPUR_RSSI_THRESH 40
#define CAB_TIMEOUT_VAL 10
@@ -650,7 +650,7 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
bool ath9k_hw_setpower(struct ath_hw *ah,
enum ath9k_power_mode mode);
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore);
+void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
/* Interrupt Handling */
bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3dc7b5a13e6..52bed89063d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1131,7 +1131,7 @@ void ath_radio_enable(struct ath_softc *sc)
int r;
ath9k_ps_wakeup(sc);
- ath9k_hw_configpcipowersave(ah, 0);
+ ath9k_hw_configpcipowersave(ah, 0, 0);
if (!ah->curchan)
ah->curchan = ath_get_curchannel(sc, sc->hw);
@@ -1202,7 +1202,7 @@ void ath_radio_disable(struct ath_softc *sc)
spin_unlock_bh(&sc->sc_resetlock);
ath9k_hw_phy_disable(ah);
- ath9k_hw_configpcipowersave(ah, 1);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_ps_restore(sc);
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
@@ -1226,11 +1226,6 @@ static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
bool blocked = !!ath_is_rfkill_set(sc);
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-
- if (blocked)
- ath_radio_disable(sc);
- else
- ath_radio_enable(sc);
}
static void ath_start_rfkill_poll(struct ath_softc *sc)
@@ -1260,6 +1255,7 @@ void ath_detach(struct ath_softc *sc)
DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
ath_deinit_leds(sc);
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
for (i = 0; i < sc->num_sec_wiphy; i++) {
struct ath_wiphy *aphy = sc->sec_wiphy[i];
@@ -1942,7 +1938,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
init_channel = ath_get_curchannel(sc, hw);
/* Reset SERDES registers */
- ath9k_hw_configpcipowersave(sc->sc_ah, 0);
+ ath9k_hw_configpcipowersave(sc->sc_ah, 0, 0);
/*
* The basic interface to setting the hardware in a good
@@ -2166,11 +2162,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
} else
sc->rx.rxlink = NULL;
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(sc->sc_ah);
- ath9k_hw_configpcipowersave(sc->sc_ah, 1);
+ ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
sc->sc_flags |= SC_OP_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index e5c29eb86e8..d83b77f821e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -676,8 +676,9 @@
#define AR_RC_HOSTIF 0x00000100
#define AR_WA 0x4004
+#define AR_WA_D3_L1_DISABLE (1 << 14)
#define AR9285_WA_DEFAULT 0x004a05cb
-#define AR9280_WA_DEFAULT 0x0040073f
+#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index a3b36b3a9d6..cce188837d1 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3330,7 +3330,7 @@ static void atmel_smooth_qual(struct atmel_private *priv)
priv->wstats.qual.updated &= ~IW_QUAL_QUAL_INVALID;
}
-/* deals with incoming managment frames. */
+/* deals with incoming management frames. */
static void atmel_management_frame(struct atmel_private *priv,
struct ieee80211_hdr *header,
u16 frame_len, u8 rssi)
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 83e38134acc..54ea61c15d8 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -61,11 +61,28 @@ config B43_PCMCIA
If unsure, say N.
+config B43_SDIO
+ bool "Broadcom 43xx SDIO device support (EXPERIMENTAL)"
+ depends on B43 && SSB_SDIOHOST_POSSIBLE && EXPERIMENTAL
+ select SSB_SDIOHOST
+ ---help---
+ Broadcom 43xx device support for Soft-MAC SDIO devices.
+
+ With this config option you can drive Soft-MAC b43 cards with a
+ Secure Digital I/O interface.
+ This includes the WLAN daughter card found on the Nintendo Wii
+ video game console.
+ Note that this does not support Broadcom 43xx Full-MAC devices.
+
+ It's safe to select Y here, even if you don't have a B43 SDIO device.
+
+ If unsure, say N.
+
# Data transfers to the device via PIO
-# This is only needed on PCMCIA devices. All others can do DMA properly.
+# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
config B43_PIO
bool
- depends on B43 && (B43_PCMCIA || B43_FORCE_PIO)
+ depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
select SSB_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index da379f4b0c3..84772a2542d 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -16,6 +16,7 @@ b43-$(CONFIG_B43_PIO) += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
+b43-$(CONFIG_B43_SDIO) += sdio.o
b43-$(CONFIG_B43_DEBUG) += debugfs.o
obj-$(CONFIG_B43) += b43.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 09cfe68537b..fa1549a03c7 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -629,13 +629,6 @@ struct b43_wl {
* from the mac80211 subsystem. */
u16 mac80211_initially_registered_queues;
- /* R/W lock for data transmission.
- * Transmissions on 2+ queues can run concurrently, but somebody else
- * might sync with TX by write_lock_irqsave()'ing. */
- rwlock_t tx_lock;
- /* Lock for LEDs access. */
- spinlock_t leds_lock;
-
/* We can only have one operating interface (802.11 core)
* at a time. General information about this interface follows.
*/
@@ -686,6 +679,9 @@ struct b43_wl {
struct work_struct tx_work;
/* Queue of packets to be transmitted. */
struct sk_buff_head tx_queue;
+
+ /* The device LEDs. */
+ struct b43_leds leds;
};
/* The type of the firmware file. */
@@ -768,13 +764,10 @@ struct b43_wldev {
/* The device initialization status.
* Use b43_status() to query. */
atomic_t __init_status;
- /* Saved init status for handling suspend. */
- int suspend_init_status;
bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */
bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
bool radio_hw_enable; /* saved state of radio hardware enabled state */
- bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
bool qos_enabled; /* TRUE, if QoS is used. */
bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
@@ -794,12 +787,6 @@ struct b43_wldev {
/* Various statistics about the physical device. */
struct b43_stats stats;
- /* The device LEDs. */
- struct b43_led led_tx;
- struct b43_led led_rx;
- struct b43_led led_assoc;
- struct b43_led led_radio;
-
/* Reason code of the last interrupt. */
u32 irq_reason;
u32 dma_reason[6];
@@ -830,6 +817,10 @@ struct b43_wldev {
/* Debugging stuff follows. */
#ifdef CONFIG_B43_DEBUG
struct b43_dfsentry *dfsentry;
+ unsigned int irq_count;
+ unsigned int irq_bit_count[32];
+ unsigned int tx_count;
+ unsigned int rx_count;
#endif
};
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 8f64943e3f6..80b19a44a40 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -689,6 +689,7 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0);
add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0);
+ add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0);
#undef add_dyn_dbg
}
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index e47b4b488b0..822aad8842f 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -13,6 +13,7 @@ enum b43_dyndbg { /* Dynamic debugging features */
B43_DBG_LO,
B43_DBG_FIRMWARE,
B43_DBG_KEYS,
+ B43_DBG_VERBOSESTATS,
__B43_NR_DYNDBG,
};
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index a467ee260a1..8701034569f 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1428,9 +1428,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
ring->nr_failed_tx_packets++;
ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
- ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
+ ieee80211_tx_status(dev->wl->hw, meta->skb);
- /* skb is freed by ieee80211_tx_status_irqsafe() */
+ /* skb is freed by ieee80211_tx_status() */
meta->skb = NULL;
} else {
/* No need to call free_descriptor_buffer here, as
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index c8b317094c3..fbe3d4f62ce 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -34,57 +34,88 @@
static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
bool activelow)
{
- struct b43_wl *wl = dev->wl;
- unsigned long flags;
u16 ctl;
- spin_lock_irqsave(&wl->leds_lock, flags);
ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
if (activelow)
ctl &= ~(1 << led_index);
else
ctl |= (1 << led_index);
b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
- spin_unlock_irqrestore(&wl->leds_lock, flags);
}
static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index,
bool activelow)
{
- struct b43_wl *wl = dev->wl;
- unsigned long flags;
u16 ctl;
- spin_lock_irqsave(&wl->leds_lock, flags);
ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
if (activelow)
ctl |= (1 << led_index);
else
ctl &= ~(1 << led_index);
b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
- spin_unlock_irqrestore(&wl->leds_lock, flags);
}
-/* Callback from the LED subsystem. */
-static void b43_led_brightness_set(struct led_classdev *led_dev,
- enum led_brightness brightness)
+static void b43_led_update(struct b43_wldev *dev,
+ struct b43_led *led)
{
- struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
- struct b43_wldev *dev = led->dev;
bool radio_enabled;
+ bool turn_on;
- if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED))
+ if (!led->wl)
return;
- /* Checking the radio-enabled status here is slightly racy,
- * but we want to avoid the locking overhead and we don't care
- * whether the LED has the wrong state for a second. */
radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable);
- if (brightness == LED_OFF || !radio_enabled)
- b43_led_turn_off(dev, led->index, led->activelow);
+ /* The led->state read is racy, but we don't care. In case we raced
+ * with the brightness_set handler, we will be called again soon
+ * to fixup our state. */
+ if (radio_enabled)
+ turn_on = atomic_read(&led->state) != LED_OFF;
else
+ turn_on = 0;
+ if (turn_on == led->hw_state)
+ return;
+ led->hw_state = turn_on;
+
+ if (turn_on)
b43_led_turn_on(dev, led->index, led->activelow);
+ else
+ b43_led_turn_off(dev, led->index, led->activelow);
+}
+
+static void b43_leds_work(struct work_struct *work)
+{
+ struct b43_leds *leds = container_of(work, struct b43_leds, work);
+ struct b43_wl *wl = container_of(leds, struct b43_wl, leds);
+ struct b43_wldev *dev;
+
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED))
+ goto out_unlock;
+
+ b43_led_update(dev, &wl->leds.led_tx);
+ b43_led_update(dev, &wl->leds.led_rx);
+ b43_led_update(dev, &wl->leds.led_radio);
+ b43_led_update(dev, &wl->leds.led_assoc);
+
+out_unlock:
+ mutex_unlock(&wl->mutex);
+}
+
+/* Callback from the LED subsystem. */
+static void b43_led_brightness_set(struct led_classdev *led_dev,
+ enum led_brightness brightness)
+{
+ struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
+ struct b43_wl *wl = led->wl;
+
+ if (likely(!wl->leds.stop)) {
+ atomic_set(&led->state, brightness);
+ ieee80211_queue_work(wl->hw, &wl->leds.work);
+ }
}
static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
@@ -93,15 +124,15 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
{
int err;
- b43_led_turn_off(dev, led_index, activelow);
- if (led->dev)
+ if (led->wl)
return -EEXIST;
if (!default_trigger)
return -EINVAL;
- led->dev = dev;
+ led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
strncpy(led->name, name, sizeof(led->name));
+ atomic_set(&led->state, 0);
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
@@ -110,19 +141,19 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
err = led_classdev_register(dev->dev->dev, &led->led_dev);
if (err) {
b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
- led->dev = NULL;
+ led->wl = NULL;
return err;
}
+
return 0;
}
static void b43_unregister_led(struct b43_led *led)
{
- if (!led->dev)
+ if (!led->wl)
return;
led_classdev_unregister(&led->led_dev);
- b43_led_turn_off(led->dev, led->index, led->activelow);
- led->dev = NULL;
+ led->wl = NULL;
}
static void b43_map_led(struct b43_wldev *dev,
@@ -137,24 +168,20 @@ static void b43_map_led(struct b43_wldev *dev,
* generic LED triggers. */
switch (behaviour) {
case B43_LED_INACTIVE:
- break;
case B43_LED_OFF:
- b43_led_turn_off(dev, led_index, activelow);
- break;
case B43_LED_ON:
- b43_led_turn_on(dev, led_index, activelow);
break;
case B43_LED_ACTIVITY:
case B43_LED_TRANSFER:
case B43_LED_APTRANSFER:
snprintf(name, sizeof(name),
"b43-%s::tx", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_tx, name,
+ b43_register_led(dev, &dev->wl->leds.led_tx, name,
ieee80211_get_tx_led_name(hw),
led_index, activelow);
snprintf(name, sizeof(name),
"b43-%s::rx", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_rx, name,
+ b43_register_led(dev, &dev->wl->leds.led_rx, name,
ieee80211_get_rx_led_name(hw),
led_index, activelow);
break;
@@ -164,18 +191,15 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_MODE_BG:
snprintf(name, sizeof(name),
"b43-%s::radio", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_radio, name,
+ b43_register_led(dev, &dev->wl->leds.led_radio, name,
ieee80211_get_radio_led_name(hw),
led_index, activelow);
- /* Sync the RF-kill LED state with radio and switch states. */
- if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev))
- b43_led_turn_on(dev, led_index, activelow);
break;
case B43_LED_WEIRD:
case B43_LED_ASSOC:
snprintf(name, sizeof(name),
"b43-%s::assoc", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_assoc, name,
+ b43_register_led(dev, &dev->wl->leds.led_assoc, name,
ieee80211_get_assoc_led_name(hw),
led_index, activelow);
break;
@@ -186,58 +210,150 @@ static void b43_map_led(struct b43_wldev *dev,
}
}
-void b43_leds_init(struct b43_wldev *dev)
+static void b43_led_get_sprominfo(struct b43_wldev *dev,
+ unsigned int led_index,
+ enum b43_led_behaviour *behaviour,
+ bool *activelow)
{
struct ssb_bus *bus = dev->dev->bus;
u8 sprom[4];
- int i;
- enum b43_led_behaviour behaviour;
- bool activelow;
sprom[0] = bus->sprom.gpio0;
sprom[1] = bus->sprom.gpio1;
sprom[2] = bus->sprom.gpio2;
sprom[3] = bus->sprom.gpio3;
- for (i = 0; i < 4; i++) {
- if (sprom[i] == 0xFF) {
- /* There is no LED information in the SPROM
- * for this LED. Hardcode it here. */
- activelow = 0;
- switch (i) {
- case 0:
- behaviour = B43_LED_ACTIVITY;
- activelow = 1;
- if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
- behaviour = B43_LED_RADIO_ALL;
- break;
- case 1:
- behaviour = B43_LED_RADIO_B;
- if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
- behaviour = B43_LED_ASSOC;
- break;
- case 2:
- behaviour = B43_LED_RADIO_A;
- break;
- case 3:
- behaviour = B43_LED_OFF;
- break;
- default:
- B43_WARN_ON(1);
- return;
- }
+ if (sprom[led_index] == 0xFF) {
+ /* There is no LED information in the SPROM
+ * for this LED. Hardcode it here. */
+ *activelow = 0;
+ switch (led_index) {
+ case 0:
+ *behaviour = B43_LED_ACTIVITY;
+ *activelow = 1;
+ if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
+ *behaviour = B43_LED_RADIO_ALL;
+ break;
+ case 1:
+ *behaviour = B43_LED_RADIO_B;
+ if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
+ *behaviour = B43_LED_ASSOC;
+ break;
+ case 2:
+ *behaviour = B43_LED_RADIO_A;
+ break;
+ case 3:
+ *behaviour = B43_LED_OFF;
+ break;
+ default:
+ B43_WARN_ON(1);
+ return;
+ }
+ } else {
+ *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
+ *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
+ }
+}
+
+void b43_leds_init(struct b43_wldev *dev)
+{
+ struct b43_led *led;
+ unsigned int i;
+ enum b43_led_behaviour behaviour;
+ bool activelow;
+
+ /* Sync the RF-kill LED state (if we have one) with radio and switch states. */
+ led = &dev->wl->leds.led_radio;
+ if (led->wl) {
+ if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
+ b43_led_turn_on(dev, led->index, led->activelow);
+ led->hw_state = 1;
+ atomic_set(&led->state, 1);
} else {
- behaviour = sprom[i] & B43_LED_BEHAVIOUR;
- activelow = !!(sprom[i] & B43_LED_ACTIVELOW);
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
}
- b43_map_led(dev, i, behaviour, activelow);
}
+
+ /* Initialize TX/RX/ASSOC leds */
+ led = &dev->wl->leds.led_tx;
+ if (led->wl) {
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
+ }
+ led = &dev->wl->leds.led_rx;
+ if (led->wl) {
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
+ }
+ led = &dev->wl->leds.led_assoc;
+ if (led->wl) {
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
+ }
+
+ /* Initialize other LED states. */
+ for (i = 0; i < B43_MAX_NR_LEDS; i++) {
+ b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
+ switch (behaviour) {
+ case B43_LED_OFF:
+ b43_led_turn_off(dev, i, activelow);
+ break;
+ case B43_LED_ON:
+ b43_led_turn_on(dev, i, activelow);
+ break;
+ default:
+ /* Leave others as-is. */
+ break;
+ }
+ }
+
+ dev->wl->leds.stop = 0;
}
void b43_leds_exit(struct b43_wldev *dev)
{
- b43_unregister_led(&dev->led_tx);
- b43_unregister_led(&dev->led_rx);
- b43_unregister_led(&dev->led_assoc);
- b43_unregister_led(&dev->led_radio);
+ struct b43_leds *leds = &dev->wl->leds;
+
+ b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow);
+ b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow);
+ b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow);
+ b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow);
+}
+
+void b43_leds_stop(struct b43_wldev *dev)
+{
+ struct b43_leds *leds = &dev->wl->leds;
+
+ leds->stop = 1;
+ cancel_work_sync(&leds->work);
+}
+
+void b43_leds_register(struct b43_wldev *dev)
+{
+ unsigned int i;
+ enum b43_led_behaviour behaviour;
+ bool activelow;
+
+ INIT_WORK(&dev->wl->leds.work, b43_leds_work);
+
+ /* Register the LEDs to the LED subsystem. */
+ for (i = 0; i < B43_MAX_NR_LEDS; i++) {
+ b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
+ b43_map_led(dev, i, behaviour, activelow);
+ }
+}
+
+void b43_leds_unregister(struct b43_wldev *dev)
+{
+ struct b43_leds *leds = &dev->wl->leds;
+
+ b43_unregister_led(&leds->led_tx);
+ b43_unregister_led(&leds->led_rx);
+ b43_unregister_led(&leds->led_assoc);
+ b43_unregister_led(&leds->led_radio);
}
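
The LED rework spread over the hunks above boils down to a two-stage update: the LED-class callback may run in atomic context, so it only snapshots the requested brightness and queues work, while the work handler applies the state to the GPIO register under wl->mutex, which is why the old leds_lock spinlock could go away. A condensed sketch of that flow (function names are illustrative; the real handlers are b43_led_brightness_set() and b43_leds_work() above):

/* Stage 1: LED-class callback, possibly atomic -- record and defer. */
static void led_set_sketch(struct led_classdev *led_dev,
			   enum led_brightness brightness)
{
	struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);

	atomic_set(&led->state, brightness);	/* lockless snapshot */
	ieee80211_queue_work(led->wl->hw, &led->wl->leds.work);
}

/* Stage 2: workqueue context -- sleepable, so wl->mutex serializes the
 * MMIO access instead of a spinlock. */
static void leds_work_sketch(struct work_struct *work)
{
	struct b43_leds *leds = container_of(work, struct b43_leds, work);
	struct b43_wl *wl = container_of(leds, struct b43_wl, leds);

	mutex_lock(&wl->mutex);
	if (wl->current_dev)
		b43_led_update(wl->current_dev, &leds->led_tx);
	mutex_unlock(&wl->mutex);
}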
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h
index b8b1dd52124..9592e4c5a5f 100644
--- a/drivers/net/wireless/b43/leds.h
+++ b/drivers/net/wireless/b43/leds.h
@@ -7,12 +7,13 @@ struct b43_wldev;
#include <linux/types.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#define B43_LED_MAX_NAME_LEN 31
struct b43_led {
- struct b43_wldev *dev;
+ struct b43_wl *wl;
/* The LED class device */
struct led_classdev led_dev;
/* The index number of the LED. */
@@ -22,8 +23,24 @@ struct b43_led {
bool activelow;
/* The unique name string for this LED device. */
char name[B43_LED_MAX_NAME_LEN + 1];
+ /* The current status of the LED. This is updated locklessly. */
+ atomic_t state;
+ /* The active state in hardware. */
+ bool hw_state;
};
+struct b43_leds {
+ struct b43_led led_tx;
+ struct b43_led led_rx;
+ struct b43_led led_radio;
+ struct b43_led led_assoc;
+
+ bool stop;
+ struct work_struct work;
+};
+
+#define B43_MAX_NR_LEDS 4
+
#define B43_LED_BEHAVIOUR 0x7F
#define B43_LED_ACTIVELOW 0x80
/* LED behaviour values */
@@ -42,23 +59,35 @@ enum b43_led_behaviour {
B43_LED_INACTIVE,
};
+void b43_leds_register(struct b43_wldev *dev);
+void b43_leds_unregister(struct b43_wldev *dev);
void b43_leds_init(struct b43_wldev *dev);
void b43_leds_exit(struct b43_wldev *dev);
+void b43_leds_stop(struct b43_wldev *dev);
#else /* CONFIG_B43_LEDS */
/* LED support disabled */
-struct b43_led {
+struct b43_leds {
/* empty */
};
+static inline void b43_leds_register(struct b43_wldev *dev)
+{
+}
+static inline void b43_leds_unregister(struct b43_wldev *dev)
+{
+}
static inline void b43_leds_init(struct b43_wldev *dev)
{
}
static inline void b43_leds_exit(struct b43_wldev *dev)
{
}
+static inline void b43_leds_stop(struct b43_wldev *dev)
+{
+}
#endif /* CONFIG_B43_LEDS */
#endif /* B43_LEDS_H_ */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e789792a36b..9b907a36bb8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -8,6 +8,9 @@
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
+ SDIO support
+ Copyright (c) 2009 Albert Herranz <albert_herranz@yahoo.es>
+
Some parts of the code in this file are derived from the ipw2200
driver Copyright(c) 2003 - 2004 Intel Corporation.
@@ -53,6 +56,8 @@
#include "xmit.h"
#include "lo.h"
#include "pcmcia.h"
+#include "sdio.h"
+#include <linux/mmc/sdio_func.h>
MODULE_DESCRIPTION("Broadcom B43 wireless driver");
MODULE_AUTHOR("Martin Langer");
@@ -1587,7 +1592,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
- if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) {
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
mmiowb();
@@ -1825,6 +1830,16 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
/* Re-enable interrupts on the device by restoring the current interrupt mask. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
+
+#if B43_DEBUG
+ if (b43_debug(dev, B43_DBG_VERBOSESTATS)) {
+ dev->irq_count++;
+ for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) {
+ if (reason & (1 << i))
+ dev->irq_bit_count[i]++;
+ }
+ }
+#endif
}
/* Interrupt thread handler. Handles device interrupts in thread context. */
@@ -1905,6 +1920,21 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
return ret;
}
+/* SDIO interrupt handler. This runs in process context. */
+static void b43_sdio_interrupt_handler(struct b43_wldev *dev)
+{
+ struct b43_wl *wl = dev->wl;
+ irqreturn_t ret;
+
+ mutex_lock(&wl->mutex);
+
+ ret = b43_do_interrupt(dev);
+ if (ret == IRQ_WAKE_THREAD)
+ b43_do_interrupt_thread(dev);
+
+ mutex_unlock(&wl->mutex);
+}
+
void b43_do_release_fw(struct b43_firmware_file *fw)
{
release_firmware(fw->data);
@@ -2645,6 +2675,20 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
cfp_pretbtt = 50;
}
b43_write16(dev, 0x612, cfp_pretbtt);
+
+ /* FIXME: We don't currently implement the PMQ mechanism,
+ * so always disable it. If we want to implement PMQ,
+ * we need to enable it here (clear DISCPMQ) in AP mode.
+ */
+ if (0 /* ctl & B43_MACCTL_AP */) {
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL)
+ & ~B43_MACCTL_DISCPMQ);
+ } else {
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL)
+ | B43_MACCTL_DISCPMQ);
+ }
}
static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
@@ -2873,6 +2917,27 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
wmb();
+
+#if B43_DEBUG
+ if (b43_debug(dev, B43_DBG_VERBOSESTATS)) {
+ unsigned int i;
+
+ b43dbg(dev->wl, "Stats: %7u IRQs/sec, %7u TX/sec, %7u RX/sec\n",
+ dev->irq_count / 15,
+ dev->tx_count / 15,
+ dev->rx_count / 15);
+ dev->irq_count = 0;
+ dev->tx_count = 0;
+ dev->rx_count = 0;
+ for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) {
+ if (dev->irq_bit_count[i]) {
+ b43dbg(dev->wl, "Stats: %7u IRQ-%02u/sec (0x%08X)\n",
+ dev->irq_bit_count[i] / 15, i, (1 << i));
+ dev->irq_bit_count[i] = 0;
+ }
+ }
+ }
+#endif
}
static void do_periodic_work(struct b43_wldev *dev)
@@ -3002,14 +3067,18 @@ static void b43_security_init(struct b43_wldev *dev)
static int b43_rng_read(struct hwrng *rng, u32 *data)
{
struct b43_wl *wl = (struct b43_wl *)rng->priv;
+ struct b43_wldev *dev;
+ int count = -ENODEV;
- /* FIXME: We need to take wl->mutex here to make sure the device
- * is not going away from under our ass. However it could deadlock
- * with hwrng internal locking. */
-
- *data = b43_read16(wl->current_dev, B43_MMIO_RNG);
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (likely(dev && b43_status(dev) >= B43_STAT_INITIALIZED)) {
+ *data = b43_read16(dev, B43_MMIO_RNG);
+ count = sizeof(u16);
+ }
+ mutex_unlock(&wl->mutex);
- return (sizeof(u16));
+ return count;
}
#endif /* CONFIG_B43_HWRNG */
@@ -3068,6 +3137,9 @@ static void b43_tx_work(struct work_struct *work)
dev_kfree_skb(skb); /* Drop it */
}
+#if B43_DEBUG
+ dev->tx_count++;
+#endif
mutex_unlock(&wl->mutex);
}
@@ -3820,7 +3892,7 @@ redo:
/* Disable interrupts on the device. */
b43_set_status(dev, B43_STAT_INITIALIZED);
- if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) {
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
/* wl->mutex is locked. That is enough. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
@@ -3830,10 +3902,15 @@ redo:
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
spin_unlock_irq(&wl->hardirq_lock);
}
- /* Synchronize the interrupt handlers. Unlock to avoid deadlocks. */
+ /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
orig_dev = dev;
mutex_unlock(&wl->mutex);
- synchronize_irq(dev->dev->irq);
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ b43_sdio_free_irq(dev);
+ } else {
+ synchronize_irq(dev->dev->irq);
+ free_irq(dev->dev->irq, dev);
+ }
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev)
@@ -3850,7 +3927,7 @@ redo:
dev_kfree_skb(skb_dequeue(&wl->tx_queue));
b43_mac_suspend(dev);
- free_irq(dev->dev->irq, dev);
+ b43_leds_exit(dev);
b43dbg(wl, "Wireless interface stopped\n");
return dev;
@@ -3864,12 +3941,20 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
drain_txstatus_queue(dev);
- err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
- b43_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (err) {
- b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq);
- goto out;
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);
+ if (err) {
+ b43err(dev->wl, "Cannot request SDIO IRQ\n");
+ goto out;
+ }
+ } else {
+ err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
+ b43_interrupt_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (err) {
+ b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq);
+ goto out;
+ }
}
/* We are ready to run. */
@@ -3882,8 +3967,10 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
	/* Start maintenance work */
b43_periodic_tasks_setup(dev);
+ b43_leds_init(dev);
+
b43dbg(dev->wl, "Wireless interface started\n");
- out:
+out:
return err;
}
@@ -4160,10 +4247,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
macctl |= B43_MACCTL_PSM_JMP0;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
- if (!dev->suspend_in_progress) {
- b43_leds_exit(dev);
- b43_rng_exit(dev->wl);
- }
b43_dma_free(dev);
b43_pio_free(dev);
b43_chip_exit(dev);
@@ -4180,7 +4263,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
/* Initialize a wireless core */
static int b43_wireless_core_init(struct b43_wldev *dev)
{
- struct b43_wl *wl = dev->wl;
struct ssb_bus *bus = dev->dev->bus;
struct ssb_sprom *sprom = &bus->sprom;
struct b43_phy *phy = &dev->phy;
@@ -4264,7 +4346,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
/* Maximum Contention Window */
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
- if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || B43_FORCE_PIO) {
+ if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
+ (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
+ B43_FORCE_PIO) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4280,15 +4364,12 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
b43_upload_card_macaddress(dev);
b43_security_init(dev);
- if (!dev->suspend_in_progress)
- b43_rng_init(wl);
+
ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_INITIALIZED);
- if (!dev->suspend_in_progress)
- b43_leds_init(dev);
out:
return err;
@@ -4837,7 +4919,6 @@ static int b43_wireless_init(struct ssb_device *dev)
/* Initialize struct b43_wl */
wl->hw = hw;
- spin_lock_init(&wl->leds_lock);
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
INIT_LIST_HEAD(&wl->devlist);
@@ -4878,6 +4959,8 @@ static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
+ b43_leds_register(wl->current_dev);
+ b43_rng_init(wl);
}
out:
@@ -4906,12 +4989,15 @@ static void b43_remove(struct ssb_device *dev)
* might have modified it. Restoring is important, so the networking
* stack can properly free resources. */
wl->hw->queues = wl->mac80211_initially_registered_queues;
+ b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
b43_one_core_detach(dev);
if (list_empty(&wl->devlist)) {
+ b43_rng_exit(wl);
+ b43_leds_unregister(wldev);
/* Last core on the chip unregistered.
* We can destroy common struct b43_wl.
*/
@@ -4929,80 +5015,17 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
ieee80211_queue_work(dev->wl->hw, &dev->restart_work);
}
-#ifdef CONFIG_PM
-
-static int b43_suspend(struct ssb_device *dev, pm_message_t state)
-{
- struct b43_wldev *wldev = ssb_get_drvdata(dev);
- struct b43_wl *wl = wldev->wl;
-
- b43dbg(wl, "Suspending...\n");
-
- mutex_lock(&wl->mutex);
- wldev->suspend_in_progress = true;
- wldev->suspend_init_status = b43_status(wldev);
- if (wldev->suspend_init_status >= B43_STAT_STARTED)
- wldev = b43_wireless_core_stop(wldev);
- if (wldev && wldev->suspend_init_status >= B43_STAT_INITIALIZED)
- b43_wireless_core_exit(wldev);
- mutex_unlock(&wl->mutex);
-
- b43dbg(wl, "Device suspended.\n");
-
- return 0;
-}
-
-static int b43_resume(struct ssb_device *dev)
-{
- struct b43_wldev *wldev = ssb_get_drvdata(dev);
- struct b43_wl *wl = wldev->wl;
- int err = 0;
-
- b43dbg(wl, "Resuming...\n");
-
- mutex_lock(&wl->mutex);
- if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) {
- err = b43_wireless_core_init(wldev);
- if (err) {
- b43err(wl, "Resume failed at core init\n");
- goto out;
- }
- }
- if (wldev->suspend_init_status >= B43_STAT_STARTED) {
- err = b43_wireless_core_start(wldev);
- if (err) {
- b43_leds_exit(wldev);
- b43_rng_exit(wldev->wl);
- b43_wireless_core_exit(wldev);
- b43err(wl, "Resume failed at core start\n");
- goto out;
- }
- }
- b43dbg(wl, "Device resumed.\n");
- out:
- wldev->suspend_in_progress = false;
- mutex_unlock(&wl->mutex);
- return err;
-}
-
-#else /* CONFIG_PM */
-# define b43_suspend NULL
-# define b43_resume NULL
-#endif /* CONFIG_PM */
-
static struct ssb_driver b43_ssb_driver = {
.name = KBUILD_MODNAME,
.id_table = b43_ssb_tbl,
.probe = b43_probe,
.remove = b43_remove,
- .suspend = b43_suspend,
- .resume = b43_resume,
};
static void b43_print_driverinfo(void)
{
const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
- *feat_leds = "";
+ *feat_leds = "", *feat_sdio = "";
#ifdef CONFIG_B43_PCI_AUTOSELECT
feat_pci = "P";
@@ -5016,11 +5039,14 @@ static void b43_print_driverinfo(void)
#ifdef CONFIG_B43_LEDS
feat_leds = "L";
#endif
+#ifdef CONFIG_B43_SDIO
+ feat_sdio = "S";
+#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
- "[ Features: %s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s%s, Firmware-ID: "
B43_SUPPORTED_FIRMWARE_ID " ]\n",
feat_pci, feat_pcmcia, feat_nphy,
- feat_leds);
+ feat_leds, feat_sdio);
}
static int __init b43_init(void)
@@ -5031,13 +5057,18 @@ static int __init b43_init(void)
err = b43_pcmcia_init();
if (err)
goto err_dfs_exit;
- err = ssb_driver_register(&b43_ssb_driver);
+ err = b43_sdio_init();
if (err)
goto err_pcmcia_exit;
+ err = ssb_driver_register(&b43_ssb_driver);
+ if (err)
+ goto err_sdio_exit;
b43_print_driverinfo();
return err;
+err_sdio_exit:
+ b43_sdio_exit();
err_pcmcia_exit:
b43_pcmcia_exit();
err_dfs_exit:
@@ -5048,6 +5079,7 @@ err_dfs_exit:
static void __exit b43_exit(void)
{
ssb_driver_unregister(&b43_ssb_driver);
+ b43_sdio_exit();
b43_pcmcia_exit();
b43_debugfs_exit();
}
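
On SDIO the MMC core delivers the interrupt callback in process context, and on PCI/SSB the heavy lifting already sits in a threaded handler, so the TX-status and RX completion paths never run in hard-IRQ context anymore; that is what allows dma.c, pio.c and xmit.c below to call ieee80211_tx_status()/ieee80211_rx() directly instead of the _irqsafe variants. A condensed sketch of the bus-dependent IRQ setup this relies on (mirroring the code added above; the wrapper name is illustrative):

/* Sketch: choose the IRQ mechanism by bus type; both paths end up running
 * the interrupt work in a sleepable context under wl->mutex. */
static int request_irq_sketch(struct b43_wldev *dev)
{
	if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO)
		/* Callback runs in the MMC core's process context. */
		return b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);

	/* Hard-IRQ top half only acks/masks; the threaded handler may sleep. */
	return request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
				    b43_interrupt_thread_handler,
				    IRQF_SHARED, KBUILD_MODNAME, dev);
}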
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e02d969f68..1e318d815a5 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -2228,6 +2228,16 @@ static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
return B43_TXPWR_RES_DONE;
}
+void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
+{
+ if (on) {
+ b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8);
+ } else {
+ b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0x0007);
+ b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x0007);
+ }
+}
+
const struct b43_phy_operations b43_phyops_lp = {
.allocate = b43_lpphy_op_allocate,
.free = b43_lpphy_op_free,
@@ -2239,7 +2249,7 @@ const struct b43_phy_operations b43_phyops_lp = {
.radio_read = b43_lpphy_op_radio_read,
.radio_write = b43_lpphy_op_radio_write,
.software_rfkill = b43_lpphy_op_software_rfkill,
- .switch_analog = b43_phyop_switch_analog_generic,
+ .switch_analog = b43_lpphy_op_switch_analog,
.switch_channel = b43_lpphy_op_switch_channel,
.get_default_chan = b43_lpphy_op_get_default_chan,
.set_rx_antenna = b43_lpphy_op_set_rx_antenna,
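
The LP-PHY now installs its own analog switch instead of the generic helper; like the other PHY ops it is reached through the b43_phy_operations table, roughly as sketched here (the wrapper is illustrative; the real caller sits in the shared PHY code):

/* Illustrative dispatch through the per-PHY ops table; for LP-PHYs this
 * now ends up in b43_lpphy_op_switch_analog(). */
static void switch_analog_sketch(struct b43_wldev *dev, bool on)
{
	if (dev->phy.ops->switch_analog)
		dev->phy.ops->switch_analog(dev, on);
}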
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 3498b68385e..e96091b3149 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -574,7 +574,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
q->buffer_used -= total_len;
q->free_packet_slots += 1;
- ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb);
+ ieee80211_tx_status(dev->wl->hw, pack->skb);
pack->skb = NULL;
list_add(&pack->list, &q->packets_list);
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 31e55999893..7a3218c5ba7 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -28,7 +28,7 @@
/* Returns TRUE, if the radio is enabled in hardware. */
bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
{
- if (dev->phy.rev >= 3) {
+ if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) {
if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
& B43_MMIO_RADIO_HWENABLED_HI_MASK))
return 1;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
new file mode 100644
index 00000000000..0d3ac64147a
--- /dev/null
+++ b/drivers/net/wireless/b43/sdio.c
@@ -0,0 +1,202 @@
+/*
+ * Broadcom B43 wireless driver
+ *
+ * SDIO over Sonics Silicon Backplane bus glue for b43.
+ *
+ * Copyright (C) 2009 Albert Herranz
+ * Copyright (C) 2009 Michael Buesch <mb@bu3sch.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/ssb/ssb.h>
+
+#include "sdio.h"
+#include "b43.h"
+
+
+#define HNBU_CHIPID 0x01 /* vendor & device id */
+
+#define B43_SDIO_BLOCK_SIZE 64 /* rx fifo max size in bytes */
+
+
+static const struct b43_sdio_quirk {
+ u16 vendor;
+ u16 device;
+ unsigned int quirks;
+} b43_sdio_quirks[] = {
+ { 0x14E4, 0x4318, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, },
+ { },
+};
+
+
+static unsigned int b43_sdio_get_quirks(u16 vendor, u16 device)
+{
+ const struct b43_sdio_quirk *q;
+
+ for (q = b43_sdio_quirks; q->quirks; q++) {
+ if (vendor == q->vendor && device == q->device)
+ return q->quirks;
+ }
+
+ return 0;
+}
+
+static void b43_sdio_interrupt_dispatcher(struct sdio_func *func)
+{
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+ struct b43_wldev *dev = sdio->irq_handler_opaque;
+
+ if (unlikely(b43_status(dev) < B43_STAT_STARTED))
+ return;
+
+ sdio_release_host(func);
+ sdio->irq_handler(dev);
+ sdio_claim_host(func);
+}
+
+int b43_sdio_request_irq(struct b43_wldev *dev,
+ void (*handler)(struct b43_wldev *dev))
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct sdio_func *func = bus->host_sdio;
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+ int err;
+
+ sdio->irq_handler_opaque = dev;
+ sdio->irq_handler = handler;
+ sdio_claim_host(func);
+ err = sdio_claim_irq(func, b43_sdio_interrupt_dispatcher);
+ sdio_release_host(func);
+
+ return err;
+}
+
+void b43_sdio_free_irq(struct b43_wldev *dev)
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct sdio_func *func = bus->host_sdio;
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+ sdio->irq_handler_opaque = NULL;
+ sdio->irq_handler = NULL;
+}
+
+static int b43_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct b43_sdio *sdio;
+ struct sdio_func_tuple *tuple;
+ u16 vendor = 0, device = 0;
+ int error;
+
+ /* Look for the card chip identifier. */
+ tuple = func->tuples;
+ while (tuple) {
+ switch (tuple->code) {
+ case 0x80:
+ switch (tuple->data[0]) {
+ case HNBU_CHIPID:
+ if (tuple->size != 5)
+ break;
+ vendor = tuple->data[1] | (tuple->data[2]<<8);
+ device = tuple->data[3] | (tuple->data[4]<<8);
+ dev_info(&func->dev, "Chip ID %04x:%04x\n",
+ vendor, device);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ tuple = tuple->next;
+ }
+ if (!vendor || !device) {
+ error = -ENODEV;
+ goto out;
+ }
+
+ sdio_claim_host(func);
+ error = sdio_set_block_size(func, B43_SDIO_BLOCK_SIZE);
+ if (error) {
+ dev_err(&func->dev, "failed to set block size to %u bytes,"
+ " error %d\n", B43_SDIO_BLOCK_SIZE, error);
+ goto err_release_host;
+ }
+ error = sdio_enable_func(func);
+ if (error) {
+ dev_err(&func->dev, "failed to enable func, error %d\n", error);
+ goto err_release_host;
+ }
+ sdio_release_host(func);
+
+ sdio = kzalloc(sizeof(*sdio), GFP_KERNEL);
+ if (!sdio) {
+ error = -ENOMEM;
+ dev_err(&func->dev, "failed to allocate ssb bus\n");
+ goto err_disable_func;
+ }
+ error = ssb_bus_sdiobus_register(&sdio->ssb, func,
+ b43_sdio_get_quirks(vendor, device));
+ if (error) {
+ dev_err(&func->dev, "failed to register ssb sdio bus,"
+ " error %d\n", error);
+ goto err_free_ssb;
+ }
+ sdio_set_drvdata(func, sdio);
+
+ return 0;
+
+err_free_ssb:
+ kfree(sdio);
+err_disable_func:
+ sdio_disable_func(func);
+err_release_host:
+ sdio_release_host(func);
+out:
+ return error;
+}
+
+static void b43_sdio_remove(struct sdio_func *func)
+{
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+
+ ssb_bus_unregister(&sdio->ssb);
+ sdio_disable_func(func);
+ kfree(sdio);
+ sdio_set_drvdata(func, NULL);
+}
+
+static const struct sdio_device_id b43_sdio_ids[] = {
+ { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
+ { },
+};
+
+static struct sdio_driver b43_sdio_driver = {
+ .name = "b43-sdio",
+ .id_table = b43_sdio_ids,
+ .probe = b43_sdio_probe,
+ .remove = b43_sdio_remove,
+};
+
+int b43_sdio_init(void)
+{
+ return sdio_register_driver(&b43_sdio_driver);
+}
+
+void b43_sdio_exit(void)
+{
+ sdio_unregister_driver(&b43_sdio_driver);
+}
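
b43_sdio_get_quirks() walks the quirk table until it hits the all-zero terminator and hands the resulting flags to ssb_bus_sdiobus_register(), so supporting another SDIO part is a matter of adding a row. A hypothetical extra entry is shown commented out below (device 0x4319 is not a known part, it is purely illustrative):

static const struct b43_sdio_quirk b43_sdio_quirks_example[] = {
	{ 0x14E4, 0x4318, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, },
	/* { 0x14E4, 0x4319, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, }, */
	{ },	/* terminator: quirks == 0 stops the lookup loop */
};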
diff --git a/drivers/net/wireless/b43/sdio.h b/drivers/net/wireless/b43/sdio.h
new file mode 100644
index 00000000000..fb633094403
--- /dev/null
+++ b/drivers/net/wireless/b43/sdio.h
@@ -0,0 +1,45 @@
+#ifndef B43_SDIO_H_
+#define B43_SDIO_H_
+
+#include <linux/ssb/ssb.h>
+
+struct b43_wldev;
+
+
+#ifdef CONFIG_B43_SDIO
+
+struct b43_sdio {
+ struct ssb_bus ssb;
+ void *irq_handler_opaque;
+ void (*irq_handler)(struct b43_wldev *dev);
+};
+
+int b43_sdio_request_irq(struct b43_wldev *dev,
+ void (*handler)(struct b43_wldev *dev));
+void b43_sdio_free_irq(struct b43_wldev *dev);
+
+int b43_sdio_init(void);
+void b43_sdio_exit(void);
+
+
+#else /* CONFIG_B43_SDIO */
+
+
+static inline int b43_sdio_request_irq(struct b43_wldev *dev,
+ void (*handler)(struct b43_wldev *dev))
+{
+ return -ENODEV;
+}
+static inline void b43_sdio_free_irq(struct b43_wldev *dev)
+{
+}
+static inline int b43_sdio_init(void)
+{
+ return 0;
+}
+static inline void b43_sdio_exit(void)
+{
+}
+
+#endif /* CONFIG_B43_SDIO */
+#endif /* B43_SDIO_H_ */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 14f541248b5..ac9f600995e 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -690,8 +690,11 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
- ieee80211_rx_irqsafe(dev->wl->hw, skb);
+ ieee80211_rx(dev->wl->hw, skb);
+#if B43_DEBUG
+ dev->rx_count++;
+#endif
return;
drop:
b43dbg(dev->wl, "RX: Packet dropped\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index ca61d3796ce..3259b884154 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2021,6 +2021,12 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
agg->frame_count, txq_id, idx);
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
sc = le16_to_cpu(hdr->seq_ctrl);
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 1d539e3b8db..a6391c7fea5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1163,6 +1163,12 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
agg->frame_count, txq_id, idx);
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
sc = le16_to_cpu(hdr->seq_ctrl);
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index b90adcb73b0..8e1bb53c0aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -250,12 +250,20 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
}
spin_unlock_irqrestore(&rxq->lock, flags);
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ priority |= __GFP_NOWARN;
/* Alloc a new receive buffer */
skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
priority);
if (!skb) {
- IWL_CRIT(priv, "Can not allocate SKB buffers\n");
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index a2b9ec82b96..c6633fec821 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -520,7 +520,7 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
struct iwl_host_cmd cmd = {
.id = REPLY_WEPKEY,
.data = wep_cmd,
- .flags = CMD_SYNC,
+ .flags = CMD_ASYNC,
};
memset(wep_cmd, 0, cmd_size +
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 090966837f3..4f2d4393728 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1146,11 +1146,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
}
spin_unlock_irqrestore(&rxq->lock, flags);
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ priority |= __GFP_NOWARN;
/* Alloc a new receive buffer */
skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
if (!skb) {
if (net_ratelimit())
- IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
+ IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
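
Both iwlwifi RX refill paths now follow the same policy: while the free pool is above RX_LOW_WATERMARK a failed skb allocation is made silent with __GFP_NOWARN and only noted at debug level, and the rate-limited critical message fires only once the pool is nearly exhausted. A generic sketch of the pattern (the parameters stand in for rxq->free_count and RX_LOW_WATERMARK):

/* Sketch of the "quiet until it matters" allocation policy used above. */
static struct sk_buff *rx_alloc_sketch(unsigned int len, gfp_t priority,
				       unsigned int free_count,
				       unsigned int low_watermark)
{
	struct sk_buff *skb;

	if (free_count > low_watermark)
		priority |= __GFP_NOWARN;	/* plenty left; don't spam dmesg */

	skb = alloc_skb(len, priority);
	if (!skb && free_count <= low_watermark && net_ratelimit())
		pr_crit("RX pool nearly empty: only %u buffers left\n",
			free_count);
	return skb;
}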
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 446e327180f..cb8be8d7abc 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1222,3 +1222,4 @@ MODULE_DESCRIPTION("Libertas SPI WLAN Driver");
MODULE_AUTHOR("Andrey Yurovsky <andrey@cozybit.com>, "
"Colin McCabe <colin@cozybit.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:libertas_spi");
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 05458d9249c..afd26bf0664 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -731,3 +731,4 @@ module_exit(p54spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
+MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 5462cb5ad99..567f029a8cd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -380,7 +380,7 @@ static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb,
{
}
-static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, bool l2pad,
+static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
unsigned int header_length,
struct rxdone_entry_desc *rxdesc)
{
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 7b14d5bc63d..88060e11754 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,5 +1,5 @@
menuconfig WL12XX
- boolean "TI wl12xx driver support"
+ tristate "TI wl12xx driver support"
depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
---help---
This will enable TI wl12xx driver support. The drivers make
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 5809ef5b18f..1103256ad98 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -1426,3 +1426,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
+MODULE_ALIAS("spi:wl12xx");
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 5e110a2328a..4e79a980013 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -368,7 +368,7 @@ error:
return r;
}
-/* MAC address: if custom mac addresses are to to be used CR_MAC_ADDR_P1 and
+/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
* CR_MAC_ADDR_P2 must be overwritten
*/
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 38688847d56..23a6a6d4863 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1070,7 +1070,7 @@ static int eject_installer(struct usb_interface *intf)
/* Find bulk out endpoint */
endpoint = &iface_desc->endpoint[1].desc;
- if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
+ if (usb_endpoint_dir_out(endpoint) &&
usb_endpoint_xfer_bulk(endpoint)) {
bulk_out_ep = endpoint->bEndpointAddress;
} else {
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index dc22782633a..83a044dbd1d 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -134,18 +134,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
}
/* Enable the Rx interrupts for the first buffer */
- reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
- reg_data | XEL_RSR_RECV_IE_MASK);
+ XEL_RSR_RECV_IE_MASK);
/* Enable the Rx interrupts for the second Buffer if
* configured in HW */
if (drvdata->rx_ping_pong != 0) {
- reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_RSR_OFFSET);
out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_RSR_OFFSET,
- reg_data | XEL_RSR_RECV_IE_MASK);
+ XEL_RSR_RECV_IE_MASK);
}
/* Enable the Global Interrupt Enable */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 69f85c07d17..ddf224d456b 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -447,7 +447,6 @@ struct of_modalias_table {
static struct of_modalias_table of_modalias_table[] = {
{ "fsl,mcu-mpc8349emitx", "mcu-mpc8349emitx" },
{ "mmc-spi-slot", "mmc_spi" },
- { "stm,m25p40", "m25p80" },
};
/**
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a..c9e2ae90f19 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
{
int err;
- if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- cpumask_clear(marked_cpus);
start_cpu_work();
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b7e4cee2426..2766a6d3c2e 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -35,7 +35,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
}
-static struct super_operations s_ops = {
+static const struct super_operations s_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index a45b0c0d574..a6b4a5a53d4 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1266,7 +1266,7 @@ ccio_ioc_init(struct ioc *ioc)
** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
*/
- iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));
+ iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));
/* limit IOVA space size to 1MB-1GB */
@@ -1305,7 +1305,7 @@ ccio_ioc_init(struct ioc *ioc)
DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
__func__, ioc->ioc_regs,
- (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+ (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
iova_space_size>>20,
iov_order + PAGE_SHIFT);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 123d8fe3427..57a6d19eba4 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1390,7 +1390,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
** for DMA hints - ergo only 30 bits max.
*/
- iova_space_size = (u32) (num_physpages/global_ioc_cnt);
+ iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
/* limit IOVA space size to 1MB-1GB */
if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
@@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
__func__,
ioc->ioc_hpa,
- (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+ (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
iova_space_size>>20,
iov_order + PAGE_SHIFT);
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 554e11f9e1c..8eefe56f1cb 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -31,7 +31,7 @@
#define PARPORT_MIN_SPINTIME_VALUE 1
#define PARPORT_MAX_SPINTIME_VALUE 1000
-static int do_active_device(ctl_table *table, int write, struct file *filp,
+static int do_active_device(ctl_table *table, int write,
void __user *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -68,7 +68,7 @@ static int do_active_device(ctl_table *table, int write, struct file *filp,
}
#ifdef CONFIG_PARPORT_1284
-static int do_autoprobe(ctl_table *table, int write, struct file *filp,
+static int do_autoprobe(ctl_table *table, int write,
void __user *result, size_t *lenp, loff_t *ppos)
{
struct parport_device_info *info = table->extra2;
@@ -111,7 +111,7 @@ static int do_autoprobe(ctl_table *table, int write, struct file *filp,
#endif /* IEEE1284.3 support. */
static int do_hardware_base_addr (ctl_table *table, int write,
- struct file *filp, void __user *result,
+ void __user *result,
size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -139,7 +139,7 @@ static int do_hardware_base_addr (ctl_table *table, int write,
}
static int do_hardware_irq (ctl_table *table, int write,
- struct file *filp, void __user *result,
+ void __user *result,
size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -167,7 +167,7 @@ static int do_hardware_irq (ctl_table *table, int write,
}
static int do_hardware_dma (ctl_table *table, int write,
- struct file *filp, void __user *result,
+ void __user *result,
size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
@@ -195,7 +195,7 @@ static int do_hardware_dma (ctl_table *table, int write,
}
static int do_hardware_modes (ctl_table *table, int write,
- struct file *filp, void __user *result,
+ void __user *result,
size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index ab99783dcce..14bbaa17e2c 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -34,9 +34,9 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
+#include <linux/dmi.h>
-#undef PREFIX
-#define PREFIX "DMAR:"
+#define PREFIX "DMAR: "
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
@@ -577,9 +577,6 @@ int __init dmar_table_init(void)
printk(KERN_INFO PREFIX "No ATSR found\n");
#endif
-#ifdef CONFIG_INTR_REMAP
- parse_ioapics_under_ir();
-#endif
return 0;
}
@@ -639,20 +636,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+ if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ goto err_unmap;
+ }
+
#ifdef CONFIG_DMAR
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
"Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
- goto error;
+ goto err_unmap;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
printk(KERN_ERR
"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
- goto error;
+ goto err_unmap;
}
#endif
iommu->agaw = agaw;
@@ -672,7 +680,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
}
ver = readl(iommu->reg + DMAR_VER_REG);
- pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+ pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
(unsigned long long)drhd->reg_base_addr,
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
(unsigned long long)iommu->cap,
@@ -682,7 +690,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
drhd->iommu = iommu;
return 0;
-error:
+
+ err_unmap:
+ iounmap(iommu->reg);
+ error:
kfree(iommu);
return -1;
}
@@ -1219,7 +1230,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
source_id, guest_addr);
fault_index++;
- if (fault_index > cap_num_fault_regs(iommu->cap))
+ if (fault_index >= cap_num_fault_regs(iommu->cap))
fault_index = 0;
spin_lock_irqsave(&iommu->register_lock, flag);
}
@@ -1312,3 +1323,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
return 0;
}
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+ struct acpi_table_dmar *dmar;
+ dmar = (struct acpi_table_dmar *)dmar_tbl;
+ return dmar->flags & 0x1;
+}
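
The new dmar_ir_support() simply reports bit 0 (interrupt remapping supported) of the ACPI DMAR table's flags field, giving callers a cheap early-out before any interrupt-remapping setup. A hypothetical call site:

/* Hypothetical early check in interrupt-remapping init code. */
int intr_remap_init_sketch(void)
{
	if (!dmar_ir_support()) {
		pr_info("DMAR: interrupt remapping not advertised by BIOS\n");
		return -ENODEV;
	}
	/* ... continue with queued-invalidation / IR setup ... */
	return 0;
}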
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 5befa7e379b..e7be66dbac2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -398,23 +398,20 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
acpi_handle *phandle = (acpi_handle *)context;
acpi_status status;
struct acpi_device_info *info;
- struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
int retval = 0;
- status = acpi_get_object_info(handle, &info_buffer);
+ status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status)) {
err("%s: Failed to get device information status=0x%x\n",
__func__, status);
return retval;
}
- info = info_buffer.pointer;
- info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0';
if (info->current_status && (info->valid & ACPI_VALID_HID) &&
- (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) ||
- !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) {
+ (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
+ !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
dbg("found hardware: %s, handle: %p\n",
- info->hardware_id.value, handle);
+ info->hardware_id.string, handle);
*phandle = handle;
/* returning non-zero causes the search to stop
* and returns this value to the caller of
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 36faa9a8e18..3070f77eb56 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,15 +72,9 @@ do { \
#define SLOT_NAME_SIZE 10
struct slot {
- u8 bus;
- u8 device;
u8 state;
- u8 hp_slot;
- u32 number;
struct controller *ctrl;
- struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
- struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -92,18 +86,10 @@ struct event_info {
};
struct controller {
- struct mutex crit_sect; /* critical section mutex */
struct mutex ctrl_lock; /* controller lock */
- int num_slots; /* Number of slots on ctlr */
- int slot_num_inc; /* 1 or -1 */
- struct pci_dev *pci_dev;
struct pcie_device *pcie; /* PCI Express port service */
- struct list_head slot_list;
- struct hpc_ops *hpc_ops;
+ struct slot *slot;
wait_queue_head_t queue; /* sleep & wake process */
- u8 slot_device_offset;
- u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */
- u8 slot_bus; /* Bus where the slots handled by this controller sit */
u32 slot_cap;
u8 cap_base;
struct timer_list poll_timer;
@@ -131,40 +117,20 @@ struct controller {
#define POWERON_STATE 3
#define POWEROFF_STATE 4
-/* Error messages */
-#define INTERLOCK_OPEN 0x00000002
-#define ADD_NOT_SUPPORTED 0x00000003
-#define CARD_FUNCTIONING 0x00000005
-#define ADAPTER_NOT_SAME 0x00000006
-#define NO_ADAPTER_PRESENT 0x00000009
-#define NOT_ENOUGH_RESOURCES 0x0000000B
-#define DEVICE_TYPE_NOT_SUPPORTED 0x0000000C
-#define WRONG_BUS_FREQUENCY 0x0000000D
-#define POWER_FAILURE 0x0000000E
-
-/* Field definitions in Slot Capabilities Register */
-#define ATTN_BUTTN_PRSN 0x00000001
-#define PWR_CTRL_PRSN 0x00000002
-#define MRL_SENS_PRSN 0x00000004
-#define ATTN_LED_PRSN 0x00000008
-#define PWR_LED_PRSN 0x00000010
-#define HP_SUPR_RM_SUP 0x00000020
-#define EMI_PRSN 0x00020000
-#define NO_CMD_CMPL_SUP 0x00040000
-
-#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & ATTN_BUTTN_PRSN)
-#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PWR_CTRL_PRSN)
-#define MRL_SENS(ctrl) ((ctrl)->slot_cap & MRL_SENS_PRSN)
-#define ATTN_LED(ctrl) ((ctrl)->slot_cap & ATTN_LED_PRSN)
-#define PWR_LED(ctrl) ((ctrl)->slot_cap & PWR_LED_PRSN)
-#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & HP_SUPR_RM_SUP)
-#define EMI(ctrl) ((ctrl)->slot_cap & EMI_PRSN)
-#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & NO_CMD_CMPL_SUP)
+#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
+#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
+#define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP)
+#define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP)
+#define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP)
+#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
+#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
+#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
+#define PSN(ctrl) ((ctrl)->slot_cap >> 19)
extern int pciehp_sysfs_enable_slot(struct slot *slot);
extern int pciehp_sysfs_disable_slot(struct slot *slot);
extern u8 pciehp_handle_attention_button(struct slot *p_slot);
- extern u8 pciehp_handle_switch_change(struct slot *p_slot);
+extern u8 pciehp_handle_switch_change(struct slot *p_slot);
extern u8 pciehp_handle_presence_change(struct slot *p_slot);
extern u8 pciehp_handle_power_fault(struct slot *p_slot);
extern int pciehp_configure_device(struct slot *p_slot);
@@ -175,45 +141,30 @@ int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
int pcie_enable_notification(struct controller *ctrl);
+int pciehp_power_on_slot(struct slot *slot);
+int pciehp_power_off_slot(struct slot *slot);
+int pciehp_get_power_status(struct slot *slot, u8 *status);
+int pciehp_get_attention_status(struct slot *slot, u8 *status);
+
+int pciehp_set_attention_status(struct slot *slot, u8 status);
+int pciehp_get_latch_status(struct slot *slot, u8 *status);
+int pciehp_get_adapter_status(struct slot *slot, u8 *status);
+int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed);
+int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val);
+int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed);
+int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val);
+int pciehp_query_power_fault(struct slot *slot);
+void pciehp_green_led_on(struct slot *slot);
+void pciehp_green_led_off(struct slot *slot);
+void pciehp_green_led_blink(struct slot *slot);
+int pciehp_check_link_status(struct controller *ctrl);
+void pciehp_release_ctrl(struct controller *ctrl);
static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
-static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
-{
- struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (slot->device == device)
- return slot;
- }
-
- ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
- return NULL;
-}
-
-struct hpc_ops {
- int (*power_on_slot)(struct slot *slot);
- int (*power_off_slot)(struct slot *slot);
- int (*get_power_status)(struct slot *slot, u8 *status);
- int (*get_attention_status)(struct slot *slot, u8 *status);
- int (*set_attention_status)(struct slot *slot, u8 status);
- int (*get_latch_status)(struct slot *slot, u8 *status);
- int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
- int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val);
- int (*query_power_fault)(struct slot *slot);
- void (*green_led_on)(struct slot *slot);
- void (*green_led_off)(struct slot *slot);
- void (*green_led_blink)(struct slot *slot);
- void (*release_ctlr)(struct controller *ctrl);
- int (*check_lnk_status)(struct controller *ctrl);
-};
-
#ifdef CONFIG_ACPI
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
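
With the private capability bit names gone, the macros above test the cached Slot Capabilities word against the standard PCI_EXP_SLTCAP_* definitions, and PSN() pulls the physical slot number out of bits 31:19. A short sketch of caching and querying that word (the plain config-space read is illustrative; pciehp uses its own register access helpers):

/* Sketch: cache the Slot Capabilities register once, then use the macros. */
static void read_slot_cap_sketch(struct controller *ctrl, struct pci_dev *pdev)
{
	pci_read_config_dword(pdev, ctrl->cap_base + PCI_EXP_SLTCAP,
			      &ctrl->slot_cap);

	if (ATTN_BUTTN(ctrl))
		ctrl_dbg(ctrl, "Attention button present\n");
	ctrl_dbg(ctrl, "Physical slot number: %u\n", PSN(ctrl));
}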
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 7163e6a6cfa..37c8d3d0323 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -33,6 +33,11 @@
#define PCIEHP_DETECT_AUTO (2)
#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO
+struct dummy_slot {
+ u32 number;
+ struct list_head list;
+};
+
static int slot_detection_mode;
static char *pciehp_detect_mode;
module_param(pciehp_detect_mode, charp, 0444);
@@ -77,7 +82,7 @@ static int __init dummy_probe(struct pcie_device *dev)
int pos;
u32 slot_cap;
acpi_handle handle;
- struct slot *slot, *tmp;
+ struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
if (pciehp_get_hp_hw_control_from_firmware(pdev))
@@ -89,11 +94,11 @@ static int __init dummy_probe(struct pcie_device *dev)
if (!slot)
return -ENOMEM;
slot->number = slot_cap >> 19;
- list_for_each_entry(tmp, &dummy_slots, slot_list) {
+ list_for_each_entry(tmp, &dummy_slots, list) {
if (tmp->number == slot->number)
dup_slot_id++;
}
- list_add_tail(&slot->slot_list, &dummy_slots);
+ list_add_tail(&slot->list, &dummy_slots);
handle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
acpi_slot_detected = 1;
@@ -109,11 +114,11 @@ static struct pcie_port_service_driver __initdata dummy_driver = {
static int __init select_detection_mode(void)
{
- struct slot *slot, *tmp;
+ struct dummy_slot *slot, *tmp;
pcie_port_service_register(&dummy_driver);
pcie_port_service_unregister(&dummy_driver);
- list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) {
- list_del(&slot->slot_list);
+ list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
+ list_del(&slot->list);
kfree(slot);
}
if (acpi_slot_detected && dup_slot_id)
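As a side note (not from the patch), the teardown in select_detection_mode() above relies on the _safe list iterator because each node is freed while the list is being walked; a minimal sketch of that idiom follows, using the hypothetical names demo_slot and demo_free_all and assuming <linux/list.h> and <linux/slab.h>.

struct demo_slot {
        u32 number;
        struct list_head list;
};

static LIST_HEAD(demo_slots);

static void demo_free_all(void)
{
        struct demo_slot *s, *tmp;

        /* _safe variant: 'tmp' caches the next node before 's' is freed */
        list_for_each_entry_safe(s, tmp, &demo_slots, list) {
                list_del(&s->list);
                kfree(s);
        }
}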
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 2317557fdee..bc234719b1d 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -99,65 +99,55 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(hotplug_slot);
}
-static int init_slots(struct controller *ctrl)
+static int init_slot(struct controller *ctrl)
{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
+ struct slot *slot = ctrl->slot;
+ struct hotplug_slot *hotplug = NULL;
+ struct hotplug_slot_info *info = NULL;
char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
- goto error;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto error_hpslot;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->info = info;
- hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
- hotplug_slot->ops = &pciehp_hotplug_slot_ops;
- slot->hotplug_slot = hotplug_slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
-
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
- "hp_slot=%x sun=%x slot_device_offset=%x\n",
- pci_domain_nr(ctrl->pci_dev->subordinate),
- slot->bus, slot->device, slot->hp_slot, slot->number,
- ctrl->slot_device_offset);
- retval = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->subordinate,
- slot->device,
- name);
- if (retval) {
- ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
- retval);
- goto error_info;
- }
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
+ if (!hotplug)
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto out;
+
+ /* register this slot with the hotplug pci core */
+ hotplug->info = info;
+ hotplug->private = slot;
+ hotplug->release = &release_slot;
+ hotplug->ops = &pciehp_hotplug_slot_ops;
+ slot->hotplug_slot = hotplug;
+ snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
+
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n",
+ pci_domain_nr(ctrl->pcie->port->subordinate),
+ ctrl->pcie->port->subordinate->number, PSN(ctrl));
+ retval = pci_hp_register(hotplug,
+ ctrl->pcie->port->subordinate, 0, name);
+ if (retval) {
+ ctrl_err(ctrl,
+ "pci_hp_register failed with error %d\n", retval);
+ goto out;
+ }
+ get_power_status(hotplug, &info->power_status);
+ get_attention_status(hotplug, &info->attention_status);
+ get_latch_status(hotplug, &info->latch_status);
+ get_adapter_status(hotplug, &info->adapter_status);
+out:
+ if (retval) {
+ kfree(info);
+ kfree(hotplug);
}
-
- return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
-error:
return retval;
}
-static void cleanup_slots(struct controller *ctrl)
+static void cleanup_slot(struct controller *ctrl)
{
- struct slot *slot;
- list_for_each_entry(slot, &ctrl->slot_list, slot_list)
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(ctrl->slot->hotplug_slot);
}
/*
@@ -173,7 +163,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
hotplug_slot->info->attention_status = status;
if (ATTN_LED(slot->ctrl))
- slot->hpc_ops->set_attention_status(slot, status);
+ pciehp_set_attention_status(slot, status);
return 0;
}
@@ -208,7 +198,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_power_status(slot, value);
+ retval = pciehp_get_power_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->power_status;
@@ -223,7 +213,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_attention_status(slot, value);
+ retval = pciehp_get_attention_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->attention_status;
@@ -238,7 +228,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_latch_status(slot, value);
+ retval = pciehp_get_latch_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->latch_status;
@@ -253,7 +243,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_adapter_status(slot, value);
+ retval = pciehp_get_adapter_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->adapter_status;
@@ -269,7 +259,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_max_bus_speed(slot, value);
+ retval = pciehp_get_max_link_speed(slot, value);
if (retval < 0)
*value = PCI_SPEED_UNKNOWN;
@@ -284,7 +274,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
+ retval = pciehp_get_cur_link_speed(slot, value);
if (retval < 0)
*value = PCI_SPEED_UNKNOWN;
@@ -295,7 +285,7 @@ static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
- struct slot *t_slot;
+ struct slot *slot;
u8 value;
struct pci_dev *pdev = dev->port;
@@ -314,7 +304,7 @@ static int pciehp_probe(struct pcie_device *dev)
set_service_data(dev, ctrl);
/* Setup the slot information structures */
- rc = init_slots(ctrl);
+ rc = init_slot(ctrl);
if (rc) {
if (rc == -EBUSY)
ctrl_warn(ctrl, "Slot already registered by another "
@@ -332,15 +322,15 @@ static int pciehp_probe(struct pcie_device *dev)
}
/* Check if slot is occupied */
- t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
- t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+ slot = ctrl->slot;
+ pciehp_get_adapter_status(slot, &value);
if (value) {
if (pciehp_force)
- pciehp_enable_slot(t_slot);
+ pciehp_enable_slot(slot);
} else {
/* Power off slot if not occupied */
if (POWER_CTRL(ctrl)) {
- rc = t_slot->hpc_ops->power_off_slot(t_slot);
+ rc = pciehp_power_off_slot(slot);
if (rc)
goto err_out_free_ctrl_slot;
}
@@ -349,19 +339,19 @@ static int pciehp_probe(struct pcie_device *dev)
return 0;
err_out_free_ctrl_slot:
- cleanup_slots(ctrl);
+ cleanup_slot(ctrl);
err_out_release_ctlr:
- ctrl->hpc_ops->release_ctlr(ctrl);
+ pciehp_release_ctrl(ctrl);
err_out_none:
return -ENODEV;
}
-static void pciehp_remove (struct pcie_device *dev)
+static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- cleanup_slots(ctrl);
- ctrl->hpc_ops->release_ctlr(ctrl);
+ cleanup_slot(ctrl);
+ pciehp_release_ctrl(ctrl);
}
#ifdef CONFIG_PM
@@ -376,20 +366,20 @@ static int pciehp_resume (struct pcie_device *dev)
dev_info(&dev->device, "%s ENTRY\n", __func__);
if (pciehp_force) {
struct controller *ctrl = get_service_data(dev);
- struct slot *t_slot;
+ struct slot *slot;
u8 status;
/* reinitialize the chipset's event detection logic */
pcie_enable_notification(ctrl);
- t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
+ slot = ctrl->slot;
/* Check if slot is occupied */
- t_slot->hpc_ops->get_adapter_status(t_slot, &status);
+ pciehp_get_adapter_status(slot, &status);
if (status)
- pciehp_enable_slot(t_slot);
+ pciehp_enable_slot(slot);
else
- pciehp_disable_slot(t_slot);
+ pciehp_disable_slot(slot);
}
return 0;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index b97cb4c3e0f..84487d126e4 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -82,7 +82,7 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
/* Switch Change */
ctrl_dbg(ctrl, "Switch interrupt received\n");
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ pciehp_get_latch_status(p_slot, &getstatus);
if (getstatus) {
/*
* Switch opened
@@ -114,7 +114,7 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
/* Switch is open, assume a presence change
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &presence_save);
+ pciehp_get_adapter_status(p_slot, &presence_save);
if (presence_save) {
/*
* Card Present
@@ -143,7 +143,7 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
/* power fault */
ctrl_dbg(ctrl, "Power fault interrupt received\n");
- if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!pciehp_query_power_fault(p_slot)) {
/*
* power fault Cleared
*/
@@ -172,7 +172,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
- if (pslot->hpc_ops->power_off_slot(pslot)) {
+ if (pciehp_power_off_slot(pslot)) {
ctrl_err(ctrl,
"Issue of Slot Power Off command failed\n");
return;
@@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
}
if (PWR_LED(ctrl))
- pslot->hpc_ops->green_led_off(pslot);
+ pciehp_green_led_off(pslot);
if (ATTN_LED(ctrl)) {
- if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
+ if (pciehp_set_attention_status(pslot, 1)) {
ctrl_err(ctrl,
"Issue of Set Attention Led command failed\n");
return;
@@ -208,24 +208,20 @@ static int board_added(struct slot *p_slot)
{
int retval = 0;
struct controller *ctrl = p_slot->ctrl;
- struct pci_bus *parent = ctrl->pci_dev->subordinate;
-
- ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n",
- __func__, p_slot->device, ctrl->slot_device_offset,
- p_slot->hp_slot);
+ struct pci_bus *parent = ctrl->pcie->port->subordinate;
if (POWER_CTRL(ctrl)) {
/* Power on slot */
- retval = p_slot->hpc_ops->power_on_slot(p_slot);
+ retval = pciehp_power_on_slot(p_slot);
if (retval)
return retval;
}
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_blink(p_slot);
+ pciehp_green_led_blink(p_slot);
/* Check link training status */
- retval = p_slot->hpc_ops->check_lnk_status(ctrl);
+ retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
set_slot_off(ctrl, p_slot);
@@ -233,21 +229,21 @@ static int board_added(struct slot *p_slot)
}
/* Check for a power fault */
- if (p_slot->hpc_ops->query_power_fault(p_slot)) {
+ if (pciehp_query_power_fault(p_slot)) {
ctrl_dbg(ctrl, "Power fault detected\n");
- retval = POWER_FAILURE;
+ retval = -EIO;
goto err_exit;
}
retval = pciehp_configure_device(p_slot);
if (retval) {
- ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
- pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
+ pci_domain_nr(parent), parent->number);
goto err_exit;
}
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_on(p_slot);
+ pciehp_green_led_on(p_slot);
return 0;
@@ -269,11 +265,9 @@ static int remove_board(struct slot *p_slot)
if (retval)
return retval;
- ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot);
-
if (POWER_CTRL(ctrl)) {
/* power off slot */
- retval = p_slot->hpc_ops->power_off_slot(p_slot);
+ retval = pciehp_power_off_slot(p_slot);
if (retval) {
ctrl_err(ctrl,
"Issue of Slot Disable command failed\n");
@@ -287,9 +281,9 @@ static int remove_board(struct slot *p_slot)
msleep(1000);
}
+ /* turn off Green LED */
if (PWR_LED(ctrl))
- /* turn off Green LED */
- p_slot->hpc_ops->green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
return 0;
}
@@ -317,18 +311,17 @@ static void pciehp_power_thread(struct work_struct *work)
case POWEROFF_STATE:
mutex_unlock(&p_slot->lock);
ctrl_dbg(p_slot->ctrl,
- "Disabling domain:bus:device=%04x:%02x:%02x\n",
- pci_domain_nr(p_slot->ctrl->pci_dev->subordinate),
- p_slot->bus, p_slot->device);
+ "Disabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
pciehp_disable_slot(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
- if (pciehp_enable_slot(p_slot) &&
- PWR_LED(p_slot->ctrl))
- p_slot->hpc_ops->green_led_off(p_slot);
+ if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
+ pciehp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
@@ -379,10 +372,10 @@ static int update_slot_info(struct slot *slot)
if (!info)
return -ENOMEM;
- slot->hpc_ops->get_power_status(slot, &(info->power_status));
- slot->hpc_ops->get_attention_status(slot, &(info->attention_status));
- slot->hpc_ops->get_latch_status(slot, &(info->latch_status));
- slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status));
+ pciehp_get_power_status(slot, &info->power_status);
+ pciehp_get_attention_status(slot, &info->attention_status);
+ pciehp_get_latch_status(slot, &info->latch_status);
+ pciehp_get_adapter_status(slot, &info->adapter_status);
result = pci_hp_change_slot_info(slot->hotplug_slot, info);
kfree (info);
@@ -399,7 +392,7 @@ static void handle_button_press_event(struct slot *p_slot)
switch (p_slot->state) {
case STATIC_STATE:
- p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl,
@@ -413,9 +406,9 @@ static void handle_button_press_event(struct slot *p_slot)
}
/* blink green LED and turn off amber */
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_blink(p_slot);
+ pciehp_green_led_blink(p_slot);
if (ATTN_LED(ctrl))
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(p_slot, 0);
schedule_delayed_work(&p_slot->work, 5*HZ);
break;
@@ -430,13 +423,13 @@ static void handle_button_press_event(struct slot *p_slot)
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE) {
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_on(p_slot);
+ pciehp_green_led_on(p_slot);
} else {
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
}
if (ATTN_LED(ctrl))
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled "
"due to button press\n", slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -474,7 +467,7 @@ static void handle_surprise_event(struct slot *p_slot)
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
- p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ pciehp_get_adapter_status(p_slot, &getstatus);
if (!getstatus)
p_slot->state = POWEROFF_STATE;
else
@@ -498,9 +491,9 @@ static void interrupt_event_handler(struct work_struct *work)
if (!POWER_CTRL(ctrl))
break;
if (ATTN_LED(ctrl))
- p_slot->hpc_ops->set_attention_status(p_slot, 1);
+ pciehp_set_attention_status(p_slot, 1);
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
case INT_PRESENCE_OFF:
@@ -525,45 +518,38 @@ int pciehp_enable_slot(struct slot *p_slot)
int rc;
struct controller *ctrl = p_slot->ctrl;
- /* Check to see if (latch closed, card present, power off) */
- mutex_lock(&p_slot->ctrl->crit_sect);
-
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = pciehp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
if (MRL_SENS(p_slot->ctrl)) {
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = pciehp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
}
if (POWER_CTRL(p_slot->ctrl)) {
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = pciehp_get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
}
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ pciehp_get_latch_status(p_slot, &getstatus);
rc = board_added(p_slot);
if (rc) {
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ pciehp_get_latch_status(p_slot, &getstatus);
}
update_slot_info(p_slot);
- mutex_unlock(&p_slot->ctrl->crit_sect);
return rc;
}
@@ -577,35 +563,29 @@ int pciehp_disable_slot(struct slot *p_slot)
if (!p_slot->ctrl)
return 1;
- /* Check to see if (latch closed, card present, power on) */
- mutex_lock(&p_slot->ctrl->crit_sect);
-
if (!HP_SUPR_RM(p_slot->ctrl)) {
- ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ ret = pciehp_get_adapter_status(p_slot, &getstatus);
if (ret || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
}
if (MRL_SENS(p_slot->ctrl)) {
- ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ ret = pciehp_get_latch_status(p_slot, &getstatus);
if (ret || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
}
if (POWER_CTRL(p_slot->ctrl)) {
- ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ ret = pciehp_get_power_status(p_slot, &getstatus);
if (ret || !getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
}
@@ -613,7 +593,6 @@ int pciehp_disable_slot(struct slot *p_slot)
ret = remove_board(p_slot);
update_slot_info(p_slot);
- mutex_unlock(&p_slot->ctrl->crit_sect);
return ret;
}
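For orientation (not part of the patch): the button handler above arms a five second grace period with a delayed work item and cancels it on a second press. A minimal sketch of that idiom, assuming <linux/workqueue.h>; the demo_* names are hypothetical.

static void demo_button_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_button_work);

static void demo_button_work(struct work_struct *work)
{
        pr_info("grace period expired, carrying out the queued action\n");
}

static void demo_button_press(void)
{
        /* first press arms the action after 5 seconds ... */
        schedule_delayed_work(&demo_work, 5 * HZ);

        /* ... a second press within that window would abort it with
         * cancel_delayed_work(&demo_work), as handle_button_press_event()
         * does above. */
}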
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 271f917b6f2..9ef4605c1ef 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -44,25 +44,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_read_config_word(dev, ctrl->cap_base + reg, value);
}
static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
}
static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_write_config_word(dev, ctrl->cap_base + reg, value);
}
static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
}
@@ -266,7 +266,7 @@ static void pcie_wait_link_active(struct controller *ctrl)
ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
}
-static int hpc_check_lnk_status(struct controller *ctrl)
+int pciehp_check_link_status(struct controller *ctrl)
{
u16 lnk_status;
int retval = 0;
@@ -305,7 +305,7 @@ static int hpc_check_lnk_status(struct controller *ctrl)
return retval;
}
-static int hpc_get_attention_status(struct slot *slot, u8 *status)
+int pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_ctrl;
@@ -344,7 +344,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot *slot, u8 *status)
+int pciehp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_ctrl;
@@ -376,7 +376,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
return retval;
}
-static int hpc_get_latch_status(struct slot *slot, u8 *status)
+int pciehp_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
@@ -392,7 +392,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+int pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
@@ -408,7 +408,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_query_power_fault(struct slot *slot)
+int pciehp_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
@@ -422,7 +422,7 @@ static int hpc_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_set_attention_status(struct slot *slot, u8 value)
+int pciehp_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -450,7 +450,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
return rc;
}
-static void hpc_set_green_led_on(struct slot *slot)
+void pciehp_green_led_on(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -463,7 +463,7 @@ static void hpc_set_green_led_on(struct slot *slot)
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
-static void hpc_set_green_led_off(struct slot *slot)
+void pciehp_green_led_off(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -476,7 +476,7 @@ static void hpc_set_green_led_off(struct slot *slot)
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
-static void hpc_set_green_led_blink(struct slot *slot)
+void pciehp_green_led_blink(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -489,7 +489,7 @@ static void hpc_set_green_led_blink(struct slot *slot)
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
-static int hpc_power_on_slot(struct slot * slot)
+int pciehp_power_on_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -497,8 +497,6 @@ static int hpc_power_on_slot(struct slot * slot)
u16 slot_status;
int retval = 0;
- ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
-
/* Clear sticky power-fault bit from previous power failures */
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
@@ -539,7 +537,7 @@ static int hpc_power_on_slot(struct slot * slot)
static inline int pcie_mask_bad_dllp(struct controller *ctrl)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
int pos;
u32 reg;
@@ -556,7 +554,7 @@ static inline int pcie_mask_bad_dllp(struct controller *ctrl)
static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
u32 reg;
int pos;
@@ -570,7 +568,7 @@ static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
}
-static int hpc_power_off_slot(struct slot * slot)
+int pciehp_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -578,8 +576,6 @@ static int hpc_power_off_slot(struct slot * slot)
int retval = 0;
int changed;
- ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
-
/*
* Set Bad DLLP Mask bit in Correctable Error Mask
* Register. This is the workaround against Bad DLLP error
@@ -614,8 +610,8 @@ static int hpc_power_off_slot(struct slot * slot)
static irqreturn_t pcie_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
+ struct slot *slot = ctrl->slot;
u16 detected, intr_loc;
- struct slot *p_slot;
/*
* In order to guarantee that all interrupt events are
@@ -656,29 +652,27 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
return IRQ_HANDLED;
- p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-
/* Check MRL Sensor Changed */
if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
- pciehp_handle_switch_change(p_slot);
+ pciehp_handle_switch_change(slot);
/* Check Attention Button Pressed */
if (intr_loc & PCI_EXP_SLTSTA_ABP)
- pciehp_handle_attention_button(p_slot);
+ pciehp_handle_attention_button(slot);
/* Check Presence Detect Changed */
if (intr_loc & PCI_EXP_SLTSTA_PDC)
- pciehp_handle_presence_change(p_slot);
+ pciehp_handle_presence_change(slot);
/* Check Power Fault Detected */
if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- pciehp_handle_power_fault(p_slot);
+ pciehp_handle_power_fault(slot);
}
return IRQ_HANDLED;
}
-static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
+int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value)
{
struct controller *ctrl = slot->ctrl;
enum pcie_link_speed lnk_speed;
@@ -709,7 +703,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_get_max_lnk_width(struct slot *slot,
+int pciehp_get_max_link_width(struct slot *slot,
enum pcie_link_width *value)
{
struct controller *ctrl = slot->ctrl;
@@ -759,7 +753,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
return retval;
}
-static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
+int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value)
{
struct controller *ctrl = slot->ctrl;
enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
@@ -791,7 +785,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_get_cur_lnk_width(struct slot *slot,
+int pciehp_get_cur_link_width(struct slot *slot,
enum pcie_link_width *value)
{
struct controller *ctrl = slot->ctrl;
@@ -842,30 +836,6 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
return retval;
}
-static void pcie_release_ctrl(struct controller *ctrl);
-static struct hpc_ops pciehp_hpc_ops = {
- .power_on_slot = hpc_power_on_slot,
- .power_off_slot = hpc_power_off_slot,
- .set_attention_status = hpc_set_attention_status,
- .get_power_status = hpc_get_power_status,
- .get_attention_status = hpc_get_attention_status,
- .get_latch_status = hpc_get_latch_status,
- .get_adapter_status = hpc_get_adapter_status,
-
- .get_max_bus_speed = hpc_get_max_lnk_speed,
- .get_cur_bus_speed = hpc_get_cur_lnk_speed,
- .get_max_lnk_width = hpc_get_max_lnk_width,
- .get_cur_lnk_width = hpc_get_cur_lnk_width,
-
- .query_power_fault = hpc_query_power_fault,
- .green_led_on = hpc_set_green_led_on,
- .green_led_off = hpc_set_green_led_off,
- .green_led_blink = hpc_set_green_led_blink,
-
- .release_ctlr = pcie_release_ctrl,
- .check_lnk_status = hpc_check_lnk_status,
-};
-
int pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -930,23 +900,16 @@ static int pcie_init_slot(struct controller *ctrl)
if (!slot)
return -ENOMEM;
- slot->hp_slot = 0;
slot->ctrl = ctrl;
- slot->bus = ctrl->pci_dev->subordinate->number;
- slot->device = ctrl->slot_device_offset + slot->hp_slot;
- slot->hpc_ops = ctrl->hpc_ops;
- slot->number = ctrl->first_slot;
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
- list_add(&slot->slot_list, &ctrl->slot_list);
+ ctrl->slot = slot;
return 0;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
- struct slot *slot;
- slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
- list_del(&slot->slot_list);
+ struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
flush_scheduled_work();
flush_workqueue(pciehp_wq);
@@ -957,7 +920,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
{
int i;
u16 reg16;
- struct pci_dev *pdev = ctrl->pci_dev;
+ struct pci_dev *pdev = ctrl->pcie->port;
if (!pciehp_debug)
return;
@@ -980,7 +943,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
(unsigned long long)pci_resource_start(pdev, i));
}
ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
- ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot);
+ ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
ctrl_info(ctrl, " Attention Button : %3s\n",
ATTN_BUTTN(ctrl) ? "yes" : "no");
ctrl_info(ctrl, " Power Controller : %3s\n",
@@ -1014,10 +977,7 @@ struct controller *pcie_init(struct pcie_device *dev)
dev_err(&dev->device, "%s: Out of memory\n", __func__);
goto abort;
}
- INIT_LIST_HEAD(&ctrl->slot_list);
-
ctrl->pcie = dev;
- ctrl->pci_dev = pdev;
ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!ctrl->cap_base) {
ctrl_err(ctrl, "Cannot find PCI Express capability\n");
@@ -1029,11 +989,6 @@ struct controller *pcie_init(struct pcie_device *dev)
}
ctrl->slot_cap = slot_cap;
- ctrl->first_slot = slot_cap >> 19;
- ctrl->slot_device_offset = 0;
- ctrl->num_slots = 1;
- ctrl->hpc_ops = &pciehp_hpc_ops;
- mutex_init(&ctrl->crit_sect);
mutex_init(&ctrl->ctrl_lock);
init_waitqueue_head(&ctrl->queue);
dbg_ctrl(ctrl);
@@ -1089,7 +1044,7 @@ abort:
return NULL;
}
-void pcie_release_ctrl(struct controller *ctrl)
+void pciehp_release_ctrl(struct controller *ctrl)
{
pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
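Illustrative only: the accessors above now resolve slot registers through ctrl->pcie->port plus an offset into the PCI Express capability. A small sketch of the same lookup, mirroring what pcie_init() does when it reads PCI_EXP_SLTCAP; demo_read_slot_cap() is hypothetical.

static int demo_read_slot_cap(struct pci_dev *port, u32 *slot_cap)
{
        int cap = pci_find_capability(port, PCI_CAP_ID_EXP);

        if (!cap)
                return -ENODEV;
        /* Slot Capabilities register lives at a fixed offset in the cap */
        return pci_read_config_dword(port, cap + PCI_EXP_SLTCAP, slot_cap);
}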
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 02e24d63b3e..21733108add 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -63,27 +63,27 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev)
int pciehp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
- struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
int num, fn;
struct controller *ctrl = p_slot->ctrl;
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
ctrl_err(ctrl, "Device %s already exists "
- "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
- pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ "at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
+ pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
return -EINVAL;
}
- num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
if (num == 0) {
ctrl_err(ctrl, "No new device found\n");
return -ENODEV;
}
for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
+ dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
if (!dev)
continue;
if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
@@ -111,19 +111,18 @@ int pciehp_unconfigure_device(struct slot *p_slot)
int j;
u8 bctl = 0;
u8 presence = 0;
- struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
u16 command;
struct controller *ctrl = p_slot->ctrl;
- ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
- __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
- ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence);
+ ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
+ __func__, pci_domain_nr(parent), parent->number);
+ ret = pciehp_get_adapter_status(p_slot, &presence);
if (ret)
presence = 0;
for (j = 0; j < 8; j++) {
- struct pci_dev* temp = pci_get_slot(parent,
- (p_slot->device << 3) | j);
+ struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j));
if (!temp)
continue;
if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
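A brief sketch (not from the patch) of the scan pattern used above: with a single slot behind the port, the hot-added device always sits at device 0 of the secondary bus, so only functions 0-7 of devfn 0 need to be walked. demo_walk_slot() is hypothetical.

static void demo_walk_slot(struct pci_bus *parent)
{
        int fn;

        for (fn = 0; fn < 8; fn++) {
                struct pci_dev *dev = pci_get_slot(parent, PCI_DEVFN(0, fn));

                if (!dev)
                        continue;
                pr_info("found %s at %02x:00.%d\n",
                        pci_name(dev), parent->number, fn);
                pci_dev_put(dev);       /* pci_get_slot() takes a reference */
        }
}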
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 562221e1191..855dd7ca47f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -38,6 +38,7 @@
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/tboot.h>
+#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
@@ -56,8 +57,14 @@
#define MAX_AGAW_WIDTH 64
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+ to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
+ __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
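For clarity, a worked example of the PFN clamping introduced above; the numbers are illustrative and not part of the patch (they assume VTD_PAGE_SHIFT == 12 and a 48 bit guest address width).

/*
 *   __DOMAIN_MAX_PFN(48) = 2^(48-12) - 1 = 2^36 - 1
 *
 *   64-bit kernel: DOMAIN_MAX_PFN(48) = 2^36 - 1    (fits in unsigned long)
 *   32-bit kernel: DOMAIN_MAX_PFN(48) = 0xffffffff  (clamped to ULONG_MAX)
 *
 * so page-frame numbers can be carried in 'unsigned long' throughout the
 * driver without overflow.
 */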
@@ -252,7 +259,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
* 2. It maps to each iommu if successful.
* 3. Each iommu mapps to this domain if successful.
*/
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -728,7 +736,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (cmpxchg64(&pte->val, 0ULL, pteval)) {
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -778,9 +786,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
- while (start_pfn <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, 2);
@@ -794,7 +803,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+
+ } while (start_pfn && start_pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
@@ -810,6 +820,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* We don't need lock here; nobody else touches the iova range */
level = 2;
@@ -820,7 +831,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp + level_size(level) - 1 <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
@@ -839,7 +850,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+ } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
level++;
}
/* free pgd */
@@ -1158,6 +1169,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
pr_debug("Number of Domains supportd <%ld>\n", ndomains);
nlongs = BITS_TO_LONGS(ndomains);
+ spin_lock_init(&iommu->lock);
+
/* TBD: there might be 64K domains,
* consider other allocation for future chip
*/
@@ -1170,12 +1183,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
GFP_KERNEL);
if (!iommu->domains) {
printk(KERN_ERR "Allocating domain array failed\n");
- kfree(iommu->domain_ids);
return -ENOMEM;
}
- spin_lock_init(&iommu->lock);
-
/*
* if Caching mode is set, then invalid translations are tagged
* with domainid 0. Hence we need to pre-allocate it.
@@ -1195,22 +1205,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
int i;
unsigned long flags;
- i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
- for (; i < cap_ndoms(iommu->cap); ) {
- domain = iommu->domains[i];
- clear_bit(i, iommu->domain_ids);
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+ for (; i < cap_ndoms(iommu->cap); ) {
+ domain = iommu->domains[i];
+ clear_bit(i, iommu->domain_ids);
+
+ spin_lock_irqsave(&domain->iommu_lock, flags);
+ if (--domain->iommu_count == 0) {
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+ vm_domain_exit(domain);
+ else
+ domain_exit(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_exit(domain);
- else
- domain_exit(domain);
+ i = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), i+1);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
- i = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), i+1);
}
if (iommu->gcmd & DMA_GCMD_TE)
@@ -1310,7 +1322,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
}
static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;
static void dmar_init_reserved_ranges(void)
@@ -1321,8 +1332,6 @@ static void dmar_init_reserved_ranges(void)
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
- lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
- &reserved_alloc_key);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1959,14 +1968,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
struct dmar_domain *domain;
int ret;
- printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
-
domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
+ /* For _hardware_ passthrough, don't bother. But for software
+ passthrough, we do it anyway -- it may indicate a memory
+ range which is reserved in E820 and so didn't get set up
+ in si_domain to start with. */
+ if (domain == si_domain && hw_pass_through) {
+ printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+ return 0;
+ }
+
+ printk(KERN_INFO
+ "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+
+ if (end >> agaw_to_width(domain->agaw)) {
+ WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ agaw_to_width(domain->agaw),
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ ret = -EIO;
+ goto error;
+ }
+
ret = iommu_domain_identity_map(domain, start, end);
if (ret)
goto error;
@@ -2017,23 +2047,6 @@ static inline void iommu_prepare_isa(void)
}
#endif /* !CONFIG_DMAR_FLPY_WA */
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
- struct pci_dev *pdev = NULL;
- struct dmar_domain *domain;
- int ret;
-
- for_each_pci_dev(pdev) {
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- ret = domain_context_mapping(domain, pdev,
- CONTEXT_TT_PASS_THROUGH);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2048,7 +2061,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
}
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
@@ -2075,6 +2088,9 @@ static int si_domain_init(void)
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ if (hw)
+ return 0;
+
for_each_online_node(nid) {
work_with_active_regions(nid, si_domain_work_fn, &ret);
if (ret)
@@ -2101,15 +2117,23 @@ static int identity_mapping(struct pci_dev *pdev)
}
static int domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
+ struct pci_dev *pdev,
+ int translation)
{
struct device_domain_info *info;
unsigned long flags;
+ int ret;
info = alloc_devinfo_mem();
if (!info)
return -ENOMEM;
+ ret = domain_context_mapping(domain, pdev, translation);
+ if (ret) {
+ free_devinfo_mem(info);
+ return ret;
+ }
+
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
@@ -2166,27 +2190,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
return 1;
}
-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
int ret;
- ret = si_domain_init();
+ ret = si_domain_init(hw);
if (ret)
return -EFAULT;
for_each_pci_dev(pdev) {
if (iommu_should_identity_map(pdev, 1)) {
- printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
- pci_name(pdev));
+ printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+ hw ? "hardware" : "software", pci_name(pdev));
- ret = domain_context_mapping(si_domain, pdev,
+ ret = domain_add_dev_info(si_domain, pdev,
+ hw ? CONTEXT_TT_PASS_THROUGH :
CONTEXT_TT_MULTI_LEVEL);
if (ret)
return ret;
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return ret;
}
}
@@ -2200,14 +2222,6 @@ int __init init_dmars(void)
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
- int pass_through = 1;
-
- /*
- * In case pass through can not be enabled, iommu tries to use identity
- * mapping.
- */
- if (iommu_pass_through)
- iommu_identity_mapping = 1;
/*
* for each drhd
@@ -2235,7 +2249,6 @@ int __init init_dmars(void)
deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) {
- kfree(g_iommus);
ret = -ENOMEM;
goto error;
}
@@ -2262,14 +2275,8 @@ int __init init_dmars(void)
goto error;
}
if (!ecap_pass_through(iommu->ecap))
- pass_through = 0;
+ hw_pass_through = 0;
}
- if (iommu_pass_through)
- if (!pass_through) {
- printk(KERN_INFO
- "Pass Through is not supported by hardware.\n");
- iommu_pass_through = 0;
- }
/*
* Start from the sane iommu hardware state.
@@ -2324,64 +2331,57 @@ int __init init_dmars(void)
}
}
+ if (iommu_pass_through)
+ iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+ else
+ iommu_identity_mapping = 2;
+#endif
/*
- * If pass through is set and enabled, context entries of all pci
- * devices are intialized by pass through translation type.
+ * If pass through is not set or not enabled, set up context entries for
+ * identity mappings for rmrr, gfx, and isa, and fall back to static
+ * identity mapping if iommu_identity_mapping is set.
*/
- if (iommu_pass_through) {
- ret = init_context_pass_through();
+ if (iommu_identity_mapping) {
+ ret = iommu_prepare_static_identity_mapping(hw_pass_through);
if (ret) {
- printk(KERN_ERR "IOMMU: Pass through init failed.\n");
- iommu_pass_through = 0;
+ printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+ goto error;
}
}
-
/*
- * If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa and may fall back to static
- * identity mapping if iommu_identity_mapping is set.
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
*/
- if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
- if (!iommu_identity_mapping)
- iommu_identity_mapping = 2;
-#endif
- if (iommu_identity_mapping)
- iommu_prepare_static_identity_mapping();
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- printk(KERN_INFO "IOMMU: Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /*
- * some BIOS lists non-exist devices in DMAR
- * table.
- */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
- }
+ printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * Some BIOSes list non-existent devices in the
+ * DMAR table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
}
-
- iommu_prepare_isa();
}
+ iommu_prepare_isa();
+
/*
* for each drhd
* enable fault log
@@ -2404,11 +2404,12 @@ int __init init_dmars(void)
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
if (ret)
goto error;
+
+ iommu_disable_protect_mem_regions(iommu);
}
return 0;
@@ -2455,8 +2456,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
return iova;
}
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
struct dmar_domain *domain;
int ret;
@@ -2484,6 +2484,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+ struct device_domain_info *info;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->dev.archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return __get_valid_domain_for_dev(dev);
+}
+
static int iommu_dummy(struct pci_dev *pdev)
{
return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2526,10 +2538,10 @@ static int iommu_no_mapping(struct device *dev)
*/
if (iommu_should_identity_map(pdev, 0)) {
int ret;
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return 0;
- ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_add_dev_info(si_domain, pdev,
+ hw_pass_through ?
+ CONTEXT_TT_PASS_THROUGH :
+ CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
printk(KERN_INFO "64bit %s uses identity mapping\n",
pci_name(pdev));
@@ -2638,10 +2650,9 @@ static void flush_unmaps(void)
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];
- mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
- mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- iova->pfn_lo << PAGE_SHIFT, mask);
+ (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
@@ -2734,12 +2745,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
}
}
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
- int dir)
-{
- intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
@@ -2772,7 +2777,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+ intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, order);
}
@@ -2808,11 +2813,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
/* free page tables */
dma_pte_free_pagetable(domain, start_pfn, last_pfn);
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- (last_pfn - start_pfn + 1));
-
- /* free iova */
- __free_iova(&domain->iovad, iova);
+ if (intel_iommu_strict) {
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ last_pfn - start_pfn + 1);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ } else {
+ add_unmap(domain, iova);
+ /*
+ * queue up the release of the unmap to save roughly 1/6 of the
+ * CPU time otherwise used up by the iotlb flush operation...
+ */
+ }
}
static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3056,8 +3068,8 @@ static int init_iommu_hw(void)
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
+ iommu_disable_protect_mem_regions(iommu);
}
return 0;
@@ -3205,7 +3217,7 @@ int __init intel_iommu_init(void)
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+ if (no_iommu || swiotlb || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
@@ -3227,14 +3239,7 @@ int __init intel_iommu_init(void)
init_timer(&unmap_timer);
force_iommu = 1;
-
- if (!iommu_pass_through) {
- printk(KERN_INFO
- "Multi-level page-table translation for DMAR.\n");
- dma_ops = &intel_dma_ops;
- } else
- printk(KERN_INFO
- "DMAR: Pass through translation for DMAR.\n");
+ dma_ops = &intel_dma_ops;
init_iommu_sysfs();
@@ -3517,7 +3522,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct intel_iommu *iommu;
int addr_width;
u64 end;
- int ret;
/* normally pdev is not mapped */
if (unlikely(domain_context_mapped(pdev))) {
@@ -3549,12 +3553,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT;
}
- ret = domain_add_dev_info(dmar_domain, pdev);
- if (ret)
- return ret;
-
- ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
- return ret;
+ return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
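Illustrative only: with this patch, domain_add_dev_info() performs the context mapping itself, and the translation type for devices placed in the static identity domain depends on whether the hardware supports pass-through. A hypothetical wrapper showing that selection; demo_attach_identity() is not part of the patch.

static int demo_attach_identity(struct dmar_domain *si, struct pci_dev *pdev,
                                int hw_pass_through)
{
        int translation = hw_pass_through ? CONTEXT_TT_PASS_THROUGH :
                                            CONTEXT_TT_MULTI_LEVEL;

        /* context mapping is now done inside domain_add_dev_info() */
        return domain_add_dev_info(si, pdev, translation);
}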
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 44803644ca0..0ed78a764de 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
if (disable_intremap)
return 0;
+ if (!dmar_ir_support())
+ return 0;
+
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
struct dmar_drhd_unit *drhd;
int setup = 0;
+ if (parse_ioapics_under_ir() != 1) {
+ printk(KERN_INFO "Not enable interrupt remapping\n");
+ return -1;
+ }
+
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e231..7914951ef29 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
- spin_lock_init(&iovad->iova_alloc_lock);
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn,
bool size_aligned)
{
- unsigned long flags;
struct iova *new_iova;
int ret;
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
if (size_aligned)
size = __roundup_pow_of_two(size);
- spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
new_iova, size_aligned);
- spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
if (ret) {
free_iova_mem(new_iova);
return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
struct iova *iova;
unsigned int overlap = 0;
- spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
- spin_lock(&iovad->iova_rbtree_lock);
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
- spin_unlock(&iovad->iova_rbtree_lock);
- spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return iova;
}
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
unsigned long flags;
struct rb_node *node;
- spin_lock_irqsave(&from->iova_alloc_lock, flags);
- spin_lock(&from->iova_rbtree_lock);
+ spin_lock_irqsave(&from->iova_rbtree_lock, flags);
for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
struct iova *iova = container_of(node, struct iova, node);
struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
iova->pfn_lo, iova->pfn_lo);
}
- spin_unlock(&from->iova_rbtree_lock);
- spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+ spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
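A short sketch (not from the patch) of the simplified locking above: after dropping iova_alloc_lock, a single irq-safe rbtree lock protects both allocation and traversal. demo_dump_iovas() is hypothetical.

static void demo_dump_iovas(struct iova_domain *iovad)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);

                pr_debug("iova [%lx..%lx]\n", iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}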
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 10c0e62bd5a..2ce8f9ccc66 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -318,6 +318,8 @@ static int __init aer_service_init(void)
{
if (pcie_aer_disable)
return -ENXIO;
+ if (!pci_msi_enabled())
+ return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f289ca9bf18..745402e8e49 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -303,9 +303,6 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- /* 00b and 10b are defined as "Reserved". */
- if (info->support == PCIE_LINK_STATE_L1)
- info->support = 0;
info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 047394d98ac..3247828aa20 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -71,6 +71,7 @@ pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
+pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 7b424e0b044..32c44040c1e 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -27,6 +27,7 @@
#include <linux/proc_fs.h>
#include <linux/poll.h>
#include <linux/pci.h>
+#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/workqueue.h>
@@ -105,37 +106,40 @@ static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info)
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_pccard = NULL;
-static int proc_read_drivers_callback(struct device_driver *driver, void *d)
+static int proc_read_drivers_callback(struct device_driver *driver, void *_m)
{
- char **p = d;
+ struct seq_file *m = _m;
struct pcmcia_driver *p_drv = container_of(driver,
struct pcmcia_driver, drv);
- *p += sprintf(*p, "%-24.24s 1 %d\n", p_drv->drv.name,
+ seq_printf(m, "%-24.24s 1 %d\n", p_drv->drv.name,
#ifdef CONFIG_MODULE_UNLOAD
(p_drv->owner) ? module_refcount(p_drv->owner) : 1
#else
1
#endif
);
- d = (void *) p;
-
return 0;
}
-static int proc_read_drivers(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pccard_drivers_proc_show(struct seq_file *m, void *v)
{
- char *p = buf;
- int rc;
-
- rc = bus_for_each_drv(&pcmcia_bus_type, NULL,
- (void *) &p, proc_read_drivers_callback);
- if (rc < 0)
- return rc;
+ return bus_for_each_drv(&pcmcia_bus_type, NULL,
+ m, proc_read_drivers_callback);
+}
- return (p - buf);
+static int pccard_drivers_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pccard_drivers_proc_show, NULL);
}
+
+static const struct file_operations pccard_drivers_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pccard_drivers_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
@@ -1011,7 +1015,7 @@ void __init pcmcia_setup_ioctl(void) {
#ifdef CONFIG_PROC_FS
proc_pccard = proc_mkdir("bus/pccard", NULL);
if (proc_pccard)
- create_proc_read_entry("drivers",0,proc_pccard,proc_read_drivers,NULL);
+ proc_create("drivers", 0, proc_pccard, &pccard_drivers_proc_fops);
#endif
}
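The pcmcia_ioctl.c hunk above is a straight conversion from create_proc_read_entry() to proc_create() using the seq_file single_open() helpers. A minimal sketch of that pattern (names such as example_proc_show are illustrative, not from the patch):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	/* emit output through the seq_file instead of a raw page buffer */
	seq_printf(m, "one line of /proc output\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registered with: proc_create("example", 0, parent_dir, &example_proc_fops); */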
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index c49a7269f6d..87e22ef8eb0 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -300,25 +300,29 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
return soc_common_drv_pcmcia_remove(&dev->dev);
}
-static int pxa2xx_drv_pcmcia_suspend(struct platform_device *dev, pm_message_t state)
+static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
{
- return pcmcia_socket_dev_suspend(&dev->dev, state);
+ return pcmcia_socket_dev_suspend(dev, PMSG_SUSPEND);
}
-static int pxa2xx_drv_pcmcia_resume(struct platform_device *dev)
+static int pxa2xx_drv_pcmcia_resume(struct device *dev)
{
- pxa2xx_configure_sockets(&dev->dev);
- return pcmcia_socket_dev_resume(&dev->dev);
+ pxa2xx_configure_sockets(dev);
+ return pcmcia_socket_dev_resume(dev);
}
+static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
+ .suspend = pxa2xx_drv_pcmcia_suspend,
+ .resume = pxa2xx_drv_pcmcia_resume,
+};
+
static struct platform_driver pxa2xx_pcmcia_driver = {
.probe = pxa2xx_drv_pcmcia_probe,
.remove = pxa2xx_drv_pcmcia_remove,
- .suspend = pxa2xx_drv_pcmcia_suspend,
- .resume = pxa2xx_drv_pcmcia_resume,
.driver = {
.name = "pxa2xx-pcmcia",
.owner = THIS_MODULE,
+ .pm = &pxa2xx_drv_pcmcia_pm_ops,
},
};
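The pxa2xx_base.c hunk above moves the driver from the legacy platform_driver suspend/resume callbacks to struct dev_pm_ops, whose callbacks take a struct device * directly. A sketch of the pattern (driver name and callback bodies are placeholders):

static int example_suspend(struct device *dev)
{
	/* quiesce the device; there is no pm_message_t argument any more */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static struct platform_driver example_driver = {
	.driver = {
		.name  = "example",
		.owner = THIS_MODULE,
		.pm    = &example_pm_ops,
	},
};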
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
new file mode 100644
index 00000000000..3a8993ed562
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -0,0 +1,230 @@
+/*
+ * linux/drivers/pcmcia/pxa2xx_palmtc.c
+ *
+ * Driver for Palm Tungsten|C PCMCIA
+ *
+ * Copyright (C) 2008 Alex Osborne <ato@meshy.org>
+ * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include <asm/mach-types.h>
+#include <mach/palmtc.h>
+#include "soc_common.h"
+
+static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
+ if (ret)
+ goto err2;
+
+ ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
+ if (ret)
+ goto err3;
+
+ ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3");
+ if (ret)
+ goto err3;
+ ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
+ if (ret)
+ goto err4;
+
+ ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST");
+ if (ret)
+ goto err4;
+ ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
+ if (ret)
+ goto err5;
+
+ ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY");
+ if (ret)
+ goto err5;
+ ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY);
+ if (ret)
+ goto err6;
+
+ ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY");
+ if (ret)
+ goto err6;
+ ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
+ if (ret)
+ goto err7;
+
+ skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
+ return 0;
+
+err7:
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
+err6:
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
+err5:
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
+err4:
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
+err3:
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
+err2:
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
+err1:
+ return ret;
+}
+
+static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
+{
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
+ gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
+}
+
+static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state)
+{
+ state->detect = 1; /* always inserted */
+ state->ready = !!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_READY);
+ state->bvd1 = 1;
+ state->bvd2 = 1;
+ state->wrprot = 0;
+ state->vs_3v = 1;
+ state->vs_Xv = 0;
+}
+
+static int palmtc_wifi_powerdown(void)
+{
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
+ mdelay(40);
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
+ return 0;
+}
+
+static int palmtc_wifi_powerup(void)
+{
+ int timeout = 50;
+
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 1);
+ mdelay(50);
+
+ /* Power up the card, 1.8V first, after a while 3.3V */
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 1);
+ mdelay(100);
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 1);
+
+ /* Wait till the card is ready */
+ while (!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_PWRREADY) &&
+ timeout) {
+ mdelay(1);
+ timeout--;
+ }
+
+ /* Power down the WiFi in case of error */
+ if (!timeout) {
+ palmtc_wifi_powerdown();
+ return 1;
+ }
+
+ /* Reset the card */
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
+ mdelay(20);
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 0);
+ mdelay(25);
+
+ gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
+
+ return 0;
+}
+
+static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
+ const socket_state_t *state)
+{
+ int ret = 1;
+
+ if (state->Vcc == 0)
+ ret = palmtc_wifi_powerdown();
+ else if (state->Vcc == 33)
+ ret = palmtc_wifi_powerup();
+
+ return ret;
+}
+
+static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
+{
+}
+
+static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+{
+}
+
+static struct pcmcia_low_level palmtc_pcmcia_ops = {
+ .owner = THIS_MODULE,
+
+ .first = 0,
+ .nr = 1,
+
+ .hw_init = palmtc_pcmcia_hw_init,
+ .hw_shutdown = palmtc_pcmcia_hw_shutdown,
+
+ .socket_state = palmtc_pcmcia_socket_state,
+ .configure_socket = palmtc_pcmcia_configure_socket,
+
+ .socket_init = palmtc_pcmcia_socket_init,
+ .socket_suspend = palmtc_pcmcia_socket_suspend,
+};
+
+static struct platform_device *palmtc_pcmcia_device;
+
+static int __init palmtc_pcmcia_init(void)
+{
+ int ret;
+
+ if (!machine_is_palmtc())
+ return -ENODEV;
+
+ palmtc_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+ if (!palmtc_pcmcia_device)
+ return -ENOMEM;
+
+ ret = platform_device_add_data(palmtc_pcmcia_device, &palmtc_pcmcia_ops,
+ sizeof(palmtc_pcmcia_ops));
+
+ if (!ret)
+ ret = platform_device_add(palmtc_pcmcia_device);
+
+ if (ret)
+ platform_device_put(palmtc_pcmcia_device);
+
+ return ret;
+}
+
+static void __exit palmtc_pcmcia_exit(void)
+{
+ platform_device_unregister(palmtc_pcmcia_device);
+}
+
+module_init(palmtc_pcmcia_init);
+module_exit(palmtc_pcmcia_exit);
+
+MODULE_AUTHOR("Alex Osborne <ato@meshy.org>,"
+ " Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("PCMCIA support for Palm Tungsten|C");
+MODULE_ALIAS("platform:pxa2xx-pcmcia");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c
index 57ca085473d..7eedb42f800 100644
--- a/drivers/pcmcia/sa1100_jornada720.c
+++ b/drivers/pcmcia/sa1100_jornada720.c
@@ -16,89 +16,103 @@
#include "sa1111_generic.h"
-#define SOCKET0_POWER GPIO_GPIO0
-#define SOCKET0_3V GPIO_GPIO2
-#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3)
-#warning *** Does SOCKET1_3V actually do anything?
+/* Does SOCKET1_3V actually do anything? */
+#define SOCKET0_POWER GPIO_GPIO0
+#define SOCKET0_3V GPIO_GPIO2
+#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3)
#define SOCKET1_3V GPIO_GPIO3
static int jornada720_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- /*
- * What is all this crap for?
- */
- GRER |= 0x00000002;
- /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
- sa1111_set_io_dir(SA1111_DEV(skt->dev), GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(SA1111_DEV(skt->dev), GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(SA1111_DEV(skt->dev), GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
- return sa1111_pcmcia_hw_init(skt);
+ unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+
+ /*
+ * What is all this crap for?
+ */
+ GRER |= 0x00000002;
+ /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+ sa1111_set_io_dir(SA1111_DEV(skt->dev), pin, 0, 0);
+ sa1111_set_io(SA1111_DEV(skt->dev), pin, 0);
+ sa1111_set_sleep_io(SA1111_DEV(skt->dev), pin, 0);
+
+ return sa1111_pcmcia_hw_init(skt);
}
static int
jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
- unsigned int pa_dwr_mask, pa_dwr_set;
- int ret;
-
-printk("%s(): config socket %d vcc %d vpp %d\n", __func__,
- skt->nr, state->Vcc, state->Vpp);
-
- switch (skt->nr) {
- case 0:
- pa_dwr_mask = SOCKET0_POWER | SOCKET0_3V;
-
- switch (state->Vcc) {
- default:
- case 0: pa_dwr_set = 0; break;
- case 33: pa_dwr_set = SOCKET0_POWER | SOCKET0_3V; break;
- case 50: pa_dwr_set = SOCKET0_POWER; break;
- }
- break;
-
- case 1:
- pa_dwr_mask = SOCKET1_POWER;
-
- switch (state->Vcc) {
- default:
- case 0: pa_dwr_set = 0; break;
- case 33: pa_dwr_set = SOCKET1_POWER; break;
- case 50: pa_dwr_set = SOCKET1_POWER; break;
- }
- break;
-
- default:
- return -1;
- }
-
- if (state->Vpp != state->Vcc && state->Vpp != 0) {
- printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
- __func__, state->Vpp);
- return -1;
- }
-
- ret = sa1111_pcmcia_configure_socket(skt, state);
- if (ret == 0) {
- unsigned long flags;
-
- local_irq_save(flags);
- sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set);
- local_irq_restore(flags);
- }
-
- return ret;
+ unsigned int pa_dwr_mask, pa_dwr_set;
+ int ret;
+
+ printk(KERN_INFO "%s(): config socket %d vcc %d vpp %d\n", __func__,
+ skt->nr, state->Vcc, state->Vpp);
+
+ switch (skt->nr) {
+ case 0:
+ pa_dwr_mask = SOCKET0_POWER | SOCKET0_3V;
+
+ switch (state->Vcc) {
+ default:
+ case 0:
+ pa_dwr_set = 0;
+ break;
+ case 33:
+ pa_dwr_set = SOCKET0_POWER | SOCKET0_3V;
+ break;
+ case 50:
+ pa_dwr_set = SOCKET0_POWER;
+ break;
+ }
+ break;
+
+ case 1:
+ pa_dwr_mask = SOCKET1_POWER;
+
+ switch (state->Vcc) {
+ default:
+ case 0:
+ pa_dwr_set = 0;
+ break;
+ case 33:
+ pa_dwr_set = SOCKET1_POWER;
+ break;
+ case 50:
+ pa_dwr_set = SOCKET1_POWER;
+ break;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ if (state->Vpp != state->Vcc && state->Vpp != 0) {
+ printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
+ __func__, state->Vpp);
+ return -EPERM;
+ }
+
+ ret = sa1111_pcmcia_configure_socket(skt, state);
+ if (ret == 0) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set);
+ local_irq_restore(flags);
+ }
+
+ return ret;
}
static struct pcmcia_low_level jornada720_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = jornada720_pcmcia_hw_init,
- .hw_shutdown = sa1111_pcmcia_hw_shutdown,
- .socket_state = sa1111_pcmcia_socket_state,
- .configure_socket = jornada720_pcmcia_configure_socket,
-
- .socket_init = sa1111_pcmcia_socket_init,
- .socket_suspend = sa1111_pcmcia_socket_suspend,
+ .owner = THIS_MODULE,
+ .hw_init = jornada720_pcmcia_hw_init,
+ .hw_shutdown = sa1111_pcmcia_hw_shutdown,
+ .socket_state = sa1111_pcmcia_socket_state,
+ .configure_socket = jornada720_pcmcia_configure_socket,
+
+ .socket_init = sa1111_pcmcia_socket_init,
+ .socket_suspend = sa1111_pcmcia_socket_suspend,
};
int __devinit pcmcia_jornada720_init(struct device *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 737fe5d87c4..b459e87a30a 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -717,7 +717,7 @@ static void yenta_free_resources(struct yenta_socket *socket)
/*
* Close it down - release our resources and go home..
*/
-static void yenta_close(struct pci_dev *dev)
+static void __devexit yenta_close(struct pci_dev *dev)
{
struct yenta_socket *sock = pci_get_drvdata(dev);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 77c6097ced8..55ca39dea42 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -99,6 +99,7 @@ config FUJITSU_LAPTOP
depends on ACPI
depends on INPUT
depends on BACKLIGHT_CLASS_DEVICE
+ depends on LEDS_CLASS || LEDS_CLASS=n
---help---
This is a driver for laptops built by Fujitsu:
@@ -396,6 +397,15 @@ config ACPI_ASUS
NOTE: This driver is deprecated and will probably be removed soon,
use asus-laptop instead.
+config TOPSTAR_LAPTOP
+ tristate "Topstar Laptop Extras"
+ depends on ACPI
+ depends on INPUT
+ ---help---
+ This driver adds support for hotkeys found on Topstar laptops.
+
+ If you have a Topstar laptop, say Y or M here.
+
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 641b8bfa553..d1c16210a51 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -19,4 +19,5 @@ obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
+obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index fb45f5ee8df..454970d2d70 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -746,7 +746,9 @@ static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
return AE_BAD_PARAMETER;
if (quirks->mailled == 1) {
param = value ? 0x92 : 0x93;
+ i8042_lock_chip();
i8042_command(&param, 0x1059);
+ i8042_unlock_chip();
return 0;
}
break;
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index bdfee177eef..0a8f735f6c4 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -52,7 +52,7 @@
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.13"
+#define DRV_VER "0.5.17"
/*
* According to the Atom N270 datasheet,
@@ -90,6 +90,7 @@ static unsigned int fanoff = 58;
static unsigned int verbose;
static unsigned int fanstate = ACERHDF_FAN_AUTO;
static char force_bios[16];
+static char force_product[16];
static unsigned int prev_interval;
struct thermal_zone_device *thz_dev;
struct thermal_cooling_device *cl_dev;
@@ -107,34 +108,62 @@ module_param(verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
module_param_string(force_bios, force_bios, 16, 0);
MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check");
+module_param_string(force_product, force_product, 16, 0);
+MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");
+
+/*
+ * cmd_off: to switch the fan completely off / to check if the fan is off
+ * cmd_auto: to put the BIOS back in control of the fan. The BIOS then
+ * regulates the fan speed depending on the temperature
+ */
+struct fancmd {
+ u8 cmd_off;
+ u8 cmd_auto;
+};
/* BIOS settings */
struct bios_settings_t {
const char *vendor;
+ const char *product;
const char *version;
unsigned char fanreg;
unsigned char tempreg;
- unsigned char fancmd[2]; /* fan off and auto commands */
+ struct fancmd cmd;
};
/* Register addresses and values for different BIOS versions */
static const struct bios_settings_t bios_tbl[] = {
- {"Acer", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
- {"Gateway", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
- {"", "", 0, 0, {0, 0} }
+ /* AOA110 */
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ /* AOA150 */
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ /* special BIOS / other */
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
+ /* pewpew-terminator */
+ {"", "", "", 0, 0, {0, 0} }
};
static const struct bios_settings_t *bios_cfg __read_mostly;
-
static int acerhdf_get_temp(int *temp)
{
u8 read_temp;
@@ -150,13 +179,14 @@ static int acerhdf_get_temp(int *temp)
static int acerhdf_get_fanstate(int *state)
{
u8 fan;
- bool tmp;
if (ec_read(bios_cfg->fanreg, &fan))
return -EINVAL;
- tmp = (fan == bios_cfg->fancmd[ACERHDF_FAN_OFF]);
- *state = tmp ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO;
+ if (fan != bios_cfg->cmd.cmd_off)
+ *state = ACERHDF_FAN_AUTO;
+ else
+ *state = ACERHDF_FAN_OFF;
return 0;
}
@@ -175,7 +205,8 @@ static void acerhdf_change_fanstate(int state)
state = ACERHDF_FAN_AUTO;
}
- cmd = bios_cfg->fancmd[state];
+ cmd = (state == ACERHDF_FAN_OFF) ? bios_cfg->cmd.cmd_off
+ : bios_cfg->cmd.cmd_auto;
fanstate = state;
ec_write(bios_cfg->fanreg, cmd);
@@ -408,7 +439,7 @@ struct thermal_cooling_device_ops acerhdf_cooling_ops = {
};
/* suspend / resume functionality */
-static int acerhdf_suspend(struct platform_device *dev, pm_message_t state)
+static int acerhdf_suspend(struct device *dev)
{
if (kernelmode)
acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
@@ -419,14 +450,6 @@ static int acerhdf_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
-static int acerhdf_resume(struct platform_device *device)
-{
- if (verbose)
- pr_notice("resuming\n");
-
- return 0;
-}
-
static int __devinit acerhdf_probe(struct platform_device *device)
{
return 0;
@@ -437,15 +460,19 @@ static int acerhdf_remove(struct platform_device *device)
return 0;
}
-struct platform_driver acerhdf_drv = {
+static struct dev_pm_ops acerhdf_pm_ops = {
+ .suspend = acerhdf_suspend,
+ .freeze = acerhdf_suspend,
+};
+
+static struct platform_driver acerhdf_driver = {
.driver = {
- .name = "acerhdf",
+ .name = "acerhdf",
.owner = THIS_MODULE,
+ .pm = &acerhdf_pm_ops,
},
.probe = acerhdf_probe,
.remove = acerhdf_remove,
- .suspend = acerhdf_suspend,
- .resume = acerhdf_resume,
};
@@ -454,32 +481,40 @@ static int acerhdf_check_hardware(void)
{
char const *vendor, *version, *product;
int i;
+ unsigned long prod_len = 0;
/* get BIOS data */
vendor = dmi_get_system_info(DMI_SYS_VENDOR);
version = dmi_get_system_info(DMI_BIOS_VERSION);
product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);
- if (!force_bios[0]) {
- if (strncmp(product, "AO", 2)) {
- pr_err("no Aspire One hardware found\n");
- return -EINVAL;
- }
- } else {
- pr_info("forcing BIOS version: %s\n", version);
+ if (force_bios[0]) {
version = force_bios;
+ pr_info("forcing BIOS version: %s\n", version);
+ kernelmode = 0;
+ }
+
+ if (force_product[0]) {
+ product = force_product;
+ pr_info("forcing BIOS product: %s\n", product);
kernelmode = 0;
}
+ prod_len = strlen(product);
+
if (verbose)
pr_info("BIOS info: %s %s, product: %s\n",
vendor, version, product);
/* search BIOS version and vendor in BIOS settings table */
for (i = 0; bios_tbl[i].version[0]; i++) {
- if (!strcmp(bios_tbl[i].vendor, vendor) &&
+ if (strlen(bios_tbl[i].product) >= prod_len &&
+ !strncmp(bios_tbl[i].product, product,
+ strlen(bios_tbl[i].product)) &&
+ !strcmp(bios_tbl[i].vendor, vendor) &&
!strcmp(bios_tbl[i].version, version)) {
bios_cfg = &bios_tbl[i];
break;
@@ -487,8 +522,8 @@ static int acerhdf_check_hardware(void)
}
if (!bios_cfg) {
- pr_err("unknown (unsupported) BIOS version %s/%s, "
- "please report, aborting!\n", vendor, version);
+ pr_err("unknown (unsupported) BIOS version %s/%s/%s, "
+ "please report, aborting!\n", vendor, product, version);
return -EINVAL;
}
@@ -509,7 +544,7 @@ static int acerhdf_register_platform(void)
{
int err = 0;
- err = platform_driver_register(&acerhdf_drv);
+ err = platform_driver_register(&acerhdf_driver);
if (err)
return err;
@@ -525,7 +560,7 @@ static void acerhdf_unregister_platform(void)
return;
platform_device_del(acerhdf_dev);
- platform_driver_unregister(&acerhdf_drv);
+ platform_driver_unregister(&acerhdf_driver);
}
static int acerhdf_register_thermal(void)
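The acerhdf.c hunks above replace the two-element fancmd[] array with a named struct fancmd, so reading the fan state reduces to comparing the EC fan register against cmd_off. A tiny illustrative helper (not in the patch) capturing that logic:

static int decode_fanstate(u8 fanreg_val, const struct fancmd *cmd)
{
	/* anything other than the "off" command means the BIOS is in control */
	return (fanreg_val == cmd->cmd_off) ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO;
}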
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index db657bbeec9..b39d2bb3e75 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -77,15 +77,16 @@
* Flags for hotk status
* WL_ON and BT_ON are also used for wireless_status()
*/
-#define WL_ON 0x01 //internal Wifi
-#define BT_ON 0x02 //internal Bluetooth
-#define MLED_ON 0x04 //mail LED
-#define TLED_ON 0x08 //touchpad LED
-#define RLED_ON 0x10 //Record LED
-#define PLED_ON 0x20 //Phone LED
-#define GLED_ON 0x40 //Gaming LED
-#define LCD_ON 0x80 //LCD backlight
-#define GPS_ON 0x100 //GPS
+#define WL_ON 0x01 /* internal Wifi */
+#define BT_ON 0x02 /* internal Bluetooth */
+#define MLED_ON 0x04 /* mail LED */
+#define TLED_ON 0x08 /* touchpad LED */
+#define RLED_ON 0x10 /* Record LED */
+#define PLED_ON 0x20 /* Phone LED */
+#define GLED_ON 0x40 /* Gaming LED */
+#define LCD_ON 0x80 /* LCD backlight */
+#define GPS_ON 0x100 /* GPS */
+#define KEY_ON 0x200 /* Keyboard backlight */
#define ASUS_LOG ASUS_HOTK_FILE ": "
#define ASUS_ERR KERN_ERR ASUS_LOG
@@ -98,7 +99,8 @@ MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
MODULE_DESCRIPTION(ASUS_HOTK_NAME);
MODULE_LICENSE("GPL");
-/* WAPF defines the behavior of the Fn+Fx wlan key
+/*
+ * WAPF defines the behavior of the Fn+Fx wlan key
* The significance of values is yet to be found, but
* most of the time:
* 0x0 will do nothing
@@ -125,7 +127,8 @@ ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */
/* LEDD */
ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
-/* Bluetooth and WLAN
+/*
+ * Bluetooth and WLAN
* WLED and BLED are not handled like other XLED, because in some dsdt
* they also control the WLAN/Bluetooth device.
*/
@@ -149,22 +152,32 @@ ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
/* Display */
ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
-ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
- M6A M6V VX-1 V6J V6V W3Z */
- "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
- S5A M5A z33A W1Jc W2V G1 */
- "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
- "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
- "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
- "\\_SB.PCI0.VGA.GETD", /* Z96F */
- "\\ACTD", /* A2D */
- "\\ADVG", /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
- "\\DNXT", /* P30 */
- "\\INFB", /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
- "\\SSTE"); /* A3F A6F A3N A3L M6N W3N W6A */
-
-ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
-ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
+ASUS_HANDLE(display_get,
+ /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
+ "\\_SB.PCI0.P0P1.VGA.GETD",
+ /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
+ "\\_SB.PCI0.P0P2.VGA.GETD",
+ /* A6V A6Q */
+ "\\_SB.PCI0.P0P3.VGA.GETD",
+ /* A6T, A6M */
+ "\\_SB.PCI0.P0PA.VGA.GETD",
+ /* L3C */
+ "\\_SB.PCI0.PCI1.VGAC.NMAP",
+ /* Z96F */
+ "\\_SB.PCI0.VGA.GETD",
+ /* A2D */
+ "\\ACTD",
+ /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
+ "\\ADVG",
+ /* P30 */
+ "\\DNXT",
+ /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
+ "\\INFB",
+ /* A3F A6F A3N A3L M6N W3N W6A */
+ "\\SSTE");
+
+ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
+ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
/* GPS */
/* R2H use different handle for GPS on/off */
@@ -172,19 +185,23 @@ ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
+/* Keyboard light */
+ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB");
+ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB");
+
/*
* This is the main structure, we can use it to store anything interesting
* about the hotk device
*/
struct asus_hotk {
- char *name; //laptop name
- struct acpi_device *device; //the device we are in
- acpi_handle handle; //the handle of the hotk device
- char status; //status of the hotk, for LEDs, ...
- u32 ledd_status; //status of the LED display
- u8 light_level; //light sensor level
- u8 light_switch; //light sensor switch value
- u16 event_count[128]; //count for each event TODO make this better
+ char *name; /* laptop name */
+ struct acpi_device *device; /* the device we are in */
+ acpi_handle handle; /* the handle of the hotk device */
+ char status; /* status of the hotk, for LEDs, ... */
+ u32 ledd_status; /* status of the LED display */
+ u8 light_level; /* light sensor level */
+ u8 light_switch; /* light sensor switch value */
+ u16 event_count[128]; /* count for each event TODO make this better */
struct input_dev *inputdev;
u16 *keycode_map;
};
@@ -237,28 +254,35 @@ static struct backlight_ops asusbl_ops = {
.update_status = update_bl_status,
};
-/* These functions actually update the LED's, and are called from a
+/*
+ * These functions actually update the LED's, and are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
- * potentially bad time, such as a timer interrupt. */
+ * potentially bad time, such as a timer interrupt.
+ */
static struct workqueue_struct *led_workqueue;
-#define ASUS_LED(object, ledname) \
+#define ASUS_LED(object, ledname, max) \
static void object##_led_set(struct led_classdev *led_cdev, \
enum led_brightness value); \
+ static enum led_brightness object##_led_get( \
+ struct led_classdev *led_cdev); \
static void object##_led_update(struct work_struct *ignored); \
static int object##_led_wk; \
static DECLARE_WORK(object##_led_work, object##_led_update); \
static struct led_classdev object##_led = { \
.name = "asus::" ledname, \
.brightness_set = object##_led_set, \
+ .brightness_get = object##_led_get, \
+ .max_brightness = max \
}
-ASUS_LED(mled, "mail");
-ASUS_LED(tled, "touchpad");
-ASUS_LED(rled, "record");
-ASUS_LED(pled, "phone");
-ASUS_LED(gled, "gaming");
+ASUS_LED(mled, "mail", 1);
+ASUS_LED(tled, "touchpad", 1);
+ASUS_LED(rled, "record", 1);
+ASUS_LED(pled, "phone", 1);
+ASUS_LED(gled, "gaming", 1);
+ASUS_LED(kled, "kbd_backlight", 3);
struct key_entry {
char type;
@@ -278,16 +302,23 @@ static struct key_entry asus_keymap[] = {
{KE_KEY, 0x41, KEY_NEXTSONG},
{KE_KEY, 0x43, KEY_STOPCD},
{KE_KEY, 0x45, KEY_PLAYPAUSE},
+ {KE_KEY, 0x4c, KEY_MEDIA},
{KE_KEY, 0x50, KEY_EMAIL},
{KE_KEY, 0x51, KEY_WWW},
+ {KE_KEY, 0x55, KEY_CALC},
{KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */
{KE_KEY, 0x5D, KEY_WLAN},
+ {KE_KEY, 0x5E, KEY_WLAN},
+ {KE_KEY, 0x5F, KEY_WLAN},
+ {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
{KE_KEY, 0x82, KEY_CAMERA},
{KE_KEY, 0x8A, KEY_PROG1},
{KE_KEY, 0x95, KEY_MEDIA},
{KE_KEY, 0x99, KEY_PHONE},
+ {KE_KEY, 0xc4, KEY_KBDILLUMUP},
+ {KE_KEY, 0xc5, KEY_KBDILLUMDOWN},
{KE_END, 0},
};
@@ -301,8 +332,8 @@ static struct key_entry asus_keymap[] = {
static int write_acpi_int(acpi_handle handle, const char *method, int val,
struct acpi_buffer *output)
{
- struct acpi_object_list params; //list of input parameters (an int here)
- union acpi_object in_obj; //the only param we use
+ struct acpi_object_list params; /* list of input parameters (an int) */
+ union acpi_object in_obj; /* the only param we use */
acpi_status status;
if (!handle)
@@ -399,6 +430,11 @@ static void write_status(acpi_handle handle, int out, int mask)
{ \
int value = object##_led_wk; \
write_status(object##_set_handle, value, (mask)); \
+ } \
+ static enum led_brightness object##_led_get( \
+ struct led_classdev *led_cdev) \
+ { \
+ return led_cdev->brightness; \
}
ASUS_LED_HANDLER(mled, MLED_ON);
@@ -407,6 +443,60 @@ ASUS_LED_HANDLER(rled, RLED_ON);
ASUS_LED_HANDLER(tled, TLED_ON);
ASUS_LED_HANDLER(gled, GLED_ON);
+/*
+ * Keyboard backlight
+ */
+static int get_kled_lvl(void)
+{
+ unsigned long long kblv;
+ struct acpi_object_list params;
+ union acpi_object in_obj;
+ acpi_status rv;
+
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = 2;
+
+ rv = acpi_evaluate_integer(kled_get_handle, NULL, &params, &kblv);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading kled level\n");
+ return 0;
+ }
+ return kblv;
+}
+
+static int set_kled_lvl(int kblv)
+{
+ if (kblv > 0)
+ kblv = (1 << 7) | (kblv & 0x7F);
+ else
+ kblv = 0;
+
+ if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) {
+ pr_warning("Keyboard LED display write failed\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void kled_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ kled_led_wk = value;
+ queue_work(led_workqueue, &kled_led_work);
+}
+
+static void kled_led_update(struct work_struct *ignored)
+{
+ set_kled_lvl(kled_led_wk);
+}
+
+static enum led_brightness kled_led_get(struct led_classdev *led_cdev)
+{
+ return get_kled_lvl();
+}
+
static int get_lcd_state(void)
{
return read_status(LCD_ON);
@@ -498,7 +588,7 @@ static ssize_t show_infos(struct device *dev,
{
int len = 0;
unsigned long long temp;
- char buf[16]; //enough for all info
+ char buf[16]; /* enough for all info */
acpi_status rv = AE_OK;
/*
@@ -516,7 +606,17 @@ static ssize_t show_infos(struct device *dev,
*/
rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp);
if (!ACPI_FAILURE(rv))
- len += sprintf(page + len, "SFUN value : 0x%04x\n",
+ len += sprintf(page + len, "SFUN value : %#x\n",
+ (uint) temp);
+ /*
+ * The HWRS method returns information about the hardware.
+ * 0x80 bit is for WLAN, 0x100 for Bluetooth.
+ * The significance of others is yet to be found.
+ * If we don't find the method, we assume the devices are present.
+ */
+ rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp);
+ if (!ACPI_FAILURE(rv))
+ len += sprintf(page + len, "HRWS value : %#x\n",
(uint) temp);
/*
* Another value for userspace: the ASYM method returns 0x02 for
@@ -527,7 +627,7 @@ static ssize_t show_infos(struct device *dev,
*/
rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp);
if (!ACPI_FAILURE(rv))
- len += sprintf(page + len, "ASYM value : 0x%04x\n",
+ len += sprintf(page + len, "ASYM value : %#x\n",
(uint) temp);
if (asus_info) {
snprintf(buf, 16, "%d", asus_info->length);
@@ -648,8 +748,10 @@ static int read_display(void)
unsigned long long value = 0;
acpi_status rv = AE_OK;
- /* In most of the case, we know how to set the display, but sometime
- we can't read it */
+ /*
+ * In most cases, we know how to set the display, but sometimes
+ * we can't read it.
+ */
if (display_get_handle) {
rv = acpi_evaluate_integer(display_get_handle, NULL,
NULL, &value);
@@ -1037,6 +1139,9 @@ static int asus_hotk_get_info(void)
ASUS_HANDLE_INIT(ledd_set);
+ ASUS_HANDLE_INIT(kled_set);
+ ASUS_HANDLE_INIT(kled_get);
+
/*
* The HWRS method returns information about the hardware.
* 0x80 bit is for WLAN, 0x100 for Bluetooth.
@@ -1063,8 +1168,10 @@ static int asus_hotk_get_info(void)
ASUS_HANDLE_INIT(display_set);
ASUS_HANDLE_INIT(display_get);
- /* There is a lot of models with "ALSL", but a few get
- a real light sens, so we need to check it. */
+ /*
+ * There are a lot of models with "ALSL", but only a few have
+ * a real light sensor, so we need to check it.
+ */
if (!ASUS_HANDLE_INIT(ls_switch))
ASUS_HANDLE_INIT(ls_level);
@@ -1168,6 +1275,10 @@ static int asus_hotk_add(struct acpi_device *device)
/* LCD Backlight is on by default */
write_status(NULL, 1, LCD_ON);
+ /* Keyboard Backlight is on by default */
+ if (kled_set_handle)
+ set_kled_lvl(1);
+
/* LED display is off by default */
hotk->ledd_status = 0xFFF;
@@ -1222,6 +1333,7 @@ static void asus_led_exit(void)
ASUS_LED_UNREGISTER(pled);
ASUS_LED_UNREGISTER(rled);
ASUS_LED_UNREGISTER(gled);
+ ASUS_LED_UNREGISTER(kled);
}
static void asus_input_exit(void)
@@ -1301,13 +1413,20 @@ static int asus_led_init(struct device *dev)
if (rv)
goto out4;
+ if (kled_set_handle && kled_get_handle)
+ rv = ASUS_LED_REGISTER(kled, dev);
+ if (rv)
+ goto out5;
+
led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!led_workqueue)
- goto out5;
+ goto out6;
return 0;
-out5:
+out6:
rv = -ENOMEM;
+ ASUS_LED_UNREGISTER(kled);
+out5:
ASUS_LED_UNREGISTER(gled);
out4:
ASUS_LED_UNREGISTER(pled);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 222ffb892f2..da3c08b3dcc 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -142,18 +142,28 @@ struct eeepc_hotk {
struct rfkill *wlan_rfkill;
struct rfkill *bluetooth_rfkill;
struct rfkill *wwan3g_rfkill;
+ struct rfkill *wimax_rfkill;
struct hotplug_slot *hotplug_slot;
- struct work_struct hotplug_work;
+ struct mutex hotplug_lock;
};
/* The actual device the driver binds to */
static struct eeepc_hotk *ehotk;
/* Platform device/driver */
+static int eeepc_hotk_thaw(struct device *device);
+static int eeepc_hotk_restore(struct device *device);
+
+static struct dev_pm_ops eeepc_pm_ops = {
+ .thaw = eeepc_hotk_thaw,
+ .restore = eeepc_hotk_restore,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = EEEPC_HOTK_FILE,
.owner = THIS_MODULE,
+ .pm = &eeepc_pm_ops,
}
};
@@ -192,7 +202,6 @@ static struct key_entry eeepc_keymap[] = {
*/
static int eeepc_hotk_add(struct acpi_device *device);
static int eeepc_hotk_remove(struct acpi_device *device, int type);
-static int eeepc_hotk_resume(struct acpi_device *device);
static void eeepc_hotk_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id eeepc_device_ids[] = {
@@ -209,7 +218,6 @@ static struct acpi_driver eeepc_hotk_driver = {
.ops = {
.add = eeepc_hotk_add,
.remove = eeepc_hotk_remove,
- .resume = eeepc_hotk_resume,
.notify = eeepc_hotk_notify,
},
};
@@ -579,7 +587,6 @@ static void cmsg_quirks(void)
static int eeepc_hotk_check(void)
{
- const struct key_entry *key;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
int result;
@@ -604,31 +611,6 @@ static int eeepc_hotk_check(void)
pr_info("Get control methods supported: 0x%x\n",
ehotk->cm_supported);
}
- ehotk->inputdev = input_allocate_device();
- if (!ehotk->inputdev) {
- pr_info("Unable to allocate input device\n");
- return 0;
- }
- ehotk->inputdev->name = "Asus EeePC extra buttons";
- ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
- ehotk->inputdev->id.bustype = BUS_HOST;
- ehotk->inputdev->getkeycode = eeepc_getkeycode;
- ehotk->inputdev->setkeycode = eeepc_setkeycode;
-
- for (key = eeepc_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, ehotk->inputdev->evbit);
- set_bit(key->keycode, ehotk->inputdev->keybit);
- break;
- }
- }
- result = input_register_device(ehotk->inputdev);
- if (result) {
- pr_info("Unable to register input device\n");
- input_free_device(ehotk->inputdev);
- return 0;
- }
} else {
pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
@@ -661,40 +643,48 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static void eeepc_hotplug_work(struct work_struct *work)
+static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
- struct pci_bus *bus = pci_find_bus(0, 1);
- bool blocked;
+ struct pci_bus *bus;
+ bool blocked = eeepc_wlan_rfkill_blocked();
- if (!bus) {
- pr_warning("Unable to find PCI bus 1?\n");
- return;
- }
+ if (ehotk->wlan_rfkill)
+ rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
- blocked = eeepc_wlan_rfkill_blocked();
- if (!blocked) {
- dev = pci_get_slot(bus, 0);
- if (dev) {
- /* Device already present */
- pci_dev_put(dev);
- return;
- }
- dev = pci_scan_single_device(bus, 0);
- if (dev) {
- pci_bus_assign_resources(bus);
- if (pci_bus_add_device(dev))
- pr_err("Unable to hotplug wifi\n");
+ mutex_lock(&ehotk->hotplug_lock);
+
+ if (ehotk->hotplug_slot) {
+ bus = pci_find_bus(0, 1);
+ if (!bus) {
+ pr_warning("Unable to find PCI bus 1?\n");
+ goto out_unlock;
}
- } else {
- dev = pci_get_slot(bus, 0);
- if (dev) {
- pci_remove_bus_device(dev);
- pci_dev_put(dev);
+
+ if (!blocked) {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ /* Device already present */
+ pci_dev_put(dev);
+ goto out_unlock;
+ }
+ dev = pci_scan_single_device(bus, 0);
+ if (dev) {
+ pci_bus_assign_resources(bus);
+ if (pci_bus_add_device(dev))
+ pr_err("Unable to hotplug wifi\n");
+ }
+ } else {
+ dev = pci_get_slot(bus, 0);
+ if (dev) {
+ pci_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
}
}
- rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
+out_unlock:
+ mutex_unlock(&ehotk->hotplug_lock);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
@@ -702,7 +692,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
- schedule_work(&ehotk->hotplug_work);
+ eeepc_rfkill_hotplug();
}
static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
@@ -839,66 +829,38 @@ error_slot:
return ret;
}
-static int eeepc_hotk_add(struct acpi_device *device)
-{
- int result;
-
- if (!device)
- return -EINVAL;
- pr_notice(EEEPC_HOTK_NAME "\n");
- ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
- if (!ehotk)
- return -ENOMEM;
- ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
- ehotk->handle = device->handle;
- strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
- strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
- device->driver_data = ehotk;
- ehotk->device = device;
- result = eeepc_hotk_check();
- if (result)
- goto ehotk_fail;
-
- return 0;
-
- ehotk_fail:
- kfree(ehotk);
- ehotk = NULL;
-
- return result;
-}
-
-static int eeepc_hotk_remove(struct acpi_device *device, int type)
-{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- kfree(ehotk);
- return 0;
-}
-
-static int eeepc_hotk_resume(struct acpi_device *device)
+static int eeepc_hotk_thaw(struct device *device)
{
if (ehotk->wlan_rfkill) {
bool wlan;
- /* Workaround - it seems that _PTS disables the wireless
- without notification or changing the value read by WLAN.
- Normally this is fine because the correct value is restored
- from the non-volatile storage on resume, but we need to do
- it ourself if case suspend is aborted, or we lose wireless.
+ /*
+ * Work around bios bug - acpi _PTS turns off the wireless led
+ * during suspend. Normally it restores it on resume, but
+ * we should kick it ourselves in case hibernation is aborted.
*/
wlan = get_acpi(CM_ASL_WLAN);
set_acpi(CM_ASL_WLAN, wlan);
+ }
- rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
+ return 0;
+}
- schedule_work(&ehotk->hotplug_work);
- }
+static int eeepc_hotk_restore(struct device *device)
+{
+ /* Refresh both wlan rfkill state and pci hotplug */
+ if (ehotk->wlan_rfkill)
+ eeepc_rfkill_hotplug();
if (ehotk->bluetooth_rfkill)
rfkill_set_sw_state(ehotk->bluetooth_rfkill,
get_acpi(CM_ASL_BLUETOOTH) != 1);
+ if (ehotk->wwan3g_rfkill)
+ rfkill_set_sw_state(ehotk->wwan3g_rfkill,
+ get_acpi(CM_ASL_3G) != 1);
+ if (ehotk->wimax_rfkill)
+ rfkill_set_sw_state(ehotk->wimax_rfkill,
+ get_acpi(CM_ASL_WIMAX) != 1);
return 0;
}
@@ -1019,16 +981,37 @@ static void eeepc_backlight_exit(void)
static void eeepc_rfkill_exit(void)
{
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P5");
eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
- if (ehotk->wlan_rfkill)
+ if (ehotk->wlan_rfkill) {
rfkill_unregister(ehotk->wlan_rfkill);
- if (ehotk->bluetooth_rfkill)
- rfkill_unregister(ehotk->bluetooth_rfkill);
- if (ehotk->wwan3g_rfkill)
- rfkill_unregister(ehotk->wwan3g_rfkill);
+ rfkill_destroy(ehotk->wlan_rfkill);
+ ehotk->wlan_rfkill = NULL;
+ }
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed after
+ * eeepc_unregister_rfkill_notifier()
+ */
+ eeepc_rfkill_hotplug();
if (ehotk->hotplug_slot)
pci_hp_deregister(ehotk->hotplug_slot);
+
+ if (ehotk->bluetooth_rfkill) {
+ rfkill_unregister(ehotk->bluetooth_rfkill);
+ rfkill_destroy(ehotk->bluetooth_rfkill);
+ ehotk->bluetooth_rfkill = NULL;
+ }
+ if (ehotk->wwan3g_rfkill) {
+ rfkill_unregister(ehotk->wwan3g_rfkill);
+ rfkill_destroy(ehotk->wwan3g_rfkill);
+ ehotk->wwan3g_rfkill = NULL;
+ }
+ if (ehotk->wimax_rfkill) {
+ rfkill_unregister(ehotk->wimax_rfkill);
+ rfkill_destroy(ehotk->wimax_rfkill);
+ ehotk->wimax_rfkill = NULL;
+ }
}
static void eeepc_input_exit(void)
@@ -1050,19 +1033,6 @@ static void eeepc_hwmon_exit(void)
eeepc_hwmon_device = NULL;
}
-static void __exit eeepc_laptop_exit(void)
-{
- eeepc_backlight_exit();
- eeepc_rfkill_exit();
- eeepc_input_exit();
- eeepc_hwmon_exit();
- acpi_bus_unregister_driver(&eeepc_hotk_driver);
- sysfs_remove_group(&platform_device->dev.kobj,
- &platform_attribute_group);
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
-}
-
static int eeepc_new_rfkill(struct rfkill **rfkill,
const char *name, struct device *dev,
enum rfkill_type type, int cm)
@@ -1094,10 +1064,7 @@ static int eeepc_rfkill_init(struct device *dev)
{
int result = 0;
- INIT_WORK(&ehotk->hotplug_work, eeepc_hotplug_work);
-
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+ mutex_init(&ehotk->hotplug_lock);
result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
"eeepc-wlan", dev,
@@ -1120,6 +1087,13 @@ static int eeepc_rfkill_init(struct device *dev)
if (result && result != -ENODEV)
goto exit;
+ result = eeepc_new_rfkill(&ehotk->wimax_rfkill,
+ "eeepc-wimax", dev,
+ RFKILL_TYPE_WIMAX, CM_ASL_WIMAX);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
result = eeepc_setup_pci_hotplug();
/*
* If we get -EBUSY then something else is handling the PCI hotplug -
@@ -1128,6 +1102,15 @@ static int eeepc_rfkill_init(struct device *dev)
if (result == -EBUSY)
result = 0;
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P5");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed during
+ * setup.
+ */
+ eeepc_rfkill_hotplug();
+
exit:
if (result && result != -ENODEV)
eeepc_rfkill_exit();
@@ -1172,21 +1155,61 @@ static int eeepc_hwmon_init(struct device *dev)
return result;
}
-static int __init eeepc_laptop_init(void)
+static int eeepc_input_init(struct device *dev)
{
- struct device *dev;
+ const struct key_entry *key;
int result;
- if (acpi_disabled)
- return -ENODEV;
- result = acpi_bus_register_driver(&eeepc_hotk_driver);
- if (result < 0)
+ ehotk->inputdev = input_allocate_device();
+ if (!ehotk->inputdev) {
+ pr_info("Unable to allocate input device\n");
+ return -ENOMEM;
+ }
+ ehotk->inputdev->name = "Asus EeePC extra buttons";
+ ehotk->inputdev->dev.parent = dev;
+ ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
+ ehotk->inputdev->id.bustype = BUS_HOST;
+ ehotk->inputdev->getkeycode = eeepc_getkeycode;
+ ehotk->inputdev->setkeycode = eeepc_setkeycode;
+
+ for (key = eeepc_keymap; key->type != KE_END; key++) {
+ switch (key->type) {
+ case KE_KEY:
+ set_bit(EV_KEY, ehotk->inputdev->evbit);
+ set_bit(key->keycode, ehotk->inputdev->keybit);
+ break;
+ }
+ }
+ result = input_register_device(ehotk->inputdev);
+ if (result) {
+ pr_info("Unable to register input device\n");
+ input_free_device(ehotk->inputdev);
return result;
- if (!ehotk) {
- acpi_bus_unregister_driver(&eeepc_hotk_driver);
- return -ENODEV;
}
+ return 0;
+}
+
+static int eeepc_hotk_add(struct acpi_device *device)
+{
+ struct device *dev;
+ int result;
+ if (!device)
+ return -EINVAL;
+ pr_notice(EEEPC_HOTK_NAME "\n");
+ ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
+ if (!ehotk)
+ return -ENOMEM;
+ ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
+ ehotk->handle = device->handle;
+ strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
+ strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
+ device->driver_data = ehotk;
+ ehotk->device = device;
+
+ result = eeepc_hotk_check();
+ if (result)
+ goto fail_platform_driver;
eeepc_enable_camera();
/* Register platform stuff */
@@ -1216,6 +1239,10 @@ static int __init eeepc_laptop_init(void)
pr_info("Backlight controlled by ACPI video "
"driver\n");
+ result = eeepc_input_init(dev);
+ if (result)
+ goto fail_input;
+
result = eeepc_hwmon_init(dev);
if (result)
goto fail_hwmon;
@@ -1225,9 +1252,12 @@ static int __init eeepc_laptop_init(void)
goto fail_rfkill;
return 0;
+
fail_rfkill:
eeepc_hwmon_exit();
fail_hwmon:
+ eeepc_input_exit();
+fail_input:
eeepc_backlight_exit();
fail_backlight:
sysfs_remove_group(&platform_device->dev.kobj,
@@ -1239,9 +1269,49 @@ fail_platform_device2:
fail_platform_device1:
platform_driver_unregister(&platform_driver);
fail_platform_driver:
- eeepc_input_exit();
+ kfree(ehotk);
+
return result;
}
+static int eeepc_hotk_remove(struct acpi_device *device, int type)
+{
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ eeepc_backlight_exit();
+ eeepc_rfkill_exit();
+ eeepc_input_exit();
+ eeepc_hwmon_exit();
+ sysfs_remove_group(&platform_device->dev.kobj,
+ &platform_attribute_group);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+
+ kfree(ehotk);
+ return 0;
+}
+
+static int __init eeepc_laptop_init(void)
+{
+ int result;
+
+ if (acpi_disabled)
+ return -ENODEV;
+ result = acpi_bus_register_driver(&eeepc_hotk_driver);
+ if (result < 0)
+ return result;
+ if (!ehotk) {
+ acpi_bus_unregister_driver(&eeepc_hotk_driver);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void __exit eeepc_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&eeepc_hotk_driver);
+}
+
module_init(eeepc_laptop_init);
module_exit(eeepc_laptop_exit);
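The eeepc-laptop.c hunks above pair every rfkill_unregister() with rfkill_destroy() and clear the stored pointer, so the restore and teardown paths can never touch a freed switch. A sketch of that teardown pattern (helper name is illustrative):

static void example_rfkill_teardown(struct rfkill **rfk)
{
	if (*rfk) {
		rfkill_unregister(*rfk);
		rfkill_destroy(*rfk);
		*rfk = NULL;	/* later callers see "no switch", not a stale pointer */
	}
}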
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 218b9a16ac3..f35aee5c214 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -66,11 +66,11 @@
#include <linux/kfifo.h>
#include <linux/video_output.h>
#include <linux/platform_device.h>
-#ifdef CONFIG_LEDS_CLASS
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
#include <linux/leds.h>
#endif
-#define FUJITSU_DRIVER_VERSION "0.5.0"
+#define FUJITSU_DRIVER_VERSION "0.6.0"
#define FUJITSU_LCD_N_LEVELS 8
@@ -96,7 +96,7 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
-#ifdef CONFIG_LEDS_CLASS
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
#define FUNC_LED_ON 0x30001
@@ -176,7 +176,7 @@ static struct fujitsu_hotkey_t *fujitsu_hotkey;
static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
-#ifdef CONFIG_LEDS_CLASS
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
static void logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness);
@@ -257,7 +257,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return out_obj.integer.value;
}
-#ifdef CONFIG_LEDS_CLASS
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
/* LED class callbacks */
static void logolamp_set(struct led_classdev *cdev,
@@ -324,9 +324,6 @@ static int set_lcd_level(int level)
if (level < 0 || level >= fujitsu->max_brightness)
return -EINVAL;
- if (!fujitsu)
- return -EINVAL;
-
status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
@@ -355,9 +352,6 @@ static int set_lcd_level_alt(int level)
if (level < 0 || level >= fujitsu->max_brightness)
return -EINVAL;
- if (!fujitsu)
- return -EINVAL;
-
status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
@@ -697,10 +691,10 @@ static int acpi_fujitsu_add(struct acpi_device *device)
result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
if (result) {
printk(KERN_ERR "Error reading power state\n");
- goto end;
+ goto err_unregister_input_dev;
}
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -728,25 +722,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
return result;
-end:
+err_unregister_input_dev:
+ input_unregister_device(input);
err_free_input_dev:
input_free_device(input);
err_stop:
-
return result;
}
static int acpi_fujitsu_remove(struct acpi_device *device, int type)
{
- struct fujitsu_t *fujitsu = NULL;
+ struct fujitsu_t *fujitsu = acpi_driver_data(device);
+ struct input_dev *input = fujitsu->input;
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
+ input_unregister_device(input);
- fujitsu = acpi_driver_data(device);
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
+ input_free_device(input);
fujitsu->acpi_handle = NULL;
@@ -871,10 +862,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
if (result) {
printk(KERN_ERR "Error reading power state\n");
- goto end;
+ goto err_unregister_input_dev;
}
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -911,7 +902,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
- #ifdef CONFIG_LEDS_CLASS
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
result = led_classdev_register(&fujitsu->pf_device->dev,
&logolamp_led);
@@ -934,33 +925,41 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
"LED handler for keyboard lamps, error %i\n", result);
}
}
- #endif
+#endif
return result;
-end:
+err_unregister_input_dev:
+ input_unregister_device(input);
err_free_input_dev:
input_free_device(input);
err_free_fifo:
kfifo_free(fujitsu_hotkey->fifo);
err_stop:
-
return result;
}
static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
{
- struct fujitsu_hotkey_t *fujitsu_hotkey = NULL;
+ struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
+ struct input_dev *input = fujitsu_hotkey->input;
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
+#ifdef CONFIG_LEDS_CLASS
+ if (fujitsu_hotkey->logolamp_registered)
+ led_classdev_unregister(&logolamp_led);
- fujitsu_hotkey = acpi_driver_data(device);
+ if (fujitsu_hotkey->kblamps_registered)
+ led_classdev_unregister(&kblamps_led);
+#endif
- fujitsu_hotkey->acpi_handle = NULL;
+ input_unregister_device(input);
+
+ input_free_device(input);
kfifo_free(fujitsu_hotkey->fifo);
+ fujitsu_hotkey->acpi_handle = NULL;
+
return 0;
}
@@ -1130,8 +1129,11 @@ static int __init fujitsu_init(void)
fujitsu->bl_device =
backlight_device_register("fujitsu-laptop", NULL, NULL,
&fujitsubl_ops);
- if (IS_ERR(fujitsu->bl_device))
- return PTR_ERR(fujitsu->bl_device);
+ if (IS_ERR(fujitsu->bl_device)) {
+ ret = PTR_ERR(fujitsu->bl_device);
+ fujitsu->bl_device = NULL;
+ goto fail_sysfs_group;
+ }
max_brightness = fujitsu->max_brightness;
fujitsu->bl_device->props.max_brightness = max_brightness - 1;
fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
@@ -1171,32 +1173,22 @@ static int __init fujitsu_init(void)
return 0;
fail_hotkey1:
-
kfree(fujitsu_hotkey);
-
fail_hotkey:
-
platform_driver_unregister(&fujitsupf_driver);
-
fail_backlight:
-
if (fujitsu->bl_device)
backlight_device_unregister(fujitsu->bl_device);
-
+fail_sysfs_group:
+ sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
+ &fujitsupf_attribute_group);
fail_platform_device2:
-
platform_device_del(fujitsu->pf_device);
-
fail_platform_device1:
-
platform_device_put(fujitsu->pf_device);
-
fail_platform_driver:
-
acpi_bus_unregister_driver(&acpi_fujitsu_driver);
-
fail_acpi:
-
kfree(fujitsu);
return ret;
@@ -1204,28 +1196,23 @@ fail_acpi:
static void __exit fujitsu_cleanup(void)
{
- #ifdef CONFIG_LEDS_CLASS
- if (fujitsu_hotkey->logolamp_registered != 0)
- led_classdev_unregister(&logolamp_led);
+ acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
- if (fujitsu_hotkey->kblamps_registered != 0)
- led_classdev_unregister(&kblamps_led);
- #endif
+ kfree(fujitsu_hotkey);
- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
- platform_device_unregister(fujitsu->pf_device);
platform_driver_unregister(&fujitsupf_driver);
+
if (fujitsu->bl_device)
backlight_device_unregister(fujitsu->bl_device);
- acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+ sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
+ &fujitsupf_attribute_group);
- kfree(fujitsu);
+ platform_device_unregister(fujitsu->pf_device);
- acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_driver);
- kfree(fujitsu_hotkey);
+ kfree(fujitsu);
printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
}
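The fujitsu-laptop.c hunks above widen the LED-class guard so the LED code is also built when CONFIG_LEDS_CLASS=m, by testing the _MODULE symbol as well. A sketch of the idiom (later kernels express the same check with IS_ENABLED()):

#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
#define EXAMPLE_HAVE_LEDS 1	/* built-in or module: LED class calls are available */
#else
#define EXAMPLE_HAVE_LEDS 0
#endif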
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index af04f5b049d..c2842171cec 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -507,7 +507,7 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
}
if (bluetooth_rfkill) {
rfkill_unregister(bluetooth_rfkill);
- rfkill_destroy(wifi_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
}
if (wwan_rfkill) {
rfkill_unregister(wwan_rfkill);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index dafaa4a92df..f9f68e0e734 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -976,15 +976,12 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
void *context, void **return_value)
{
struct acpi_device_info *info;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-
- if (ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) {
- info = buffer.pointer;
+ if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n",
(char *)&info->name, info->param_count);
- kfree(buffer.pointer);
+ kfree(info);
}
return AE_OK;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e8560085250..3910f2f3ead 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
*/
#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020400
+#define TPACPI_SYSFS_VERSION 0x020500
/*
* Changelog:
@@ -145,6 +145,51 @@ enum {
TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */
};
+/* HKEY events */
+enum tpacpi_hkey_event_t {
+ /* Hotkey-related */
+ TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */
+ TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */
+ TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */
+ TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
+ TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
+ TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
+
+ /* Reasons for waking up from S3/S4 */
+ TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
+ TP_HKEY_EV_WKUP_S4_UNDOCK = 0x2404, /* undock requested, S4 */
+ TP_HKEY_EV_WKUP_S3_BAYEJ = 0x2305, /* bay ejection req, S3 */
+ TP_HKEY_EV_WKUP_S4_BAYEJ = 0x2405, /* bay ejection req, S4 */
+ TP_HKEY_EV_WKUP_S3_BATLOW = 0x2313, /* battery empty, S3 */
+ TP_HKEY_EV_WKUP_S4_BATLOW = 0x2413, /* battery empty, S4 */
+
+ /* Auto-sleep after eject request */
+ TP_HKEY_EV_BAYEJ_ACK = 0x3003, /* bay ejection complete */
+ TP_HKEY_EV_UNDOCK_ACK = 0x4003, /* undock complete */
+
+ /* Misc bay events */
+ TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
+
+ /* User-interface events */
+ TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
+ TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
+ TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
+ TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
+ TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
+ TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
+ TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
+
+ /* Thermal events */
+ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
+ TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
+ TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
+ TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
+ TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
+
+ /* Misc */
+ TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
+};
+
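/*
 * Illustrative sketch: hotkey_notify() dispatches these events by the
 * 0x1000-sized range they fall into (the "0x3000-0x3FFF: bay-related
 * wakeups" style cases further down), so the event class is simply the
 * top hex digit of the code.  A hypothetical helper making that explicit:
 */
static inline unsigned int tp_hkey_event_class(const u32 hkey)
{
	/* e.g. TP_HKEY_EV_WKUP_S3_UNDOCK (0x2304) -> 2 (wakeup),
	 *      TP_HKEY_EV_ALARM_BAT_HOT (0x6011) -> 6 (thermal) */
	return hkey >> 12;
}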
/****************************************************************************
* Main driver
*/
@@ -1278,6 +1323,7 @@ static void tpacpi_destroy_rfkill(const enum tpacpi_rfk_id id)
tp_rfk = tpacpi_rfkill_switches[id];
if (tp_rfk) {
rfkill_unregister(tp_rfk->rfkill);
+ rfkill_destroy(tp_rfk->rfkill);
tpacpi_rfkill_switches[id] = NULL;
kfree(tp_rfk);
}
@@ -1601,6 +1647,196 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv)
#endif
}
+/*************************************************************************
+ * Firmware Data
+ */
+
+/*
+ * Table of recommended minimum BIOS versions
+ *
+ * Reasons for listing:
+ *  1. Stable BIOS, listed because of the unknown amount of
+ *  bugs and bad ACPI behaviour in older versions
+ *
+ * 2. BIOS or EC fw with known bugs that trigger on Linux
+ *
+ * 3. BIOS with known reduced functionality in older versions
+ *
+ * We recommend the latest BIOS and EC version.
+ * We only support the latest BIOS and EC fw version as a rule.
+ *
+ * Sources: IBM ThinkPad Public Web Documents (update changelogs),
+ * Information from users in ThinkWiki
+ *
+ * WARNING: we use this table also to detect that the machine is
+ * a ThinkPad in some cases, so don't remove entries lightly.
+ */
+
+#define TPV_Q(__v, __id1, __id2, __bv1, __bv2) \
+ { .vendor = (__v), \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = TPACPI_MATCH_ANY << 16 \
+ | (__bv1) << 8 | (__bv2) }
+
+#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \
+ __eid1, __eid2, __ev1, __ev2) \
+ { .vendor = (__v), \
+ .bios = TPID(__bid1, __bid2), \
+ .ec = TPID(__eid1, __eid2), \
+ .quirks = (__ev1) << 24 | (__ev2) << 16 \
+ | (__bv1) << 8 | (__bv2) }
+
+#define TPV_QI0(__id1, __id2, __bv1, __bv2) \
+ TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
+
+#define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
+ __bv1, __bv2, __id1, __id2, __ev1, __ev2)
+
+#define TPV_QI2(__bid1, __bid2, __bv1, __bv2, \
+ __eid1, __eid2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
+ __bv1, __bv2, __eid1, __eid2, __ev1, __ev2)
+
+#define TPV_QL0(__id1, __id2, __bv1, __bv2) \
+ TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2)
+
+#define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2, \
+ __bv1, __bv2, __id1, __id2, __ev1, __ev2)
+
+#define TPV_QL2(__bid1, __bid2, __bv1, __bv2, \
+ __eid1, __eid2, __ev1, __ev2) \
+ TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2, \
+ __bv1, __bv2, __eid1, __eid2, __ev1, __ev2)
+
+static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
+ /* Numeric models ------------------ */
+ /* FW MODEL BIOS VERS */
+ TPV_QI0('I', 'M', '6', '5'), /* 570 */
+ TPV_QI0('I', 'U', '2', '6'), /* 570E */
+ TPV_QI0('I', 'B', '5', '4'), /* 600 */
+ TPV_QI0('I', 'H', '4', '7'), /* 600E */
+ TPV_QI0('I', 'N', '3', '6'), /* 600E */
+ TPV_QI0('I', 'T', '5', '5'), /* 600X */
+ TPV_QI0('I', 'D', '4', '8'), /* 770, 770E, 770ED */
+ TPV_QI0('I', 'I', '4', '2'), /* 770X */
+ TPV_QI0('I', 'O', '2', '3'), /* 770Z */
+
+ /* A-series ------------------------- */
+ /* FW MODEL BIOS VERS EC VERS */
+ TPV_QI0('I', 'W', '5', '9'), /* A20m */
+ TPV_QI0('I', 'V', '6', '9'), /* A20p */
+ TPV_QI0('1', '0', '2', '6'), /* A21e, A22e */
+ TPV_QI0('K', 'U', '3', '6'), /* A21e */
+ TPV_QI0('K', 'X', '3', '6'), /* A21m, A22m */
+ TPV_QI0('K', 'Y', '3', '8'), /* A21p, A22p */
+ TPV_QI0('1', 'B', '1', '7'), /* A22e */
+ TPV_QI0('1', '3', '2', '0'), /* A22m */
+ TPV_QI0('1', 'E', '7', '3'), /* A30/p (0) */
+ TPV_QI1('1', 'G', '4', '1', '1', '7'), /* A31/p (0) */
+ TPV_QI1('1', 'N', '1', '6', '0', '7'), /* A31/p (0) */
+
+ /* G-series ------------------------- */
+ /* FW MODEL BIOS VERS */
+ TPV_QI0('1', 'T', 'A', '6'), /* G40 */
+ TPV_QI0('1', 'X', '5', '7'), /* G41 */
+
+ /* R-series, T-series --------------- */
+ /* FW MODEL BIOS VERS EC VERS */
+ TPV_QI0('1', 'C', 'F', '0'), /* R30 */
+ TPV_QI0('1', 'F', 'F', '1'), /* R31 */
+ TPV_QI0('1', 'M', '9', '7'), /* R32 */
+ TPV_QI0('1', 'O', '6', '1'), /* R40 */
+ TPV_QI0('1', 'P', '6', '5'), /* R40 */
+ TPV_QI0('1', 'S', '7', '0'), /* R40e */
+ TPV_QI1('1', 'R', 'D', 'R', '7', '1'), /* R50/p, R51,
+ T40/p, T41/p, T42/p (1) */
+ TPV_QI1('1', 'V', '7', '1', '2', '8'), /* R50e, R51 (1) */
+ TPV_QI1('7', '8', '7', '1', '0', '6'), /* R51e (1) */
+ TPV_QI1('7', '6', '6', '9', '1', '6'), /* R52 (1) */
+ TPV_QI1('7', '0', '6', '9', '2', '8'), /* R52, T43 (1) */
+
+ TPV_QI0('I', 'Y', '6', '1'), /* T20 */
+ TPV_QI0('K', 'Z', '3', '4'), /* T21 */
+ TPV_QI0('1', '6', '3', '2'), /* T22 */
+ TPV_QI1('1', 'A', '6', '4', '2', '3'), /* T23 (0) */
+ TPV_QI1('1', 'I', '7', '1', '2', '0'), /* T30 (0) */
+ TPV_QI1('1', 'Y', '6', '5', '2', '9'), /* T43/p (1) */
+
+ TPV_QL1('7', '9', 'E', '3', '5', '0'), /* T60/p */
+ TPV_QL1('7', 'C', 'D', '2', '2', '2'), /* R60, R60i */
+ TPV_QL0('7', 'E', 'D', '0'), /* R60e, R60i */
+
+ /* BIOS FW BIOS VERS EC FW EC VERS */
+ TPV_QI2('1', 'W', '9', '0', '1', 'V', '2', '8'), /* R50e (1) */
+ TPV_QL2('7', 'I', '3', '4', '7', '9', '5', '0'), /* T60/p wide */
+
+ /* X-series ------------------------- */
+ /* FW MODEL BIOS VERS EC VERS */
+ TPV_QI0('I', 'Z', '9', 'D'), /* X20, X21 */
+ TPV_QI0('1', 'D', '7', '0'), /* X22, X23, X24 */
+ TPV_QI1('1', 'K', '4', '8', '1', '8'), /* X30 (0) */
+ TPV_QI1('1', 'Q', '9', '7', '2', '3'), /* X31, X32 (0) */
+ TPV_QI1('1', 'U', 'D', '3', 'B', '2'), /* X40 (0) */
+ TPV_QI1('7', '4', '6', '4', '2', '7'), /* X41 (0) */
+ TPV_QI1('7', '5', '6', '0', '2', '0'), /* X41t (0) */
+
+ TPV_QL0('7', 'B', 'D', '7'), /* X60/s */
+ TPV_QL0('7', 'J', '3', '0'), /* X60t */
+
+ /* (0) - older versions lack DMI EC fw string and functionality */
+ /* (1) - older versions known to lack functionality */
+};
+
+#undef TPV_QL1
+#undef TPV_QL0
+#undef TPV_QI2
+#undef TPV_QI1
+#undef TPV_QI0
+#undef TPV_Q_X
+#undef TPV_Q
+
+static void __init tpacpi_check_outdated_fw(void)
+{
+ unsigned long fwvers;
+ u16 ec_version, bios_version;
+
+ fwvers = tpacpi_check_quirks(tpacpi_bios_version_qtable,
+ ARRAY_SIZE(tpacpi_bios_version_qtable));
+
+ if (!fwvers)
+ return;
+
+ bios_version = fwvers & 0xffffU;
+ ec_version = (fwvers >> 16) & 0xffffU;
+
+ /* note that unknown versions are set to 0x0000 and we use that */
+ if ((bios_version > thinkpad_id.bios_release) ||
+ (ec_version > thinkpad_id.ec_release &&
+ ec_version != TPACPI_MATCH_ANY)) {
+ /*
+ * The changelogs would let us track down the exact
+ * reason, but it is just too much of a pain to track
+ * it. We only list BIOSes that are either really
+ * broken, or really stable to begin with, so it is
+ * best if the user upgrades the firmware anyway.
+ */
+ printk(TPACPI_WARN
+ "WARNING: Outdated ThinkPad BIOS/EC firmware\n");
+ printk(TPACPI_WARN
+ "WARNING: This firmware may be missing critical bug "
+ "fixes and/or important features\n");
+ }
+}
+
+static bool __init tpacpi_is_fw_known(void)
+{
+ return tpacpi_check_quirks(tpacpi_bios_version_qtable,
+ ARRAY_SIZE(tpacpi_bios_version_qtable)) != 0;
+}
+
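/*
 * Worked example of the version packing above (values taken from the
 * A31/p entry): TPV_QI1('1', 'G', '4', '1', '1', '7') stores
 * quirks = '1' << 24 | '7' << 16 | '4' << 8 | '1' = 0x31373431.
 * tpacpi_check_outdated_fw() then splits that back into
 * bios_version = 0x3431 ("41") and ec_version = 0x3137 ("17") for the
 * comparison against thinkpad_id.bios_release/ec_release.
 */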
/****************************************************************************
****************************************************************************
*
@@ -1634,6 +1870,7 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
(thinkpad_id.nummodel_str) ?
thinkpad_id.nummodel_str : "unknown");
+ tpacpi_check_outdated_fw();
return 0;
}
@@ -1656,6 +1893,27 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
* Hotkey subdriver
*/
+/*
+ * ThinkPad firmware event model
+ *
+ * The ThinkPad firmware has two main event interfaces: normal ACPI
+ * notifications (which follow the ACPI standard), and a private event
+ * interface.
+ *
+ * The private event interface also issues events for the hotkeys. As
+ * the driver gained features, the event handling code ended up being
+ * built around the hotkey subdriver. This will need to be refactored
+ * to a more formal event API eventually.
+ *
+ * Some "hotkeys" are actually supposed to be used as event reports,
+ * such as "brightness has changed", "volume has changed", depending on
+ * the ThinkPad model and how the firmware is operating.
+ *
+ * Unlike other classes, hotkey-class events have mask/unmask control on
+ * non-ancient firmware. However, how it behaves changes a lot with the
+ * firmware model and version.
+ */
+
enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_FNF1 = 0,
TP_ACPI_HOTKEYSCAN_FNF2,
@@ -1683,7 +1941,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_THINKPAD,
};
-enum { /* Keys available through NVRAM polling */
+enum { /* Keys/events available through NVRAM polling */
TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
};
@@ -1731,16 +1989,50 @@ struct tp_nvram_state {
u8 volume_level;
};
+/* kthread for the hotkey poller */
static struct task_struct *tpacpi_hotkey_task;
-static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
-static int hotkey_poll_freq = 10; /* Hz */
+
+/* Acquired while the poller kthread is running; used to sync start/stop */
static struct mutex hotkey_thread_mutex;
+
+/*
+ * Acquire mutex to write poller control variables as an
+ * atomic block.
+ *
+ * Increment hotkey_config_change when changing them if you
+ * want the kthread to forget old state.
+ *
+ * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
+ */
static struct mutex hotkey_thread_data_mutex;
static unsigned int hotkey_config_change;
+/*
+ * hotkey poller control variables
+ *
+ * Must be atomic or readers will also need to acquire mutex
+ *
+ * HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
+ * should be used only when the changes need to be taken as
+ * a block, OR when one needs to force the kthread to forget
+ * old state.
+ */
+static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
+static unsigned int hotkey_poll_freq = 10; /* Hz */
+
+#define HOTKEY_CONFIG_CRITICAL_START \
+ do { \
+ mutex_lock(&hotkey_thread_data_mutex); \
+ hotkey_config_change++; \
+ } while (0);
+#define HOTKEY_CONFIG_CRITICAL_END \
+ mutex_unlock(&hotkey_thread_data_mutex);
+
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
#define hotkey_source_mask 0U
+#define HOTKEY_CONFIG_CRITICAL_START
+#define HOTKEY_CONFIG_CRITICAL_END
#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
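/*
 * Minimal usage sketch for the macros above (assumes
 * CONFIG_THINKPAD_ACPI_HOTKEY_POLL; writer_example and poller_example
 * are hypothetical names used only in this sketch): a writer bumps
 * hotkey_config_change while holding the data mutex, and the poller
 * drops its cached state when it sees the counter move, exactly as
 * hotkey_kthread() does below.
 */
static void writer_example(const u32 new_source_mask)
{
	HOTKEY_CONFIG_CRITICAL_START
	hotkey_source_mask = new_source_mask;
	HOTKEY_CONFIG_CRITICAL_END
}

static bool poller_example(unsigned int *change_detector)
{
	bool forget_old_state = false;

	mutex_lock(&hotkey_thread_data_mutex);
	if (*change_detector != hotkey_config_change) {
		*change_detector = hotkey_config_change;
		forget_old_state = true;	/* discard stale NVRAM snapshots */
	}
	mutex_unlock(&hotkey_thread_data_mutex);
	return forget_old_state;
}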
@@ -1754,10 +2046,12 @@ static enum { /* Reasons for waking up */
static int hotkey_autosleep_ack;
-static u32 hotkey_orig_mask;
-static u32 hotkey_all_mask;
-static u32 hotkey_reserved_mask;
-static u32 hotkey_mask;
+static u32 hotkey_orig_mask; /* events the BIOS had enabled */
+static u32 hotkey_all_mask; /* all events supported in fw */
+static u32 hotkey_reserved_mask; /* events better left disabled */
+static u32 hotkey_driver_mask; /* events needed by the driver */
+static u32 hotkey_user_mask; /* events visible to userspace */
+static u32 hotkey_acpi_mask; /* events enabled in firmware */
static unsigned int hotkey_report_mode;
@@ -1765,18 +2059,8 @@ static u16 *hotkey_keycode_map;
static struct attribute_set *hotkey_dev_attributes;
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-#define HOTKEY_CONFIG_CRITICAL_START \
- do { \
- mutex_lock(&hotkey_thread_data_mutex); \
- hotkey_config_change++; \
- } while (0);
-#define HOTKEY_CONFIG_CRITICAL_END \
- mutex_unlock(&hotkey_thread_data_mutex);
-#else
-#define HOTKEY_CONFIG_CRITICAL_START
-#define HOTKEY_CONFIG_CRITICAL_END
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+static void tpacpi_driver_event(const unsigned int hkey_event);
+static void hotkey_driver_event(const unsigned int scancode);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -1812,22 +2096,53 @@ static int hotkey_get_tablet_mode(int *status)
}
/*
+ * Reads current event mask from firmware, and updates
+ * hotkey_acpi_mask accordingly. Also resets any bits
+ * in hotkey_user_mask that cannot be
+ * delivered (shadow requirement of the userspace ABI).
+ *
* Call with hotkey_mutex held
*/
static int hotkey_mask_get(void)
{
- u32 m = 0;
-
if (tp_features.hotkey_mask) {
+ u32 m = 0;
+
if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
return -EIO;
+
+ hotkey_acpi_mask = m;
+ } else {
+ /* no mask support doesn't mean no event support... */
+ hotkey_acpi_mask = hotkey_all_mask;
}
- hotkey_mask = m | (hotkey_source_mask & hotkey_mask);
+
+ /* sync userspace-visible mask */
+ hotkey_user_mask &= (hotkey_acpi_mask | hotkey_source_mask);
return 0;
}
+static void hotkey_mask_warn_incomplete_mask(void)
+{
+ /* log only what the user can fix... */
+ const u32 wantedmask = hotkey_driver_mask &
+ ~(hotkey_acpi_mask | hotkey_source_mask) &
+ (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
+
+ if (wantedmask)
+ printk(TPACPI_NOTICE
+ "required events 0x%08x not enabled!\n",
+ wantedmask);
+}
+
/*
+ * Set the firmware mask when supported
+ *
+ * Also calls hotkey_mask_get to update hotkey_acpi_mask.
+ *
+ * NOTE: does not set bits in hotkey_user_mask, but may reset them.
+ *
* Call with hotkey_mutex held
*/
static int hotkey_mask_set(u32 mask)
@@ -1835,66 +2150,98 @@ static int hotkey_mask_set(u32 mask)
int i;
int rc = 0;
- if (tp_features.hotkey_mask) {
- if (!tp_warned.hotkey_mask_ff &&
- (mask == 0xffff || mask == 0xffffff ||
- mask == 0xffffffff)) {
- tp_warned.hotkey_mask_ff = 1;
- printk(TPACPI_NOTICE
- "setting the hotkey mask to 0x%08x is likely "
- "not the best way to go about it\n", mask);
- printk(TPACPI_NOTICE
- "please consider using the driver defaults, "
- "and refer to up-to-date thinkpad-acpi "
- "documentation\n");
- }
+ const u32 fwmask = mask & ~hotkey_source_mask;
- HOTKEY_CONFIG_CRITICAL_START
+ if (tp_features.hotkey_mask) {
for (i = 0; i < 32; i++) {
- u32 m = 1 << i;
- /* enable in firmware mask only keys not in NVRAM
- * mode, but enable the key in the cached hotkey_mask
- * regardless of mode, or the key will end up
- * disabled by hotkey_mask_get() */
if (!acpi_evalf(hkey_handle,
NULL, "MHKM", "vdd", i + 1,
- !!((mask & ~hotkey_source_mask) & m))) {
+ !!(mask & (1 << i)))) {
rc = -EIO;
break;
- } else {
- hotkey_mask = (hotkey_mask & ~m) | (mask & m);
}
}
- HOTKEY_CONFIG_CRITICAL_END
+ }
- /* hotkey_mask_get must be called unconditionally below */
- if (!hotkey_mask_get() && !rc &&
- (hotkey_mask & ~hotkey_source_mask) !=
- (mask & ~hotkey_source_mask)) {
- printk(TPACPI_NOTICE
- "requested hot key mask 0x%08x, but "
- "firmware forced it to 0x%08x\n",
- mask, hotkey_mask);
- }
- } else {
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
- HOTKEY_CONFIG_CRITICAL_START
- hotkey_mask = mask & hotkey_source_mask;
- HOTKEY_CONFIG_CRITICAL_END
- hotkey_mask_get();
- if (hotkey_mask != mask) {
- printk(TPACPI_NOTICE
- "requested hot key mask 0x%08x, "
- "forced to 0x%08x (NVRAM poll mask is "
- "0x%08x): no firmware mask support\n",
- mask, hotkey_mask, hotkey_source_mask);
- }
-#else
- hotkey_mask_get();
- rc = -ENXIO;
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+ /*
+ * We *must* make an unconditional call to hotkey_mask_get to
+ * refresh hotkey_acpi_mask and update hotkey_user_mask
+ *
+ * Take the opportunity to also log when we cannot _enable_
+ * a given event.
+ */
+ if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
+ printk(TPACPI_NOTICE
+ "asked for hotkey mask 0x%08x, but "
+ "firmware forced it to 0x%08x\n",
+ fwmask, hotkey_acpi_mask);
}
+ hotkey_mask_warn_incomplete_mask();
+
+ return rc;
+}
+
+/*
+ * Sets hotkey_user_mask and tries to set the firmware mask
+ *
+ * Call with hotkey_mutex held
+ */
+static int hotkey_user_mask_set(const u32 mask)
+{
+ int rc;
+
+ /* Give people a chance to notice they are doing something that
+ * is bound to go boom on their users sooner or later */
+ if (!tp_warned.hotkey_mask_ff &&
+ (mask == 0xffff || mask == 0xffffff ||
+ mask == 0xffffffff)) {
+ tp_warned.hotkey_mask_ff = 1;
+ printk(TPACPI_NOTICE
+ "setting the hotkey mask to 0x%08x is likely "
+ "not the best way to go about it\n", mask);
+ printk(TPACPI_NOTICE
+ "please consider using the driver defaults, "
+ "and refer to up-to-date thinkpad-acpi "
+ "documentation\n");
+ }
+
+ /* Try to enable what the user asked for, plus whatever we need.
+ * This syncs everything but won't enable bits in hotkey_user_mask */
+ rc = hotkey_mask_set((mask | hotkey_driver_mask) & ~hotkey_source_mask);
+
+ /* Enable the available bits in hotkey_user_mask */
+ hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
+
+ return rc;
+}
+
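/*
 * Worked example of the mask plumbing, with made-up values: assume
 * hotkey_source_mask = 0x00008000 (one NVRAM-polled event),
 * hotkey_driver_mask = 0x00000010, and userspace writes 0x0000801f.
 * hotkey_mask_set() is then asked for
 * (0x0000801f | 0x00000010) & ~0x00008000 = 0x0000001f, so only the
 * non-polled bits reach the firmware, and hotkey_user_mask becomes
 * 0x0000801f & (hotkey_acpi_mask | 0x00008000): the polled bit plus
 * whatever the firmware actually accepted.
 */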
+/*
+ * Sets the driver hotkey mask.
+ *
+ * Can be called even if the hotkey subdriver is inactive
+ */
+static int tpacpi_hotkey_driver_mask_set(const u32 mask)
+{
+ int rc;
+
+ /* Do the right thing if hotkey_init has not been called yet */
+ if (!tp_features.hotkey) {
+ hotkey_driver_mask = mask;
+ return 0;
+ }
+
+ mutex_lock(&hotkey_mutex);
+
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_driver_mask = mask;
+ hotkey_source_mask |= (mask & ~hotkey_all_mask);
+ HOTKEY_CONFIG_CRITICAL_END
+
+ rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
+ ~hotkey_source_mask);
+ mutex_unlock(&hotkey_mutex);
+
return rc;
}
@@ -1930,11 +2277,10 @@ static void tpacpi_input_send_tabletsw(void)
}
}
-static void tpacpi_input_send_key(unsigned int scancode)
+/* Do NOT call without validating scancode first */
+static void tpacpi_input_send_key(const unsigned int scancode)
{
- unsigned int keycode;
-
- keycode = hotkey_keycode_map[scancode];
+ const unsigned int keycode = hotkey_keycode_map[scancode];
if (keycode != KEY_RESERVED) {
mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -1955,19 +2301,28 @@ static void tpacpi_input_send_key(unsigned int scancode)
}
}
+/* Do NOT call without validating scancode first */
+static void tpacpi_input_send_key_masked(const unsigned int scancode)
+{
+ hotkey_driver_event(scancode);
+ if (hotkey_user_mask & (1 << scancode))
+ tpacpi_input_send_key(scancode);
+}
+
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
+/* Do NOT call without validating scancode first */
static void tpacpi_hotkey_send_key(unsigned int scancode)
{
- tpacpi_input_send_key(scancode);
+ tpacpi_input_send_key_masked(scancode);
if (hotkey_report_mode < 2) {
acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
- 0x80, 0x1001 + scancode);
+ 0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
}
}
-static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
+static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
{
u8 d;
@@ -2003,21 +2358,24 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
}
}
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ struct tp_nvram_state *newn,
+ const u32 event_mask)
+{
+
#define TPACPI_COMPARE_KEY(__scancode, __member) \
do { \
- if ((mask & (1 << __scancode)) && \
+ if ((event_mask & (1 << __scancode)) && \
oldn->__member != newn->__member) \
- tpacpi_hotkey_send_key(__scancode); \
+ tpacpi_hotkey_send_key(__scancode); \
} while (0)
#define TPACPI_MAY_SEND_KEY(__scancode) \
- do { if (mask & (1 << __scancode)) \
- tpacpi_hotkey_send_key(__scancode); } while (0)
+ do { \
+ if (event_mask & (1 << __scancode)) \
+ tpacpi_hotkey_send_key(__scancode); \
+ } while (0)
-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- struct tp_nvram_state *newn,
- u32 mask)
-{
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2063,18 +2421,26 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
}
}
}
-}
#undef TPACPI_COMPARE_KEY
#undef TPACPI_MAY_SEND_KEY
+}
+/*
+ * Polling driver
+ *
+ * We track all events in hotkey_source_mask all the time, since
+ * most of them are edge-based. We only issue those requested by
+ * hotkey_user_mask or hotkey_driver_mask, though.
+ */
static int hotkey_kthread(void *data)
{
struct tp_nvram_state s[2];
- u32 mask;
+ u32 poll_mask, event_mask;
unsigned int si, so;
unsigned long t;
unsigned int change_detector, must_reset;
+ unsigned int poll_freq;
mutex_lock(&hotkey_thread_mutex);
@@ -2090,13 +2456,20 @@ static int hotkey_kthread(void *data)
/* Initial state for compares */
mutex_lock(&hotkey_thread_data_mutex);
change_detector = hotkey_config_change;
- mask = hotkey_source_mask & hotkey_mask;
+ poll_mask = hotkey_source_mask;
+ event_mask = hotkey_source_mask &
+ (hotkey_driver_mask | hotkey_user_mask);
+ poll_freq = hotkey_poll_freq;
mutex_unlock(&hotkey_thread_data_mutex);
- hotkey_read_nvram(&s[so], mask);
+ hotkey_read_nvram(&s[so], poll_mask);
- while (!kthread_should_stop() && hotkey_poll_freq) {
- if (t == 0)
- t = 1000/hotkey_poll_freq;
+ while (!kthread_should_stop()) {
+ if (t == 0) {
+ if (likely(poll_freq))
+ t = 1000/poll_freq;
+ else
+ t = 100; /* should never happen... */
+ }
t = msleep_interruptible(t);
if (unlikely(kthread_should_stop()))
break;
@@ -2111,14 +2484,17 @@ static int hotkey_kthread(void *data)
t = 0;
change_detector = hotkey_config_change;
}
- mask = hotkey_source_mask & hotkey_mask;
+ poll_mask = hotkey_source_mask;
+ event_mask = hotkey_source_mask &
+ (hotkey_driver_mask | hotkey_user_mask);
+ poll_freq = hotkey_poll_freq;
mutex_unlock(&hotkey_thread_data_mutex);
- if (likely(mask)) {
- hotkey_read_nvram(&s[si], mask);
+ if (likely(poll_mask)) {
+ hotkey_read_nvram(&s[si], poll_mask);
if (likely(si != so)) {
hotkey_compare_and_issue_event(&s[so], &s[si],
- mask);
+ event_mask);
}
}
@@ -2131,6 +2507,7 @@ exit:
return 0;
}
+/* call with hotkey_mutex held */
static void hotkey_poll_stop_sync(void)
{
if (tpacpi_hotkey_task) {
@@ -2147,11 +2524,14 @@ static void hotkey_poll_stop_sync(void)
}
/* call with hotkey_mutex held */
-static void hotkey_poll_setup(int may_warn)
+static void hotkey_poll_setup(bool may_warn)
{
- if ((hotkey_source_mask & hotkey_mask) != 0 &&
- hotkey_poll_freq > 0 &&
- (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
+ const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
+ const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
+
+ if (hotkey_poll_freq > 0 &&
+ (poll_driver_mask ||
+ (poll_user_mask && tpacpi_inputdev->users > 0))) {
if (!tpacpi_hotkey_task) {
tpacpi_hotkey_task = kthread_run(hotkey_kthread,
NULL, TPACPI_NVRAM_KTHREAD_NAME);
@@ -2164,26 +2544,36 @@ static void hotkey_poll_setup(int may_warn)
}
} else {
hotkey_poll_stop_sync();
- if (may_warn &&
- hotkey_source_mask != 0 && hotkey_poll_freq == 0) {
+ if (may_warn && (poll_driver_mask || poll_user_mask) &&
+ hotkey_poll_freq == 0) {
printk(TPACPI_NOTICE
- "hot keys 0x%08x require polling, "
- "which is currently disabled\n",
- hotkey_source_mask);
+ "hot keys 0x%08x and/or events 0x%08x "
+ "require polling, which is currently "
+ "disabled\n",
+ poll_user_mask, poll_driver_mask);
}
}
}
-static void hotkey_poll_setup_safe(int may_warn)
+static void hotkey_poll_setup_safe(bool may_warn)
{
mutex_lock(&hotkey_mutex);
hotkey_poll_setup(may_warn);
mutex_unlock(&hotkey_mutex);
}
+/* call with hotkey_mutex held */
+static void hotkey_poll_set_freq(unsigned int freq)
+{
+ if (!freq)
+ hotkey_poll_stop_sync();
+
+ hotkey_poll_freq = freq;
+}
+
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-static void hotkey_poll_setup_safe(int __unused)
+static void hotkey_poll_setup_safe(bool __unused)
{
}
@@ -2201,7 +2591,7 @@ static int hotkey_inputdev_open(struct input_dev *dev)
case TPACPI_LIFE_EXITING:
return -EBUSY;
case TPACPI_LIFE_RUNNING:
- hotkey_poll_setup_safe(0);
+ hotkey_poll_setup_safe(false);
return 0;
}
@@ -2213,8 +2603,9 @@ static int hotkey_inputdev_open(struct input_dev *dev)
static void hotkey_inputdev_close(struct input_dev *dev)
{
/* disable hotkey polling when possible */
- if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
- hotkey_poll_setup_safe(0);
+ if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
+ !(hotkey_source_mask & hotkey_driver_mask))
+ hotkey_poll_setup_safe(false);
}
/* sysfs hotkey enable ------------------------------------------------- */
@@ -2261,15 +2652,7 @@ static ssize_t hotkey_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int res;
-
- if (mutex_lock_killable(&hotkey_mutex))
- return -ERESTARTSYS;
- res = hotkey_mask_get();
- mutex_unlock(&hotkey_mutex);
-
- return (res)?
- res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_user_mask);
}
static ssize_t hotkey_mask_store(struct device *dev,
@@ -2285,10 +2668,10 @@ static ssize_t hotkey_mask_store(struct device *dev,
if (mutex_lock_killable(&hotkey_mutex))
return -ERESTARTSYS;
- res = hotkey_mask_set(t);
+ res = hotkey_user_mask_set(t);
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
- hotkey_poll_setup(1);
+ hotkey_poll_setup(true);
#endif
mutex_unlock(&hotkey_mutex);
@@ -2318,6 +2701,8 @@ static ssize_t hotkey_bios_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ printk_deprecated_attribute("hotkey_bios_mask",
+ "This attribute is useless.");
return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
}
@@ -2365,6 +2750,8 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
+ u32 r_ev;
+ int rc;
if (parse_strtoul(buf, 0xffffffffUL, &t) ||
((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
@@ -2377,13 +2764,28 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
hotkey_source_mask = t;
HOTKEY_CONFIG_CRITICAL_END
- hotkey_poll_setup(1);
+ rc = hotkey_mask_set((hotkey_user_mask | hotkey_driver_mask) &
+ ~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
+ /* check if events needed by the driver got disabled */
+ r_ev = hotkey_driver_mask & ~(hotkey_acpi_mask & hotkey_all_mask)
+ & ~hotkey_source_mask & TPACPI_HKEY_NVRAM_KNOWN_MASK;
mutex_unlock(&hotkey_mutex);
+ if (rc < 0)
+ printk(TPACPI_ERR "hotkey_source_mask: failed to update the "
+ "firmware event mask!\n");
+
+ if (r_ev)
+ printk(TPACPI_NOTICE "hotkey_source_mask: "
+ "some important events were disabled: "
+ "0x%04x\n", r_ev);
+
tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
- return count;
+ return (rc < 0) ? rc : count;
}
static struct device_attribute dev_attr_hotkey_source_mask =
@@ -2410,9 +2812,9 @@ static ssize_t hotkey_poll_freq_store(struct device *dev,
if (mutex_lock_killable(&hotkey_mutex))
return -ERESTARTSYS;
- hotkey_poll_freq = t;
+ hotkey_poll_set_freq(t);
+ hotkey_poll_setup(true);
- hotkey_poll_setup(1);
mutex_unlock(&hotkey_mutex);
tpacpi_disclose_usertask("hotkey_poll_freq", "set to %lu\n", t);
@@ -2501,9 +2903,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_reason =
static void hotkey_wakeup_reason_notify_change(void)
{
- if (tp_features.hotkey_mask)
- sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
- "wakeup_reason");
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_reason");
}
/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
@@ -2520,9 +2921,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
static void hotkey_wakeup_hotunplug_complete_notify_change(void)
{
- if (tp_features.hotkey_mask)
- sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
- "wakeup_hotunplug_complete");
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_hotunplug_complete");
}
/* --------------------------------------------------------------------- */
@@ -2530,27 +2930,19 @@ static void hotkey_wakeup_hotunplug_complete_notify_change(void)
static struct attribute *hotkey_attributes[] __initdata = {
&dev_attr_hotkey_enable.attr,
&dev_attr_hotkey_bios_enabled.attr,
+ &dev_attr_hotkey_bios_mask.attr,
&dev_attr_hotkey_report_mode.attr,
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ &dev_attr_hotkey_wakeup_reason.attr,
+ &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
&dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
&dev_attr_hotkey_source_mask.attr,
&dev_attr_hotkey_poll_freq.attr,
#endif
};
-static struct attribute *hotkey_mask_attributes[] __initdata = {
- &dev_attr_hotkey_bios_mask.attr,
-#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
- &dev_attr_hotkey_mask.attr,
- &dev_attr_hotkey_all_mask.attr,
- &dev_attr_hotkey_recommended_mask.attr,
-#endif
- &dev_attr_hotkey_wakeup_reason.attr,
- &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
-};
-
/*
* Sync both the hw and sw blocking state of all switches
*/
@@ -2603,7 +2995,9 @@ static void tpacpi_send_radiosw_update(void)
static void hotkey_exit(void)
{
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_lock(&hotkey_mutex);
hotkey_poll_stop_sync();
+ mutex_unlock(&hotkey_mutex);
#endif
if (hotkey_dev_attributes)
@@ -2611,18 +3005,56 @@ static void hotkey_exit(void)
kfree(hotkey_keycode_map);
- if (tp_features.hotkey) {
- dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
- "restoring original hot key mask\n");
- /* no short-circuit boolean operator below! */
- if ((hotkey_mask_set(hotkey_orig_mask) |
- hotkey_status_set(false)) != 0)
- printk(TPACPI_ERR
- "failed to restore hot key mask "
- "to BIOS defaults\n");
+ dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
+ "restoring original HKEY status and mask\n");
+ /* yes, there is a bitwise OR below; we want the
+ * functions to be called even if one of them fails */
+ if (((tp_features.hotkey_mask &&
+ hotkey_mask_set(hotkey_orig_mask)) |
+ hotkey_status_set(false)) != 0)
+ printk(TPACPI_ERR
+ "failed to restore hot key mask "
+ "to BIOS defaults\n");
+}
+
+static void __init hotkey_unmap(const unsigned int scancode)
+{
+ if (hotkey_keycode_map[scancode] != KEY_RESERVED) {
+ clear_bit(hotkey_keycode_map[scancode],
+ tpacpi_inputdev->keybit);
+ hotkey_keycode_map[scancode] = KEY_RESERVED;
}
}
+/*
+ * HKEY quirks:
+ * TPACPI_HK_Q_INIMASK: Supports FN+F3,FN+F4,FN+F12
+ */
+
+#define TPACPI_HK_Q_INIMASK 0x0001
+
+static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
+ TPACPI_Q_IBM('I', 'H', TPACPI_HK_Q_INIMASK), /* 600E */
+ TPACPI_Q_IBM('I', 'N', TPACPI_HK_Q_INIMASK), /* 600E */
+ TPACPI_Q_IBM('I', 'D', TPACPI_HK_Q_INIMASK), /* 770, 770E, 770ED */
+ TPACPI_Q_IBM('I', 'W', TPACPI_HK_Q_INIMASK), /* A20m */
+ TPACPI_Q_IBM('I', 'V', TPACPI_HK_Q_INIMASK), /* A20p */
+ TPACPI_Q_IBM('1', '0', TPACPI_HK_Q_INIMASK), /* A21e, A22e */
+ TPACPI_Q_IBM('K', 'U', TPACPI_HK_Q_INIMASK), /* A21e */
+ TPACPI_Q_IBM('K', 'X', TPACPI_HK_Q_INIMASK), /* A21m, A22m */
+ TPACPI_Q_IBM('K', 'Y', TPACPI_HK_Q_INIMASK), /* A21p, A22p */
+ TPACPI_Q_IBM('1', 'B', TPACPI_HK_Q_INIMASK), /* A22e */
+ TPACPI_Q_IBM('1', '3', TPACPI_HK_Q_INIMASK), /* A22m */
+ TPACPI_Q_IBM('1', 'E', TPACPI_HK_Q_INIMASK), /* A30/p (0) */
+ TPACPI_Q_IBM('1', 'C', TPACPI_HK_Q_INIMASK), /* R30 */
+ TPACPI_Q_IBM('1', 'F', TPACPI_HK_Q_INIMASK), /* R31 */
+ TPACPI_Q_IBM('I', 'Y', TPACPI_HK_Q_INIMASK), /* T20 */
+ TPACPI_Q_IBM('K', 'Z', TPACPI_HK_Q_INIMASK), /* T21 */
+ TPACPI_Q_IBM('1', '6', TPACPI_HK_Q_INIMASK), /* T22 */
+ TPACPI_Q_IBM('I', 'Z', TPACPI_HK_Q_INIMASK), /* X20, X21 */
+ TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
+};
+
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
/* Requirements for changing the default keymaps:
@@ -2665,9 +3097,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_UNKNOWN, /* 0x0D: FN+INSERT */
KEY_UNKNOWN, /* 0x0E: FN+DELETE */
- /* brightness: firmware always reacts to them, unless
- * X.org did some tricks in the radeon BIOS scratch
- * registers of *some* models */
+ /* brightness: firmware always reacts to them */
KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
KEY_RESERVED, /* 0x10: FN+END (brightness down) */
@@ -2701,11 +3131,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_UNKNOWN, /* 0x0D: FN+INSERT */
KEY_UNKNOWN, /* 0x0E: FN+DELETE */
- /* These either have to go through ACPI video, or
- * act like in the IBM ThinkPads, so don't ever
- * enable them by default */
- KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
- KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+ /* These should be enabled --only-- when ACPI video
+ * is disabled (i.e. in "vendor" mode), and are handled
+ * in a special way by the init code */
+ KEY_BRIGHTNESSUP, /* 0x0F: FN+HOME (brightness up) */
+ KEY_BRIGHTNESSDOWN, /* 0x10: FN+END (brightness down) */
KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
@@ -2742,6 +3172,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
int status;
int hkeyv;
+ unsigned long quirks;
+
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"initializing hotkey subdriver\n");
@@ -2767,9 +3199,16 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!tp_features.hotkey)
return 1;
+ quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
+ ARRAY_SIZE(tpacpi_hotkey_qtable));
+
tpacpi_disable_brightness_delay();
- hotkey_dev_attributes = create_attr_set(13, NULL);
+ /* MUST have enough space for all attributes to be added to
+ * hotkey_dev_attributes */
+ hotkey_dev_attributes = create_attr_set(
+ ARRAY_SIZE(hotkey_attributes) + 2,
+ NULL);
if (!hotkey_dev_attributes)
return -ENOMEM;
res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -2778,7 +3217,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (res)
goto err_exit;
- /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+ /* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
@@ -2792,10 +3231,22 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* MHKV 0x100 in A31, R40, R40e,
* T4x, X31, and later
*/
- tp_features.hotkey_mask = 1;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"firmware HKEY interface version: 0x%x\n",
hkeyv);
+
+ /* Paranoia check AND init hotkey_all_mask */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "qd")) {
+ printk(TPACPI_ERR
+ "missing MHKA handler, "
+ "please report this to %s\n",
+ TPACPI_MAIL);
+ /* Fallback: pre-init for FN+F3,F4,F12 */
+ hotkey_all_mask = 0x080cU;
+ } else {
+ tp_features.hotkey_mask = 1;
+ }
}
}
@@ -2803,47 +3254,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
"hotkey masks are %s\n",
str_supported(tp_features.hotkey_mask));
- if (tp_features.hotkey_mask) {
- if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
- "MHKA", "qd")) {
- printk(TPACPI_ERR
- "missing MHKA handler, "
- "please report this to %s\n",
- TPACPI_MAIL);
- /* FN+F12, FN+F4, FN+F3 */
- hotkey_all_mask = 0x080cU;
- }
- }
+ /* Init hotkey_all_mask if not initialized yet */
+ if (!tp_features.hotkey_mask && !hotkey_all_mask &&
+ (quirks & TPACPI_HK_Q_INIMASK))
+ hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
- /* hotkey_source_mask *must* be zero for
- * the first hotkey_mask_get */
+ /* Init hotkey_acpi_mask and hotkey_orig_mask */
if (tp_features.hotkey_mask) {
+ /* hotkey_source_mask *must* be zero for
+ * the first hotkey_mask_get to return hotkey_orig_mask */
res = hotkey_mask_get();
if (res)
goto err_exit;
- hotkey_orig_mask = hotkey_mask;
- res = add_many_to_attr_set(
- hotkey_dev_attributes,
- hotkey_mask_attributes,
- ARRAY_SIZE(hotkey_mask_attributes));
- if (res)
- goto err_exit;
- }
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
- if (tp_features.hotkey_mask) {
- hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
- & ~hotkey_all_mask;
+ hotkey_orig_mask = hotkey_acpi_mask;
} else {
- hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK;
+ hotkey_orig_mask = hotkey_all_mask;
+ hotkey_acpi_mask = hotkey_all_mask;
}
- vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "hotkey source mask 0x%08x, polling freq %d\n",
- hotkey_source_mask, hotkey_poll_freq);
-#endif
-
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
@@ -2944,17 +3373,26 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
"Disabling thinkpad-acpi brightness events "
"by default...\n");
- /* The hotkey_reserved_mask change below is not
- * necessary while the keys are at KEY_RESERVED in the
- * default map, but better safe than sorry, leave it
- * here as a marker of what we have to do, especially
- * when we finally become able to set this at runtime
- * on response to X.org requests */
+ /* Disable brightness up/down on Lenovo thinkpads when
+ * ACPI is handling them, otherwise it is plain impossible
+ * for userspace to do something even remotely sane */
hotkey_reserved_mask |=
(1 << TP_ACPI_HOTKEYSCAN_FNHOME)
| (1 << TP_ACPI_HOTKEYSCAN_FNEND);
+ hotkey_unmap(TP_ACPI_HOTKEYSCAN_FNHOME);
+ hotkey_unmap(TP_ACPI_HOTKEYSCAN_FNEND);
}
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
+ & ~hotkey_all_mask
+ & ~hotkey_reserved_mask;
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "hotkey source mask 0x%08x, polling freq %u\n",
+ hotkey_source_mask, hotkey_poll_freq);
+#endif
+
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"enabling firmware HKEY event interface...\n");
res = hotkey_status_set(true);
@@ -2962,13 +3400,18 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_exit();
return res;
}
- res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask)
- & ~hotkey_reserved_mask)
- | hotkey_orig_mask);
+ res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
+ | hotkey_driver_mask)
+ & ~hotkey_source_mask);
if (res < 0 && res != -ENXIO) {
hotkey_exit();
return res;
}
+ hotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask;
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
+ hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"legacy ibm/hotkey event reporting over procfs %s\n",
@@ -2978,7 +3421,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
tpacpi_inputdev->open = &hotkey_inputdev_open;
tpacpi_inputdev->close = &hotkey_inputdev_close;
- hotkey_poll_setup_safe(1);
+ hotkey_poll_setup_safe(true);
tpacpi_send_radiosw_update();
tpacpi_input_send_tabletsw();
@@ -3003,7 +3446,7 @@ static bool hotkey_notify_hotkey(const u32 hkey,
if (scancode > 0 && scancode < 0x21) {
scancode--;
if (!(hotkey_source_mask & (1 << scancode))) {
- tpacpi_input_send_key(scancode);
+ tpacpi_input_send_key_masked(scancode);
*send_acpi_ev = false;
} else {
*ignore_acpi_ev = true;
@@ -3022,20 +3465,20 @@ static bool hotkey_notify_wakeup(const u32 hkey,
*ignore_acpi_ev = false;
switch (hkey) {
- case 0x2304: /* suspend, undock */
- case 0x2404: /* hibernation, undock */
+ case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
+ case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
*ignore_acpi_ev = true;
break;
- case 0x2305: /* suspend, bay eject */
- case 0x2405: /* hibernation, bay eject */
+ case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
+ case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
*ignore_acpi_ev = true;
break;
- case 0x2313: /* Battery on critical low level (S3) */
- case 0x2413: /* Battery on critical low level (S4) */
+ case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
+ case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
printk(TPACPI_ALERT
"EMERGENCY WAKEUP: battery almost empty\n");
/* how to auto-heal: */
@@ -3065,21 +3508,21 @@ static bool hotkey_notify_usrevent(const u32 hkey,
*ignore_acpi_ev = false;
switch (hkey) {
- case 0x5010: /* Lenovo new BIOS: brightness changed */
- case 0x500b: /* X61t: tablet pen inserted into bay */
- case 0x500c: /* X61t: tablet pen removed from bay */
+ case TP_HKEY_EV_PEN_INSERTED: /* X61t: tablet pen inserted into bay */
+ case TP_HKEY_EV_PEN_REMOVED: /* X61t: tablet pen removed from bay */
return true;
- case 0x5009: /* X41t-X61t: swivel up (tablet mode) */
- case 0x500a: /* X41t-X61t: swivel down (normal mode) */
+ case TP_HKEY_EV_TABLET_TABLET: /* X41t-X61t: tablet mode */
+ case TP_HKEY_EV_TABLET_NOTEBOOK: /* X41t-X61t: normal mode */
tpacpi_input_send_tabletsw();
hotkey_tablet_mode_notify_change();
*send_acpi_ev = false;
return true;
- case 0x5001:
- case 0x5002:
- /* LID switch events. Do not propagate */
+ case TP_HKEY_EV_LID_CLOSE: /* Lid closed */
+ case TP_HKEY_EV_LID_OPEN: /* Lid opened */
+ case TP_HKEY_EV_BRGHT_CHANGED: /* brightness changed */
+ /* do not propagate these events */
*ignore_acpi_ev = true;
return true;
@@ -3097,30 +3540,30 @@ static bool hotkey_notify_thermal(const u32 hkey,
*ignore_acpi_ev = false;
switch (hkey) {
- case 0x6011:
+ case TP_HKEY_EV_ALARM_BAT_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
return true;
- case 0x6012:
+ case TP_HKEY_EV_ALARM_BAT_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
return true;
- case 0x6021:
+ case TP_HKEY_EV_ALARM_SENSOR_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
return true;
- case 0x6022:
+ case TP_HKEY_EV_ALARM_SENSOR_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: "
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
return true;
- case 0x6030:
+ case TP_HKEY_EV_THM_TABLE_CHANGED:
printk(TPACPI_INFO
"EC reports that Thermal Table has changed\n");
/* recommended action: do nothing, we don't have
@@ -3178,7 +3621,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
break;
case 3:
/* 0x3000-0x3FFF: bay-related wakeups */
- if (hkey == 0x3003) {
+ if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
hotkey_autosleep_ack = 1;
printk(TPACPI_INFO
"bay ejected\n");
@@ -3190,7 +3633,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
break;
case 4:
/* 0x4000-0x4FFF: dock-related wakeups */
- if (hkey == 0x4003) {
+ if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
hotkey_autosleep_ack = 1;
printk(TPACPI_INFO
"undocked\n");
@@ -3212,7 +3655,8 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
break;
case 7:
/* 0x7000-0x7FFF: misc */
- if (tp_features.hotkey_wlsw && hkey == 0x7000) {
+ if (tp_features.hotkey_wlsw &&
+ hkey == TP_HKEY_EV_RFKILL_CHANGED) {
tpacpi_send_radiosw_update();
send_acpi_ev = 0;
known_ev = true;
@@ -3258,15 +3702,17 @@ static void hotkey_resume(void)
{
tpacpi_disable_brightness_delay();
- if (hotkey_mask_get())
+ if (hotkey_status_set(true) < 0 ||
+ hotkey_mask_set(hotkey_acpi_mask) < 0)
printk(TPACPI_ERR
- "error while trying to read hot key mask "
- "from firmware\n");
+ "error while attempting to reset the event "
+ "firmware interface\n");
+
tpacpi_send_radiosw_update();
hotkey_tablet_mode_notify_change();
hotkey_wakeup_reason_notify_change();
hotkey_wakeup_hotunplug_complete_notify_change();
- hotkey_poll_setup_safe(0);
+ hotkey_poll_setup_safe(false);
}
/* procfs -------------------------------------------------------------- */
@@ -3290,8 +3736,8 @@ static int hotkey_read(char *p)
return res;
len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
- if (tp_features.hotkey_mask) {
- len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask);
+ if (hotkey_all_mask) {
+ len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
len += sprintf(p + len,
"commands:\tenable, disable, reset, <mask>\n");
} else {
@@ -3328,7 +3774,7 @@ static int hotkey_write(char *buf)
if (mutex_lock_killable(&hotkey_mutex))
return -ERESTARTSYS;
- mask = hotkey_mask;
+ mask = hotkey_user_mask;
res = 0;
while ((cmd = next_cmd(&buf))) {
@@ -3338,7 +3784,8 @@ static int hotkey_write(char *buf)
hotkey_enabledisable_warn(0);
res = -EPERM;
} else if (strlencmp(cmd, "reset") == 0) {
- mask = hotkey_orig_mask;
+ mask = (hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask;
} else if (sscanf(cmd, "0x%x", &mask) == 1) {
/* mask set */
} else if (sscanf(cmd, "%x", &mask) == 1) {
@@ -3349,12 +3796,11 @@ static int hotkey_write(char *buf)
}
}
- if (!res)
+ if (!res) {
tpacpi_disclose_usertask("procfs hotkey",
"set mask to 0x%08x\n", mask);
-
- if (!res && mask != hotkey_mask)
- res = hotkey_mask_set(mask);
+ res = hotkey_user_mask_set(mask);
+ }
errexit:
mutex_unlock(&hotkey_mutex);
@@ -5655,16 +6101,16 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
/* Models with ATI GPUs known to require ECNVRAM mode */
TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
- /* Models with ATI GPUs (waiting confirmation) */
- TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ /* Models with ATI GPUs that can use ECNVRAM */
+ TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
- /* Models with Intel Extreme Graphics 2 (waiting confirmation) */
+ /* Models with Intel Extreme Graphics 2 */
+ TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
- TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
/* Models with Intel GMA900 */
TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
@@ -5767,8 +6213,10 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
&ibm_backlight_data);
if (IS_ERR(ibm_backlight_device)) {
+ int rc = PTR_ERR(ibm_backlight_device);
+ ibm_backlight_device = NULL;
printk(TPACPI_ERR "Could not register backlight device\n");
- return PTR_ERR(ibm_backlight_device);
+ return rc;
}
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness is supported\n");
@@ -7256,6 +7704,21 @@ static struct ibm_struct fan_driver_data = {
****************************************************************************
****************************************************************************/
+/*
+ * HKEY event callouts for other subdrivers go here
+ * (yes, it is ugly, but it is quick, safe, and gets the job done)
+ */
+static void tpacpi_driver_event(const unsigned int hkey_event)
+{
+}
+
+
+
+static void hotkey_driver_event(const unsigned int scancode)
+{
+ tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
+}
+
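/*
 * Sketch of what a future callout could look like here; the
 * example_brightness_event() handler is hypothetical and only
 * illustrates the intended dispatch-by-event-code shape:
 *
 *	switch (hkey_event) {
 *	case TP_HKEY_EV_BRGHT_UP:
 *	case TP_HKEY_EV_BRGHT_DOWN:
 *		example_brightness_event(hkey_event);
 *		break;
 *	}
 */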
/* sysfs name ---------------------------------------------------------- */
static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
struct device_attribute *attr,
@@ -7524,9 +7987,11 @@ static int __init probe_for_thinkpad(void)
/*
* Non-ancient models have better DMI tagging, but very old models
- * don't.
+ * don't. tpacpi_is_fw_known() is a cheat to help in that case.
*/
- is_thinkpad = (thinkpad_id.model_str != NULL);
+ is_thinkpad = (thinkpad_id.model_str != NULL) ||
+ (thinkpad_id.ec_model != 0) ||
+ tpacpi_is_fw_known();
/* ec is required because many other handles are relative to it */
TPACPI_ACPIHANDLE_INIT(ec);
@@ -7537,13 +8002,6 @@ static int __init probe_for_thinkpad(void)
return -ENODEV;
}
- /*
- * Risks a regression on very old machines, but reduces potential
- * false positives a damn great deal
- */
- if (!is_thinkpad)
- is_thinkpad = (thinkpad_id.vendor == PCI_VENDOR_ID_IBM);
-
if (!is_thinkpad && !force_load)
return -ENODEV;
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
new file mode 100644
index 00000000000..02f3d4e9e66
--- /dev/null
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -0,0 +1,265 @@
+/*
+ * ACPI driver for Topstar notebooks (hotkeys support only)
+ *
+ * Copyright (c) 2009 Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+ *
+ * Implementation inspired by existing x86 platform drivers, especially
+ * asus/eepc/fujitsu-laptop, thanks to their authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/input.h>
+
+#define ACPI_TOPSTAR_CLASS "topstar"
+
+struct topstar_hkey {
+ struct input_dev *inputdev;
+};
+
+struct tps_key_entry {
+ u8 code;
+ u16 keycode;
+};
+
+static struct tps_key_entry topstar_keymap[] = {
+ { 0x80, KEY_BRIGHTNESSUP },
+ { 0x81, KEY_BRIGHTNESSDOWN },
+ { 0x83, KEY_VOLUMEUP },
+ { 0x84, KEY_VOLUMEDOWN },
+ { 0x85, KEY_MUTE },
+ { 0x86, KEY_SWITCHVIDEOMODE },
+ { 0x87, KEY_F13 }, /* touchpad enable/disable key */
+ { 0x88, KEY_WLAN },
+ { 0x8a, KEY_WWW },
+ { 0x8b, KEY_MAIL },
+ { 0x8c, KEY_MEDIA },
+ { 0x96, KEY_F14 }, /* G key? */
+ { }
+};
+
+static struct tps_key_entry *tps_get_key_by_scancode(int code)
+{
+ struct tps_key_entry *key;
+
+ for (key = topstar_keymap; key->code; key++)
+ if (code == key->code)
+ return key;
+
+ return NULL;
+}
+
+static struct tps_key_entry *tps_get_key_by_keycode(int code)
+{
+ struct tps_key_entry *key;
+
+ for (key = topstar_keymap; key->code; key++)
+ if (code == key->keycode)
+ return key;
+
+ return NULL;
+}
+
+static void acpi_topstar_notify(struct acpi_device *device, u32 event)
+{
+ struct tps_key_entry *key;
+ static bool dup_evnt[2];
+ bool *dup;
+ struct topstar_hkey *hkey = acpi_driver_data(device);
+
+ /* 0x83 and 0x84 key events arrive duplicated... */
+ if (event == 0x83 || event == 0x84) {
+ dup = &dup_evnt[event - 0x83];
+ if (*dup) {
+ *dup = false;
+ return;
+ }
+ *dup = true;
+ }
+
+ /*
+ * The 'G key' generates two event codes; convert to only
+ * one event/key code for now (3G switch?)
+ */
+ if (event == 0x97)
+ event = 0x96;
+
+ key = tps_get_key_by_scancode(event);
+ if (key) {
+ input_report_key(hkey->inputdev, key->keycode, 1);
+ input_sync(hkey->inputdev);
+ input_report_key(hkey->inputdev, key->keycode, 0);
+ input_sync(hkey->inputdev);
+ return;
+ }
+
+ /* Known non-hotkey events that aren't handled or that we don't care about yet */
+ if (event == 0x8e || event == 0x8f || event == 0x90)
+ return;
+
+ pr_info("unknown event = 0x%02x\n", event);
+}
+
+static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
+{
+ acpi_status status;
+ union acpi_object fncx_params[1] = {
+ { .type = ACPI_TYPE_INTEGER }
+ };
+ struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
+
+ fncx_params[0].integer.value = state ? 0x86 : 0x87;
+ status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Unable to switch FNCX notifications\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int topstar_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+ struct tps_key_entry *key = tps_get_key_by_scancode(scancode);
+
+ if (!key)
+ return -EINVAL;
+
+ *keycode = key->keycode;
+ return 0;
+}
+
+static int topstar_setkeycode(struct input_dev *dev, int scancode, int keycode)
+{
+ struct tps_key_entry *key;
+ int old_keycode;
+
+ if (keycode < 0 || keycode > KEY_MAX)
+ return -EINVAL;
+
+ key = tps_get_key_by_scancode(scancode);
+
+ if (!key)
+ return -EINVAL;
+
+ old_keycode = key->keycode;
+ key->keycode = keycode;
+ set_bit(keycode, dev->keybit);
+ if (!tps_get_key_by_keycode(old_keycode))
+ clear_bit(old_keycode, dev->keybit);
+ return 0;
+}
+
+static int acpi_topstar_init_hkey(struct topstar_hkey *hkey)
+{
+ struct tps_key_entry *key;
+
+ hkey->inputdev = input_allocate_device();
+ if (!hkey->inputdev) {
+ pr_err("Unable to allocate input device\n");
+ return -ENODEV;
+ }
+ hkey->inputdev->name = "Topstar Laptop extra buttons";
+ hkey->inputdev->phys = "topstar/input0";
+ hkey->inputdev->id.bustype = BUS_HOST;
+ hkey->inputdev->getkeycode = topstar_getkeycode;
+ hkey->inputdev->setkeycode = topstar_setkeycode;
+ for (key = topstar_keymap; key->code; key++) {
+ set_bit(EV_KEY, hkey->inputdev->evbit);
+ set_bit(key->keycode, hkey->inputdev->keybit);
+ }
+ if (input_register_device(hkey->inputdev)) {
+ pr_err("Unable to register input device\n");
+ input_free_device(hkey->inputdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_topstar_add(struct acpi_device *device)
+{
+ struct topstar_hkey *tps_hkey;
+
+ tps_hkey = kzalloc(sizeof(struct topstar_hkey), GFP_KERNEL);
+ if (!tps_hkey)
+ return -ENOMEM;
+
+ strcpy(acpi_device_name(device), "Topstar TPSACPI");
+ strcpy(acpi_device_class(device), ACPI_TOPSTAR_CLASS);
+
+ if (acpi_topstar_fncx_switch(device, true))
+ goto add_err;
+
+ if (acpi_topstar_init_hkey(tps_hkey))
+ goto add_err;
+
+ device->driver_data = tps_hkey;
+ return 0;
+
+add_err:
+ kfree(tps_hkey);
+ return -ENODEV;
+}
+
+static int acpi_topstar_remove(struct acpi_device *device, int type)
+{
+ struct topstar_hkey *tps_hkey = acpi_driver_data(device);
+
+ acpi_topstar_fncx_switch(device, false);
+
+ input_unregister_device(tps_hkey->inputdev);
+ kfree(tps_hkey);
+
+ return 0;
+}
+
+static const struct acpi_device_id topstar_device_ids[] = {
+ { "TPSACPI01", 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, topstar_device_ids);
+
+static struct acpi_driver acpi_topstar_driver = {
+ .name = "Topstar laptop ACPI driver",
+ .class = ACPI_TOPSTAR_CLASS,
+ .ids = topstar_device_ids,
+ .ops = {
+ .add = acpi_topstar_add,
+ .remove = acpi_topstar_remove,
+ .notify = acpi_topstar_notify,
+ },
+};
+
+static int __init topstar_laptop_init(void)
+{
+ int ret;
+
+ ret = acpi_bus_register_driver(&acpi_topstar_driver);
+ if (ret < 0)
+ return ret;
+
+ printk(KERN_INFO "Topstar Laptop ACPI extras driver loaded\n");
+
+ return 0;
+}
+
+static void __exit topstar_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_topstar_driver);
+}
+
+module_init(topstar_laptop_init);
+module_exit(topstar_laptop_exit);
+
+MODULE_AUTHOR("Herton Ronaldo Krzesinski");
+MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index f215a591919..177f8d767df 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -42,7 +42,6 @@ MODULE_LICENSE("GPL");
#define ACPI_WMI_CLASS "wmi"
-#undef PREFIX
#define PREFIX "ACPI: WMI: "
static DEFINE_MUTEX(wmi_data_lock);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 527ee764c93..cd11b113494 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -135,6 +135,15 @@ static int pnp_device_remove(struct device *dev)
return 0;
}
+static void pnp_device_shutdown(struct device *dev)
+{
+ struct pnp_dev *pnp_dev = to_pnp_dev(dev);
+ struct pnp_driver *drv = pnp_dev->driver;
+
+ if (drv && drv->shutdown)
+ drv->shutdown(pnp_dev);
+}
+
static int pnp_bus_match(struct device *dev, struct device_driver *drv)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -203,6 +212,7 @@ struct bus_type pnp_bus_type = {
.match = pnp_bus_match,
.probe = pnp_device_probe,
.remove = pnp_device_remove,
+ .shutdown = pnp_device_shutdown,
.suspend = pnp_bus_suspend,
.resume = pnp_bus_resume,
.dev_attrs = pnp_interface_attrs,
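
With the shutdown hook wired into pnp_bus_type above, an individual PnP driver only has to provide a .shutdown callback to have its hardware quiesced on reboot or poweroff. A hedged sketch of what that looks like; the "foo" names and the PnP ID are hypothetical.

/*
 * Illustrative sketch, not part of the patch: a PnP driver opting into the
 * bus-level shutdown path added above. All "foo" identifiers are
 * hypothetical.
 */
#include <linux/pnp.h>

static void foo_pnp_shutdown(struct pnp_dev *dev)
{
	/* mask interrupts / stop DMA so the device stays quiet across reboot */
}

static const struct pnp_device_id foo_pnp_ids[] = {
	{ .id = "FOO0001" },	/* hypothetical PnP ID */
	{ .id = "" },
};

static struct pnp_driver foo_pnp_driver = {
	.name     = "foo",
	.id_table = foo_pnp_ids,
	/* .probe and .remove as for any other PnP driver */
	.shutdown = foo_pnp_shutdown,	/* called via pnp_device_shutdown() */
};
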
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9496494f340..83b8b5ac49c 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -153,6 +153,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
acpi_handle temp = NULL;
acpi_status status;
struct pnp_dev *dev;
+ struct acpi_hardware_id *id;
/*
* If a PnPacpi device is not present , the device
@@ -193,15 +194,12 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (dev->capabilities & PNP_CONFIGURABLE)
pnpacpi_parse_resource_option_data(dev);
- if (device->flags.compatible_ids) {
- struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
- int i;
-
- for (i = 0; i < cid_list->count; i++) {
- if (!ispnpidacpi(cid_list->id[i].value))
- continue;
- pnp_add_id(dev, cid_list->id[i].value);
- }
+ list_for_each_entry(id, &device->pnp.ids, list) {
+ if (!strcmp(id->id, acpi_device_hid(device)))
+ continue;
+ if (!ispnpidacpi(id->id))
+ continue;
+ pnp_add_id(dev, id->id);
}
/* clear out the damaged flags */
@@ -232,9 +230,8 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
struct pnp_dev *pnp = _pnp;
/* true means it matched */
- return acpi->flags.hardware_id
- && !acpi_get_physical_device(acpi->handle)
- && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
+ return !acpi_get_physical_device(acpi->handle)
+ && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
}
static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index bdbc4f73fcd..cea6cef27e8 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -29,6 +29,13 @@ config APM_POWER
Say Y here to enable support APM status emulation using
battery class devices.
+config WM831X_POWER
+ tristate "WM831X PMU support"
+ depends on MFD_WM831X
+ help
+ Say Y here to enable support for the power management unit
+ provided by Wolfson Microelectronics WM831x PMICs.
+
config WM8350_POWER
tristate "WM8350 PMU support"
depends on MFD_WM8350
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 380d17c9ae2..b96f29d91c2 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_PDA_POWER) += pda_power.o
obj-$(CONFIG_APM_POWER) += apm_power.o
+obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 520b5c49ff3..6f1dba5a519 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -56,6 +56,7 @@ struct ds2760_device_info {
struct device *w1_dev;
struct workqueue_struct *monitor_wqueue;
struct delayed_work monitor_work;
+ struct delayed_work set_charged_work;
};
static unsigned int cache_time = 1000;
@@ -66,6 +67,14 @@ static unsigned int pmod_enabled;
module_param(pmod_enabled, bool, 0644);
MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
+static unsigned int rated_capacity;
+module_param(rated_capacity, uint, 0644);
+MODULE_PARM_DESC(rated_capacity, "rated battery capacity, 10*mAh or index");
+
+static unsigned int current_accum;
+module_param(current_accum, uint, 0644);
+MODULE_PARM_DESC(current_accum, "current accumulator value");
+
/* Some batteries have their rated capacity stored a N * 10 mAh, while
* others use an index into this table. */
static int rated_capacities[] = {
@@ -168,8 +177,13 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
di->full_active_uAh = di->raw[DS2760_ACTIVE_FULL] << 8 |
di->raw[DS2760_ACTIVE_FULL + 1];
- scale[0] = di->raw[DS2760_ACTIVE_FULL] << 8 |
- di->raw[DS2760_ACTIVE_FULL + 1];
+ /* If the full_active_uAh value is not given, fall back to the rated
+ * capacity. This is likely to happen when the chip is not part of the
+ * battery pack and has therefore not been bootstrapped. */
+ if (di->full_active_uAh == 0)
+ di->full_active_uAh = di->rated_capacity / 1000L;
+
+ scale[0] = di->full_active_uAh;
for (i = 1; i < 5; i++)
scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i];
@@ -197,15 +211,31 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
if (di->rem_capacity > 100)
di->rem_capacity = 100;
- if (di->current_uA)
- di->life_sec = -((di->accum_current_uAh - di->empty_uAh) *
- 3600L) / di->current_uA;
+ if (di->current_uA >= 100L)
+ di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
+ / (di->current_uA / 100L);
else
di->life_sec = 0;
return 0;
}
+static void ds2760_battery_set_current_accum(struct ds2760_device_info *di,
+ unsigned int acr_val)
+{
+ unsigned char acr[2];
+
+ /* acr is in units of 0.25 mAh */
+ acr_val *= 4L;
+ acr_val /= 1000;
+
+ acr[0] = acr_val >> 8;
+ acr[1] = acr_val & 0xff;
+
+ if (w1_ds2760_write(di->w1_dev, acr, DS2760_CURRENT_ACCUM_MSB, 2) < 2)
+ dev_warn(di->dev, "ACR write failed\n");
+}
+
static void ds2760_battery_update_status(struct ds2760_device_info *di)
{
int old_charge_status = di->charge_status;
@@ -237,21 +267,9 @@ static void ds2760_battery_update_status(struct ds2760_device_info *di)
if (di->full_counter < 2) {
di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
} else {
- unsigned char acr[2];
- int acr_val;
-
- /* acr is in units of 0.25 mAh */
- acr_val = di->full_active_uAh * 4L / 1000;
-
- acr[0] = acr_val >> 8;
- acr[1] = acr_val & 0xff;
-
- if (w1_ds2760_write(di->w1_dev, acr,
- DS2760_CURRENT_ACCUM_MSB, 2) < 2)
- dev_warn(di->dev,
- "ACR reset failed\n");
-
di->charge_status = POWER_SUPPLY_STATUS_FULL;
+ ds2760_battery_set_current_accum(di,
+ di->full_active_uAh);
}
}
} else {
@@ -274,6 +292,17 @@ static void ds2760_battery_write_status(struct ds2760_device_info *di,
w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
}
+static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di,
+ unsigned char rated_capacity)
+{
+ if (rated_capacity == di->raw[DS2760_RATED_CAPACITY])
+ return;
+
+ w1_ds2760_write(di->w1_dev, &rated_capacity, DS2760_RATED_CAPACITY, 1);
+ w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+}
+
static void ds2760_battery_work(struct work_struct *work)
{
struct ds2760_device_info *di = container_of(work,
@@ -299,6 +328,52 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
}
+
+static void ds2760_battery_set_charged_work(struct work_struct *work)
+{
+ char bias;
+ struct ds2760_device_info *di = container_of(work,
+ struct ds2760_device_info, set_charged_work.work);
+
+ dev_dbg(di->dev, "%s\n", __func__);
+
+ ds2760_battery_read_status(di);
+
+ /* When we get notified by external circuitry that the battery is
+ * considered fully charged now, we know that there is no current
+ * flow any more. However, the ds2760's internal current meter is
+ * too inaccurate to rely on - the spec quotes an error of roughly 15%.
+ * Hence, we use the current offset bias register to compensate
+ * for that error.
+ */
+
+ if (!power_supply_am_i_supplied(&di->bat))
+ return;
+
+ bias = (signed char) di->current_raw +
+ (signed char) di->raw[DS2760_CURRENT_OFFSET_BIAS];
+
+ dev_dbg(di->dev, "%s: bias = %d\n", __func__, bias);
+
+ w1_ds2760_write(di->w1_dev, &bias, DS2760_CURRENT_OFFSET_BIAS, 1);
+ w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+
+ /* Write to the di->raw[] buffer directly - the CURRENT_OFFSET_BIAS
+ * value won't be read back by ds2760_battery_read_status() */
+ di->raw[DS2760_CURRENT_OFFSET_BIAS] = bias;
+}
+
+static void ds2760_battery_set_charged(struct power_supply *psy)
+{
+ struct ds2760_device_info *di = to_ds2760_device_info(psy);
+
+ /* postpone the actual work by 20 secs. This is for debouncing GPIO
+ * signals and to let the current value settle. See AN4188. */
+ cancel_delayed_work(&di->set_charged_work);
+ queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+}
+
static int ds2760_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -337,6 +412,12 @@ static int ds2760_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TEMP:
val->intval = di->temp_C;
break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ val->intval = di->life_sec;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = di->rem_capacity;
+ break;
default:
return -EINVAL;
}
@@ -353,6 +434,8 @@ static enum power_supply_property ds2760_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_EMPTY,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
};
static int ds2760_battery_probe(struct platform_device *pdev)
@@ -376,17 +459,12 @@ static int ds2760_battery_probe(struct platform_device *pdev)
di->bat.properties = ds2760_battery_props;
di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
di->bat.get_property = ds2760_battery_get_property;
+ di->bat.set_charged = ds2760_battery_set_charged;
di->bat.external_power_changed =
ds2760_battery_external_power_changed;
di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
- retval = power_supply_register(&pdev->dev, &di->bat);
- if (retval) {
- dev_err(di->dev, "failed to register battery\n");
- goto batt_failed;
- }
-
/* enable sleep mode feature */
ds2760_battery_read_status(di);
status = di->raw[DS2760_STATUS_REG];
@@ -397,7 +475,24 @@ static int ds2760_battery_probe(struct platform_device *pdev)
ds2760_battery_write_status(di, status);
+ /* set rated capacity from module param */
+ if (rated_capacity)
+ ds2760_battery_write_rated_capacity(di, rated_capacity);
+
+ /* set current accumulator if given as parameter.
+ * this should only be done for bootstrapping the value */
+ if (current_accum)
+ ds2760_battery_set_current_accum(di, current_accum);
+
+ retval = power_supply_register(&pdev->dev, &di->bat);
+ if (retval) {
+ dev_err(di->dev, "failed to register battery\n");
+ goto batt_failed;
+ }
+
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
+ INIT_DELAYED_WORK(&di->set_charged_work,
+ ds2760_battery_set_charged_work);
di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
if (!di->monitor_wqueue) {
retval = -ESRCH;
@@ -422,6 +517,8 @@ static int ds2760_battery_remove(struct platform_device *pdev)
cancel_rearming_delayed_workqueue(di->monitor_wqueue,
&di->monitor_work);
+ cancel_rearming_delayed_workqueue(di->monitor_wqueue,
+ &di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
power_supply_unregister(&di->bat);
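
The reworked time-to-empty arithmetic above is not just cosmetic: multiplying the charge difference by 3600 overflows a signed 32-bit integer once the difference exceeds roughly 0.6 Ah, whereas multiplying by 36 and dividing the current by 100 keeps every intermediate in range while giving the same result. A small standalone sketch with made-up example values:

/*
 * Illustrative userspace sketch, not part of the patch: compare the old and
 * new time-to-empty formulas. All numbers are example values.
 */
#include <stdio.h>

int main(void)
{
	int delta_uAh  = 1400000;	/* accum_current_uAh - empty_uAh, 1.4 Ah */
	int current_uA = 350000;	/* 350 mA discharge current */

	/* old formula: 1400000 * 3600 = 5.04e9, overflows a signed 32-bit int */
	long long exact = (long long)delta_uAh * 3600LL / current_uA;

	/* new formula: 1400000 * 36 = 5.04e7, well within 32 bits */
	int scaled = (delta_uAh * 36) / (current_uA / 100);

	printf("exact %lld s, rescaled %d s\n", exact, scaled);
	return 0;
}

Both expressions print 14400 seconds here; the /100 on the current is also why the driver now guards against currents below 100 uA before dividing.
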
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 58e419299cd..8fefe5a7355 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/types.h>
#include <linux/err.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/jiffies.h>
@@ -231,6 +233,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ if (ec_byte & BAT_STAT_TRICKLE)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ else if (ec_byte & BAT_STAT_CHARGING)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = !!(ec_byte & (BAT_STAT_PRESENT |
BAT_STAT_TRICKLE));
@@ -276,6 +286,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
return ret;
val->intval = ec_byte;
break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (ec_byte & BAT_STAT_FULL)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (ec_byte & BAT_STAT_LOW)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ break;
case POWER_SUPPLY_PROP_TEMP:
ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
@@ -315,12 +333,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
static enum power_supply_property olpc_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -370,6 +390,29 @@ static struct bin_attribute olpc_bat_eeprom = {
.read = olpc_bat_eeprom_read,
};
+/* Allow userspace to see the specific error value pulled from the EC */
+
+static ssize_t olpc_bat_error_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ uint8_t ec_byte;
+ ssize_t ret;
+
+ ret = olpc_ec_cmd(EC_BAT_ERRCODE, NULL, 0, &ec_byte, 1);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ec_byte);
+}
+
+static struct device_attribute olpc_bat_error = {
+ .attr = {
+ .name = "error",
+ .mode = S_IRUGO,
+ },
+ .show = olpc_bat_error_read,
+};
+
/*********************************************************************
* Initialisation
*********************************************************************/
@@ -433,8 +476,14 @@ static int __init olpc_bat_init(void)
if (ret)
goto eeprom_failed;
+ ret = device_create_file(olpc_bat.dev, &olpc_bat_error);
+ if (ret)
+ goto error_failed;
+
goto success;
+error_failed:
+ device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
eeprom_failed:
power_supply_unregister(&olpc_bat);
battery_failed:
@@ -447,6 +496,7 @@ success:
static void __exit olpc_bat_exit(void)
{
+ device_remove_file(olpc_bat.dev, &olpc_bat_error);
device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
power_supply_unregister(&olpc_bat);
power_supply_unregister(&olpc_ac);
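
The new "error" attribute ends up under the battery's power_supply directory in sysfs, so diagnostics can read the raw EC error code directly. A hedged userspace sketch follows; the exact path depends on the supply's registered name, which is assumed here.

/*
 * Illustrative userspace sketch, not part of the patch: read the EC error
 * code from the new "error" attribute. The sysfs path is an assumption -
 * substitute the battery's actual power_supply name.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/power_supply/olpc-battery/error", "r");
	int err;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &err) == 1)
		printf("EC battery error code: %d\n", err);
	fclose(f);
	return 0;
}
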
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 5520040449c..cce75b40b43 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -18,7 +18,9 @@
#include <linux/power_supply.h>
#include "power_supply.h"
+/* exported for the APM Power driver, APM emulation */
struct class *power_supply_class;
+EXPORT_SYMBOL_GPL(power_supply_class);
static int __power_supply_changed_work(struct device *dev, void *data)
{
@@ -55,6 +57,7 @@ void power_supply_changed(struct power_supply *psy)
schedule_work(&psy->changed_work);
}
+EXPORT_SYMBOL_GPL(power_supply_changed);
static int __power_supply_am_i_supplied(struct device *dev, void *data)
{
@@ -86,6 +89,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
return error;
}
+EXPORT_SYMBOL_GPL(power_supply_am_i_supplied);
static int __power_supply_is_system_supplied(struct device *dev, void *data)
{
@@ -110,6 +114,35 @@ int power_supply_is_system_supplied(void)
return error;
}
+EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
+
+int power_supply_set_battery_charged(struct power_supply *psy)
+{
+ if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) {
+ psy->set_charged(psy);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
+
+static int power_supply_match_device_by_name(struct device *dev, void *data)
+{
+ const char *name = data;
+ struct power_supply *psy = dev_get_drvdata(dev);
+
+ return strcmp(psy->name, name) == 0;
+}
+
+struct power_supply *power_supply_get_by_name(char *name)
+{
+ struct device *dev = class_find_device(power_supply_class, NULL, name,
+ power_supply_match_device_by_name);
+
+ return dev ? dev_get_drvdata(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_by_name);
int power_supply_register(struct device *parent, struct power_supply *psy)
{
@@ -144,6 +177,7 @@ dev_create_failed:
success:
return rc;
}
+EXPORT_SYMBOL_GPL(power_supply_register);
void power_supply_unregister(struct power_supply *psy)
{
@@ -152,6 +186,7 @@ void power_supply_unregister(struct power_supply *psy)
power_supply_remove_attrs(psy);
device_unregister(psy->dev);
}
+EXPORT_SYMBOL_GPL(power_supply_unregister);
static int __init power_supply_class_init(void)
{
@@ -170,15 +205,6 @@ static void __exit power_supply_class_exit(void)
class_destroy(power_supply_class);
}
-EXPORT_SYMBOL_GPL(power_supply_changed);
-EXPORT_SYMBOL_GPL(power_supply_am_i_supplied);
-EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
-EXPORT_SYMBOL_GPL(power_supply_register);
-EXPORT_SYMBOL_GPL(power_supply_unregister);
-
-/* exported for the APM Power driver, APM emulation */
-EXPORT_SYMBOL_GPL(power_supply_class);
-
subsys_initcall(power_supply_class_init);
module_exit(power_supply_class_exit);
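
power_supply_get_by_name() and power_supply_set_battery_charged() are aimed at charger or PMIC drivers that learn about end-of-charge out of band (a GPIO, an interrupt) and want to hand that fact to the battery gauge - exactly the hook the ds2760 driver grows in this series. A hedged sketch of a caller; the supply name is an assumption.

/*
 * Illustrative sketch, not part of the patch: a charger driver telling the
 * gauge that charging has finished. The supply name "main-battery" is an
 * assumption.
 */
#include <linux/power_supply.h>

static void charger_notify_battery_full(void)
{
	struct power_supply *bat = power_supply_get_by_name("main-battery");

	/*
	 * The core only forwards this to supplies of type battery that
	 * provide a set_charged() callback; everything else gets -EINVAL.
	 */
	if (bat)
		power_supply_set_battery_charged(bat);
}
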
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index da73591017f..08144393d64 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -43,6 +43,9 @@ static ssize_t power_supply_show_property(struct device *dev,
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
};
+ static char *charge_type[] = {
+ "Unknown", "N/A", "Trickle", "Fast"
+ };
static char *health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold",
@@ -51,6 +54,9 @@ static ssize_t power_supply_show_property(struct device *dev,
"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
"LiMn"
};
+ static char *capacity_level_text[] = {
+ "Unknown", "Critical", "Low", "Normal", "High", "Full"
+ };
ssize_t ret;
struct power_supply *psy = dev_get_drvdata(dev);
const ptrdiff_t off = attr - power_supply_attrs;
@@ -67,10 +73,14 @@ static ssize_t power_supply_show_property(struct device *dev,
if (off == POWER_SUPPLY_PROP_STATUS)
return sprintf(buf, "%s\n", status_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE)
+ return sprintf(buf, "%s\n", charge_type[value.intval]);
else if (off == POWER_SUPPLY_PROP_HEALTH)
return sprintf(buf, "%s\n", health_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_TECHNOLOGY)
return sprintf(buf, "%s\n", technology_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
+ return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
return sprintf(buf, "%s\n", value.strval);
@@ -81,6 +91,7 @@ static ssize_t power_supply_show_property(struct device *dev,
static struct device_attribute power_supply_attrs[] = {
/* Properties of type `int' */
POWER_SUPPLY_ATTR(status),
+ POWER_SUPPLY_ATTR(charge_type),
POWER_SUPPLY_ATTR(health),
POWER_SUPPLY_ATTR(present),
POWER_SUPPLY_ATTR(online),
@@ -109,6 +120,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(energy_now),
POWER_SUPPLY_ATTR(energy_avg),
POWER_SUPPLY_ATTR(capacity),
+ POWER_SUPPLY_ATTR(capacity_level),
POWER_SUPPLY_ATTR(temp),
POWER_SUPPLY_ATTR(temp_ambient),
POWER_SUPPLY_ATTR(time_to_empty_now),
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
new file mode 100644
index 00000000000..2a4c8b0b829
--- /dev/null
+++ b/drivers/power/wm831x_power.c
@@ -0,0 +1,779 @@
+/*
+ * PMU driver for Wolfson Microelectronics wm831x PMICs
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/pmu.h>
+#include <linux/mfd/wm831x/pdata.h>
+
+struct wm831x_power {
+ struct wm831x *wm831x;
+ struct power_supply wall;
+ struct power_supply backup;
+ struct power_supply usb;
+ struct power_supply battery;
+};
+
+static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_SYSTEM_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & supply)
+ val->intval = 1;
+ else
+ val->intval = 0;
+
+ return 0;
+}
+
+static int wm831x_power_read_voltage(struct wm831x *wm831x,
+ enum wm831x_auxadc src,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = wm831x_auxadc_read_uv(wm831x, src);
+ if (ret >= 0)
+ val->intval = ret;
+
+ return ret;
+}
+
+/*********************************************************************
+ * WALL Power
+ *********************************************************************/
+static int wm831x_wall_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
+ struct wm831x *wm831x = wm831x_power->wm831x;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = wm831x_power_check_online(wm831x, WM831X_PWR_WALL, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_WALL, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property wm831x_wall_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
+/*********************************************************************
+ * USB Power
+ *********************************************************************/
+static int wm831x_usb_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
+ struct wm831x *wm831x = wm831x_power->wm831x;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = wm831x_power_check_online(wm831x, WM831X_PWR_USB, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_USB, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property wm831x_usb_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
+/*********************************************************************
+ * Battery properties
+ *********************************************************************/
+
+struct chg_map {
+ int val;
+ int reg_val;
+};
+
+static struct chg_map trickle_ilims[] = {
+ { 50, 0 << WM831X_CHG_TRKL_ILIM_SHIFT },
+ { 100, 1 << WM831X_CHG_TRKL_ILIM_SHIFT },
+ { 150, 2 << WM831X_CHG_TRKL_ILIM_SHIFT },
+ { 200, 3 << WM831X_CHG_TRKL_ILIM_SHIFT },
+};
+
+static struct chg_map vsels[] = {
+ { 4050, 0 << WM831X_CHG_VSEL_SHIFT },
+ { 4100, 1 << WM831X_CHG_VSEL_SHIFT },
+ { 4150, 2 << WM831X_CHG_VSEL_SHIFT },
+ { 4200, 3 << WM831X_CHG_VSEL_SHIFT },
+};
+
+static struct chg_map fast_ilims[] = {
+ { 0, 0 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 50, 1 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 100, 2 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 150, 3 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 200, 4 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 250, 5 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 300, 6 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 350, 7 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 400, 8 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 450, 9 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 500, 10 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 600, 11 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 700, 12 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 800, 13 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 900, 14 << WM831X_CHG_FAST_ILIM_SHIFT },
+ { 1000, 15 << WM831X_CHG_FAST_ILIM_SHIFT },
+};
+
+static struct chg_map eoc_iterms[] = {
+ { 20, 0 << WM831X_CHG_ITERM_SHIFT },
+ { 30, 1 << WM831X_CHG_ITERM_SHIFT },
+ { 40, 2 << WM831X_CHG_ITERM_SHIFT },
+ { 50, 3 << WM831X_CHG_ITERM_SHIFT },
+ { 60, 4 << WM831X_CHG_ITERM_SHIFT },
+ { 70, 5 << WM831X_CHG_ITERM_SHIFT },
+ { 80, 6 << WM831X_CHG_ITERM_SHIFT },
+ { 90, 7 << WM831X_CHG_ITERM_SHIFT },
+};
+
+static struct chg_map chg_times[] = {
+ { 60, 0 << WM831X_CHG_TIME_SHIFT },
+ { 90, 1 << WM831X_CHG_TIME_SHIFT },
+ { 120, 2 << WM831X_CHG_TIME_SHIFT },
+ { 150, 3 << WM831X_CHG_TIME_SHIFT },
+ { 180, 4 << WM831X_CHG_TIME_SHIFT },
+ { 210, 5 << WM831X_CHG_TIME_SHIFT },
+ { 240, 6 << WM831X_CHG_TIME_SHIFT },
+ { 270, 7 << WM831X_CHG_TIME_SHIFT },
+ { 300, 8 << WM831X_CHG_TIME_SHIFT },
+ { 330, 9 << WM831X_CHG_TIME_SHIFT },
+ { 360, 10 << WM831X_CHG_TIME_SHIFT },
+ { 390, 11 << WM831X_CHG_TIME_SHIFT },
+ { 420, 12 << WM831X_CHG_TIME_SHIFT },
+ { 450, 13 << WM831X_CHG_TIME_SHIFT },
+ { 480, 14 << WM831X_CHG_TIME_SHIFT },
+ { 510, 15 << WM831X_CHG_TIME_SHIFT },
+};
+
+static void wm831x_battey_apply_config(struct wm831x *wm831x,
+ struct chg_map *map, int count, int val,
+ int *reg, const char *name,
+ const char *units)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (val == map[i].val)
+ break;
+ if (i == count) {
+ dev_err(wm831x->dev, "Invalid %s %d%s\n",
+ name, val, units);
+ } else {
+ *reg |= map[i].reg_val;
+ dev_dbg(wm831x->dev, "Set %s of %d%s\n", name, val, units);
+ }
+}
+
+static void wm831x_config_battery(struct wm831x *wm831x)
+{
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
+ struct wm831x_battery_pdata *pdata;
+ int ret, reg1, reg2;
+
+ if (!wm831x_pdata || !wm831x_pdata->battery) {
+ dev_warn(wm831x->dev,
+ "No battery charger configuration\n");
+ return;
+ }
+
+ pdata = wm831x_pdata->battery;
+
+ reg1 = 0;
+ reg2 = 0;
+
+ if (!pdata->enable) {
+ dev_info(wm831x->dev, "Battery charger disabled\n");
+ return;
+ }
+
+ reg1 |= WM831X_CHG_ENA;
+ if (pdata->off_mask)
+ reg2 |= WM831X_CHG_OFF_MSK;
+ if (pdata->fast_enable)
+ reg1 |= WM831X_CHG_FAST;
+
+ wm831x_battey_apply_config(wm831x, trickle_ilims,
+ ARRAY_SIZE(trickle_ilims),
+ pdata->trickle_ilim, &reg2,
+ "trickle charge current limit", "mA");
+
+ wm831x_battey_apply_config(wm831x, vsels, ARRAY_SIZE(vsels),
+ pdata->vsel, &reg2,
+ "target voltage", "mV");
+
+ wm831x_battey_apply_config(wm831x, fast_ilims, ARRAY_SIZE(fast_ilims),
+ pdata->fast_ilim, &reg2,
+ "fast charge current limit", "mA");
+
+ wm831x_battey_apply_config(wm831x, eoc_iterms, ARRAY_SIZE(eoc_iterms),
+ pdata->eoc_iterm, &reg1,
+ "end of charge current threshold", "mA");
+
+ wm831x_battey_apply_config(wm831x, chg_times, ARRAY_SIZE(chg_times),
+ pdata->timeout, &reg2,
+ "charger timeout", "min");
+
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_1,
+ WM831X_CHG_ENA_MASK |
+ WM831X_CHG_FAST_MASK |
+ WM831X_CHG_ITERM_MASK |
+ WM831X_CHG_ITERM_MASK,
+ reg1);
+ if (ret != 0)
+ dev_err(wm831x->dev, "Failed to set charger control 1: %d\n",
+ ret);
+
+ ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_2,
+ WM831X_CHG_OFF_MSK |
+ WM831X_CHG_TIME_MASK |
+ WM831X_CHG_FAST_ILIM_MASK |
+ WM831X_CHG_TRKL_ILIM_MASK |
+ WM831X_CHG_VSEL_MASK,
+ reg2);
+ if (ret != 0)
+ dev_err(wm831x->dev, "Failed to set charger control 2: %d\n",
+ ret);
+
+ wm831x_reg_lock(wm831x);
+}
+
+static int wm831x_bat_check_status(struct wm831x *wm831x, int *status)
+{
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_SYSTEM_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & WM831X_PWR_SRC_BATT) {
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
+ return 0;
+ }
+
+ ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & WM831X_CHG_STATE_MASK) {
+ case WM831X_CHG_STATE_OFF:
+ *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case WM831X_CHG_STATE_TRICKLE:
+ case WM831X_CHG_STATE_FAST:
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+
+ default:
+ *status = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int wm831x_bat_check_type(struct wm831x *wm831x, int *type)
+{
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & WM831X_CHG_STATE_MASK) {
+ case WM831X_CHG_STATE_TRICKLE:
+ case WM831X_CHG_STATE_TRICKLE_OT:
+ *type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case WM831X_CHG_STATE_FAST:
+ case WM831X_CHG_STATE_FAST_OT:
+ *type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ default:
+ *type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+static int wm831x_bat_check_health(struct wm831x *wm831x, int *health)
+{
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & WM831X_BATT_HOT_STS) {
+ *health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ return 0;
+ }
+
+ if (ret & WM831X_BATT_COLD_STS) {
+ *health = POWER_SUPPLY_HEALTH_COLD;
+ return 0;
+ }
+
+ if (ret & WM831X_BATT_OV_STS) {
+ *health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ return 0;
+ }
+
+ switch (ret & WM831X_CHG_STATE_MASK) {
+ case WM831X_CHG_STATE_TRICKLE_OT:
+ case WM831X_CHG_STATE_FAST_OT:
+ *health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case WM831X_CHG_STATE_DEFECTIVE:
+ *health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ default:
+ *health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ }
+
+ return 0;
+}
+
+static int wm831x_bat_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
+ struct wm831x *wm831x = wm831x_power->wm831x;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = wm831x_bat_check_status(wm831x, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = wm831x_power_check_online(wm831x, WM831X_PWR_SRC_BATT,
+ val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BATT, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = wm831x_bat_check_health(wm831x, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = wm831x_bat_check_type(wm831x, &val->intval);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property wm831x_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+};
+
+static const char *wm831x_bat_irqs[] = {
+ "BATT HOT",
+ "BATT COLD",
+ "BATT FAIL",
+ "OV",
+ "END",
+ "TO",
+ "MODE",
+ "START",
+};
+
+static irqreturn_t wm831x_bat_irq(int irq, void *data)
+{
+ struct wm831x_power *wm831x_power = data;
+ struct wm831x *wm831x = wm831x_power->wm831x;
+
+ dev_dbg(wm831x->dev, "Battery status changed: %d\n", irq);
+
+ /* The battery charger is autonomous so we don't need to do
+ * anything except kick user space */
+ power_supply_changed(&wm831x_power->battery);
+
+ return IRQ_HANDLED;
+}
+
+
+/*********************************************************************
+ * Backup supply properties
+ *********************************************************************/
+
+static void wm831x_config_backup(struct wm831x *wm831x)
+{
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
+ struct wm831x_backup_pdata *pdata;
+ int ret, reg;
+
+ if (!wm831x_pdata || !wm831x_pdata->backup) {
+ dev_warn(wm831x->dev,
+ "No backup battery charger configuration\n");
+ return;
+ }
+
+ pdata = wm831x_pdata->backup;
+
+ reg = 0;
+
+ if (pdata->charger_enable)
+ reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
+ if (pdata->no_constant_voltage)
+ reg |= WM831X_BKUP_CHG_MODE;
+
+ switch (pdata->vlim) {
+ case 2500:
+ break;
+ case 3100:
+ reg |= WM831X_BKUP_CHG_VLIM;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
+ pdata->vlim);
+ }
+
+ switch (pdata->ilim) {
+ case 100:
+ break;
+ case 200:
+ reg |= 1;
+ break;
+ case 300:
+ reg |= 2;
+ break;
+ case 400:
+ reg |= 3;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
+ pdata->ilim);
+ }
+
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
+ WM831X_BKUP_CHG_ENA_MASK |
+ WM831X_BKUP_CHG_MODE_MASK |
+ WM831X_BKUP_BATT_DET_ENA_MASK |
+ WM831X_BKUP_CHG_VLIM_MASK |
+ WM831X_BKUP_CHG_ILIM_MASK,
+ reg);
+ if (ret != 0)
+ dev_err(wm831x->dev,
+ "Failed to set backup charger config: %d\n", ret);
+
+ wm831x_reg_lock(wm831x);
+}
+
+static int wm831x_backup_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
+ struct wm831x *wm831x = wm831x_power->wm831x;
+ int ret = 0;
+
+ ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (ret & WM831X_BKUP_CHG_STS)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
+ val);
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (ret & WM831X_BKUP_CHG_STS)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property wm831x_backup_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+/*********************************************************************
+ * Initialisation
+ *********************************************************************/
+
+static irqreturn_t wm831x_syslo_irq(int irq, void *data)
+{
+ struct wm831x_power *wm831x_power = data;
+ struct wm831x *wm831x = wm831x_power->wm831x;
+
+ /* Not much we can actually *do* but tell people for
+ * posterity, we're probably about to run out of power. */
+ dev_crit(wm831x->dev, "SYSVDD under voltage\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
+{
+ struct wm831x_power *wm831x_power = data;
+ struct wm831x *wm831x = wm831x_power->wm831x;
+
+ dev_dbg(wm831x->dev, "Power source changed\n");
+
+ /* Just notify for everything - little harm in overnotifying.
+ * The backup battery is not a power source while the system
+ * is running so skip that.
+ */
+ power_supply_changed(&wm831x_power->battery);
+ power_supply_changed(&wm831x_power->usb);
+ power_supply_changed(&wm831x_power->wall);
+
+ return IRQ_HANDLED;
+}
+
+static __devinit int wm831x_power_probe(struct platform_device *pdev)
+{
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_power *power;
+ struct power_supply *usb;
+ struct power_supply *battery;
+ struct power_supply *wall;
+ struct power_supply *backup;
+ int ret, irq, i;
+
+ power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL);
+ if (power == NULL)
+ return -ENOMEM;
+
+ power->wm831x = wm831x;
+ platform_set_drvdata(pdev, power);
+
+ usb = &power->usb;
+ battery = &power->battery;
+ wall = &power->wall;
+ backup = &power->backup;
+
+ /* We ignore configuration failures since we can still read back
+ * the status without enabling either of the chargers.
+ */
+ wm831x_config_battery(wm831x);
+ wm831x_config_backup(wm831x);
+
+ wall->name = "wm831x-wall";
+ wall->type = POWER_SUPPLY_TYPE_MAINS;
+ wall->properties = wm831x_wall_props;
+ wall->num_properties = ARRAY_SIZE(wm831x_wall_props);
+ wall->get_property = wm831x_wall_get_prop;
+ ret = power_supply_register(&pdev->dev, wall);
+ if (ret)
+ goto err_kmalloc;
+
+ battery->name = "wm831x-battery";
+ battery->properties = wm831x_bat_props;
+ battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
+ battery->get_property = wm831x_bat_get_prop;
+ battery->use_for_apm = 1;
+ ret = power_supply_register(&pdev->dev, battery);
+ if (ret)
+ goto err_wall;
+
+ usb->name = "wm831x-usb",
+ usb->type = POWER_SUPPLY_TYPE_USB;
+ usb->properties = wm831x_usb_props;
+ usb->num_properties = ARRAY_SIZE(wm831x_usb_props);
+ usb->get_property = wm831x_usb_get_prop;
+ ret = power_supply_register(&pdev->dev, usb);
+ if (ret)
+ goto err_battery;
+
+ backup->name = "wm831x-backup";
+ backup->type = POWER_SUPPLY_TYPE_BATTERY;
+ backup->properties = wm831x_backup_props;
+ backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
+ backup->get_property = wm831x_backup_get_prop;
+ ret = power_supply_register(&pdev->dev, backup);
+ if (ret)
+ goto err_usb;
+
+ irq = platform_get_irq_byname(pdev, "SYSLO");
+ ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq,
+ IRQF_TRIGGER_RISING, "SYSLO",
+ power);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
+ irq, ret);
+ goto err_backup;
+ }
+
+ irq = platform_get_irq_byname(pdev, "PWR SRC");
+ ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq,
+ IRQF_TRIGGER_RISING, "Power source",
+ power);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
+ irq, ret);
+ goto err_syslo;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq,
+ IRQF_TRIGGER_RISING,
+ wm831x_bat_irqs[i],
+ power);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to request %s IRQ %d: %d\n",
+ wm831x_bat_irqs[i], irq, ret);
+ goto err_bat_irq;
+ }
+ }
+
+ return ret;
+
+err_bat_irq:
+ for (; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ wm831x_free_irq(wm831x, irq, power);
+ }
+ irq = platform_get_irq_byname(pdev, "PWR SRC");
+ wm831x_free_irq(wm831x, irq, power);
+err_syslo:
+ irq = platform_get_irq_byname(pdev, "SYSLO");
+ wm831x_free_irq(wm831x, irq, power);
+err_backup:
+ power_supply_unregister(backup);
+err_usb:
+ power_supply_unregister(usb);
+err_battery:
+ power_supply_unregister(battery);
+err_wall:
+ power_supply_unregister(wall);
+err_kmalloc:
+ kfree(power);
+ return ret;
+}
+
+static __devexit int wm831x_power_remove(struct platform_device *pdev)
+{
+ struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = wm831x_power->wm831x;
+ int irq, i;
+
+ for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ wm831x_free_irq(wm831x, irq, wm831x_power);
+ }
+
+ irq = platform_get_irq_byname(pdev, "PWR SRC");
+ wm831x_free_irq(wm831x, irq, wm831x_power);
+
+ irq = platform_get_irq_byname(pdev, "SYSLO");
+ wm831x_free_irq(wm831x, irq, wm831x_power);
+
+ power_supply_unregister(&wm831x_power->backup);
+ power_supply_unregister(&wm831x_power->battery);
+ power_supply_unregister(&wm831x_power->wall);
+ power_supply_unregister(&wm831x_power->usb);
+ return 0;
+}
+
+static struct platform_driver wm831x_power_driver = {
+ .probe = wm831x_power_probe,
+ .remove = __devexit_p(wm831x_power_remove),
+ .driver = {
+ .name = "wm831x-power",
+ },
+};
+
+static int __init wm831x_power_init(void)
+{
+ return platform_driver_register(&wm831x_power_driver);
+}
+module_init(wm831x_power_init);
+
+static void __exit wm831x_power_exit(void)
+{
+ platform_driver_unregister(&wm831x_power_driver);
+}
+module_exit(wm831x_power_exit);
+
+MODULE_DESCRIPTION("Power supply driver for WM831x PMICs");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-power");
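
wm831x_config_battery() and wm831x_config_backup() only act on values handed in through the MFD platform data, so the chargers stay unconfigured unless the board supplies them. A hedged sketch of such board data follows; the field names mirror what the driver dereferences, and the numeric values are examples that must come from the chg_map tables above.

/*
 * Illustrative board-code sketch, not part of the patch. Field names follow
 * what wm831x_config_battery()/_config_backup() read; values must match the
 * chg_map tables above.
 */
#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_battery_pdata board_battery = {
	.enable       = 1,
	.fast_enable  = 1,
	.off_mask     = 1,
	.trickle_ilim = 100,	/* mA */
	.vsel         = 4200,	/* mV */
	.fast_ilim    = 500,	/* mA */
	.eoc_iterm    = 50,	/* mA */
	.timeout      = 240,	/* minutes */
};

static struct wm831x_backup_pdata board_backup = {
	.charger_enable = 1,
	.vlim           = 3100,	/* mV */
	.ilim           = 200,	/* uA */
};

static struct wm831x_pdata board_wm831x_pdata = {
	.battery = &board_battery,
	.backup  = &board_backup,
	/* remaining wm831x platform data as required by the board */
};
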
diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c
index 1b16bf343f2..28b0299c004 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/wm8350_power.c
@@ -321,6 +321,24 @@ static int wm8350_bat_check_health(struct wm8350 *wm8350)
return POWER_SUPPLY_HEALTH_GOOD;
}
+static int wm8350_bat_get_charge_type(struct wm8350 *wm8350)
+{
+ int state;
+
+ state = wm8350_reg_read(wm8350, WM8350_BATTERY_CHARGER_CONTROL_2) &
+ WM8350_CHG_STS_MASK;
+ switch (state) {
+ case WM8350_CHG_STS_OFF:
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ case WM8350_CHG_STS_TRICKLE:
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ case WM8350_CHG_STS_FAST:
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ default:
+ return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+}
+
static int wm8350_bat_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -342,6 +360,9 @@ static int wm8350_bat_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_HEALTH:
val->intval = wm8350_bat_check_health(wm8350);
break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = wm8350_bat_get_charge_type(wm8350);
+ break;
default:
ret = -EINVAL;
break;
@@ -355,6 +376,7 @@ static enum power_supply_property wm8350_bat_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
};
/*********************************************************************
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index b787335a841..f2bfd296dba 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -22,17 +22,20 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
-#include <linux/wm97xx_batt.h>
+#include <linux/irq.h>
static DEFINE_MUTEX(bat_lock);
static struct work_struct bat_work;
struct mutex work_lock;
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
-static struct wm97xx_batt_info *pdata;
+static struct wm97xx_batt_info *gpdata;
static enum power_supply_property *prop;
static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
{
+ struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data;
+ struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent),
pdata->batt_aux) * pdata->batt_mult /
pdata->batt_div;
@@ -40,6 +43,9 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
{
+ struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data;
+ struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+
return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent),
pdata->temp_aux) * pdata->temp_mult /
pdata->temp_div;
@@ -49,6 +55,9 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data;
+ struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
val->intval = bat_status;
@@ -97,6 +106,8 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
static void wm97xx_bat_update(struct power_supply *bat_ps)
{
int old_status = bat_status;
+ struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data;
+ struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
mutex_lock(&work_lock);
@@ -127,21 +138,29 @@ static void wm97xx_bat_work(struct work_struct *work)
wm97xx_bat_update(&bat_ps);
}
+static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
+{
+ schedule_work(&bat_work);
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_PM
-static int wm97xx_bat_suspend(struct platform_device *dev, pm_message_t state)
+static int wm97xx_bat_suspend(struct device *dev)
{
flush_scheduled_work();
return 0;
}
-static int wm97xx_bat_resume(struct platform_device *dev)
+static int wm97xx_bat_resume(struct device *dev)
{
schedule_work(&bat_work);
return 0;
}
-#else
-#define wm97xx_bat_suspend NULL
-#define wm97xx_bat_resume NULL
+
+static struct dev_pm_ops wm97xx_bat_pm_ops = {
+ .suspend = wm97xx_bat_suspend,
+ .resume = wm97xx_bat_resume,
+};
#endif
static int __devinit wm97xx_bat_probe(struct platform_device *dev)
@@ -149,6 +168,15 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
int ret = 0;
int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
int i = 0;
+ struct wm97xx_pdata *wmdata = dev->dev.platform_data;
+ struct wm97xx_batt_pdata *pdata;
+
+ if (gpdata) {
+ dev_err(&dev->dev, "Do not pass platform_data through "
+ "wm97xx_bat_set_pdata!\n");
+ return -EINVAL;
+ } else
+ pdata = wmdata->batt_pdata;
if (dev->id != -1)
return -EINVAL;
@@ -156,17 +184,22 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
mutex_init(&work_lock);
if (!pdata) {
- dev_err(&dev->dev, "Please use wm97xx_bat_set_pdata\n");
+ dev_err(&dev->dev, "No platform_data supplied\n");
return -EINVAL;
}
- if (pdata->charge_gpio >= 0 && gpio_is_valid(pdata->charge_gpio)) {
+ if (gpio_is_valid(pdata->charge_gpio)) {
ret = gpio_request(pdata->charge_gpio, "BATT CHRG");
if (ret)
goto err;
ret = gpio_direction_input(pdata->charge_gpio);
if (ret)
goto err2;
+ ret = request_irq(gpio_to_irq(pdata->charge_gpio),
+ wm97xx_chrg_irq, IRQF_DISABLED,
+ "AC Detect", 0);
+ if (ret)
+ goto err2;
props++; /* POWER_SUPPLY_PROP_STATUS */
}
@@ -183,7 +216,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
if (!prop)
- goto err2;
+ goto err3;
prop[i++] = POWER_SUPPLY_PROP_PRESENT;
if (pdata->charge_gpio >= 0)
@@ -216,21 +249,30 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
if (!ret)
schedule_work(&bat_work);
else
- goto err3;
+ goto err4;
return 0;
-err3:
+err4:
kfree(prop);
+err3:
+ if (gpio_is_valid(pdata->charge_gpio))
+ free_irq(gpio_to_irq(pdata->charge_gpio), dev);
err2:
- gpio_free(pdata->charge_gpio);
+ if (gpio_is_valid(pdata->charge_gpio))
+ gpio_free(pdata->charge_gpio);
err:
return ret;
}
static int __devexit wm97xx_bat_remove(struct platform_device *dev)
{
- if (pdata && pdata->charge_gpio && pdata->charge_gpio >= 0)
+ struct wm97xx_pdata *wmdata = dev->dev.platform_data;
+ struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+
+ if (pdata && gpio_is_valid(pdata->charge_gpio)) {
+ free_irq(gpio_to_irq(pdata->charge_gpio), dev);
gpio_free(pdata->charge_gpio);
+ }
flush_scheduled_work();
power_supply_unregister(&bat_ps);
kfree(prop);
@@ -241,11 +283,12 @@ static struct platform_driver wm97xx_bat_driver = {
.driver = {
.name = "wm97xx-battery",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &wm97xx_bat_pm_ops,
+#endif
},
.probe = wm97xx_bat_probe,
.remove = __devexit_p(wm97xx_bat_remove),
- .suspend = wm97xx_bat_suspend,
- .resume = wm97xx_bat_resume,
};
static int __init wm97xx_bat_init(void)
@@ -258,9 +301,9 @@ static void __exit wm97xx_bat_exit(void)
platform_driver_unregister(&wm97xx_bat_driver);
}
-void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data)
+void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data)
{
- pdata = data;
+ gpdata = data;
}
EXPORT_SYMBOL_GPL(wm97xx_bat_set_pdata);
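
After this conversion the battery parameters travel inside the wm97xx core's platform data, and wm97xx_bat_set_pdata() survives only to reject boards still using the old path. A hedged sketch of the new wiring; the fields shown are the ones this file reads, while the header, AUX channel constants and numbers are example assumptions.

/*
 * Illustrative board-code sketch, not part of the patch: battery data
 * attached to the wm97xx core device rather than passed through
 * wm97xx_bat_set_pdata(). Values and AUX channel choices are assumptions.
 */
#include <linux/wm97xx.h>

static struct wm97xx_batt_pdata board_batt_pdata = {
	.batt_aux    = WM97XX_AUX_ID3,	/* ADC input sampling battery voltage */
	.temp_aux    = WM97XX_AUX_ID2,	/* ADC input sampling temperature */
	.charge_gpio = 82,		/* example GPIO, -1 if unused */
	.batt_mult   = 1000,
	.batt_div    = 414,
	.temp_mult   = 1,
	.temp_div    = 1,
};

static struct wm97xx_pdata board_wm97xx_pdata = {
	.batt_pdata = &board_batt_pdata,
};
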
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 2dc42bbf6fe..bcbb161bde0 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1,6 +1,5 @@
menuconfig REGULATOR
bool "Voltage and Current Regulator Support"
- default n
help
Generic Voltage and Current Regulator support.
@@ -30,7 +29,6 @@ config REGULATOR_DEBUG
config REGULATOR_FIXED_VOLTAGE
tristate "Fixed voltage regulator support"
- default n
help
This driver provides support for fixed voltage regulators,
useful for systems which use a combination of software
@@ -38,7 +36,6 @@ config REGULATOR_FIXED_VOLTAGE
config REGULATOR_VIRTUAL_CONSUMER
tristate "Virtual regulator consumer support"
- default n
help
This driver provides a virtual consumer for the voltage and
current regulator API which provides sysfs controls for
@@ -49,17 +46,15 @@ config REGULATOR_VIRTUAL_CONSUMER
config REGULATOR_USERSPACE_CONSUMER
tristate "Userspace regulator consumer support"
- default n
help
There are some classes of devices that are controlled entirely
- from user space. Usersapce consumer driver provides ability to
+ from user space. Userspace consumer driver provides ability to
control power supplies for such devices.
If unsure, say no.
config REGULATOR_BQ24022
tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
- default n
help
This driver controls a TI bq24022 Charger attached via
GPIOs. The provided current regulator can enable/disable
@@ -69,7 +64,6 @@ config REGULATOR_BQ24022
config REGULATOR_MAX1586
tristate "Maxim 1586/1587 voltage regulator"
depends on I2C
- default n
help
This driver controls a Maxim 1586 or 1587 voltage output
regulator via I2C bus. The provided regulator is suitable
@@ -147,5 +141,21 @@ config REGULATOR_AB3100
AB3100 analog baseband dealing with power regulators
for the system.
+config REGULATOR_TPS65023
+ tristate "TI TPS65023 Power regulators"
+ depends on I2C
+ help
+ This driver supports TPS65023 voltage regulator chips. TPS65023 provides
+ three step-down converters and two general-purpose LDO voltage regulators.
+ It supports TI's software based Class-2 SmartReflex implementation.
+
+config REGULATOR_TPS6507X
+ tristate "TI TPS6507X Power regulators"
+ depends on I2C
+ help
+ This driver supports TPS6507X voltage regulator chips. TPS6507X provides
+ three step-down converters and two general-purpose LDO voltage regulators.
+ It supports TI's software based Class-2 SmartReflex implementation.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 768b3316d6e..4257a868377 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -23,4 +23,7 @@ obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
+obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 91ba9bfaa70..744ea1d0b59 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -37,7 +37,7 @@ static int has_full_constraints;
*/
struct regulator_map {
struct list_head list;
- struct device *dev;
+ const char *dev_name; /* The dev_name() for the consumer */
const char *supply;
struct regulator_dev *regulator;
};
@@ -232,7 +232,7 @@ static ssize_t regulator_name_show(struct device *dev,
struct regulator_dev *rdev = dev_get_drvdata(dev);
const char *name;
- if (rdev->constraints->name)
+ if (rdev->constraints && rdev->constraints->name)
name = rdev->constraints->name;
else if (rdev->desc->name)
name = rdev->desc->name;
@@ -280,8 +280,13 @@ static ssize_t regulator_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ mutex_lock(&rdev->mutex);
+ ret = regulator_print_state(buf, _regulator_is_enabled(rdev));
+ mutex_unlock(&rdev->mutex);
- return regulator_print_state(buf, _regulator_is_enabled(rdev));
+ return ret;
}
static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);
@@ -857,23 +862,39 @@ out:
* set_consumer_device_supply: Bind a regulator to a symbolic supply
* @rdev: regulator source
* @consumer_dev: device the supply applies to
+ * @consumer_dev_name: dev_name() string for device supply applies to
* @supply: symbolic name for supply
*
* Allows platform initialisation code to map physical regulator
* sources to symbolic names for supplies for use by devices. Devices
* should use these symbolic names to request regulators, avoiding the
* need to provide board-specific regulator names as platform data.
+ *
+ * Only one of consumer_dev and consumer_dev_name may be specified.
*/
static int set_consumer_device_supply(struct regulator_dev *rdev,
- struct device *consumer_dev, const char *supply)
+ struct device *consumer_dev, const char *consumer_dev_name,
+ const char *supply)
{
struct regulator_map *node;
+ int has_dev;
+
+ if (consumer_dev && consumer_dev_name)
+ return -EINVAL;
+
+ if (!consumer_dev_name && consumer_dev)
+ consumer_dev_name = dev_name(consumer_dev);
if (supply == NULL)
return -EINVAL;
+ if (consumer_dev_name != NULL)
+ has_dev = 1;
+ else
+ has_dev = 0;
+
list_for_each_entry(node, &regulator_map_list, list) {
- if (consumer_dev != node->dev)
+ if (consumer_dev_name != node->dev_name)
continue;
if (strcmp(node->supply, supply) != 0)
continue;
@@ -886,30 +907,45 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
return -EBUSY;
}
- node = kmalloc(sizeof(struct regulator_map), GFP_KERNEL);
+ node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
if (node == NULL)
return -ENOMEM;
node->regulator = rdev;
- node->dev = consumer_dev;
node->supply = supply;
+ if (has_dev) {
+ node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
+ if (node->dev_name == NULL) {
+ kfree(node);
+ return -ENOMEM;
+ }
+ }
+
list_add(&node->list, &regulator_map_list);
return 0;
}
static void unset_consumer_device_supply(struct regulator_dev *rdev,
- struct device *consumer_dev)
+ const char *consumer_dev_name, struct device *consumer_dev)
{
struct regulator_map *node, *n;
+ if (consumer_dev && !consumer_dev_name)
+ consumer_dev_name = dev_name(consumer_dev);
+
list_for_each_entry_safe(node, n, &regulator_map_list, list) {
- if (rdev == node->regulator &&
- consumer_dev == node->dev) {
- list_del(&node->list);
- kfree(node);
- return;
- }
+ if (rdev != node->regulator)
+ continue;
+
+ if (consumer_dev_name && node->dev_name &&
+ strcmp(consumer_dev_name, node->dev_name))
+ continue;
+
+ list_del(&node->list);
+ kfree(node->dev_name);
+ kfree(node);
+ return;
}
}
@@ -920,6 +956,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
list_for_each_entry_safe(node, n, &regulator_map_list, list) {
if (rdev == node->regulator) {
list_del(&node->list);
+ kfree(node->dev_name);
kfree(node);
return;
}
@@ -1001,35 +1038,33 @@ overflow_err:
return NULL;
}
-/**
- * regulator_get - lookup and obtain a reference to a regulator.
- * @dev: device for regulator "consumer"
- * @id: Supply name or regulator ID.
- *
- * Returns a struct regulator corresponding to the regulator producer,
- * or IS_ERR() condition containing errno.
- *
- * Use of supply names configured via regulator_set_device_supply() is
- * strongly encouraged. It is recommended that the supply name used
- * should match the name used for the supply and/or the relevant
- * device pins in the datasheet.
- */
-struct regulator *regulator_get(struct device *dev, const char *id)
+/* Internal regulator request function */
+static struct regulator *_regulator_get(struct device *dev, const char *id,
+ int exclusive)
{
struct regulator_dev *rdev;
struct regulator_map *map;
struct regulator *regulator = ERR_PTR(-ENODEV);
+ const char *devname = NULL;
+ int ret;
if (id == NULL) {
printk(KERN_ERR "regulator: get() with no identifier\n");
return regulator;
}
+ if (dev)
+ devname = dev_name(dev);
+
mutex_lock(&regulator_list_mutex);
list_for_each_entry(map, &regulator_map_list, list) {
- if (dev == map->dev &&
- strcmp(map->supply, id) == 0) {
+ /* If the mapping has a device set up it must match */
+ if (map->dev_name &&
+ (!devname || strcmp(map->dev_name, devname)))
+ continue;
+
+ if (strcmp(map->supply, id) == 0) {
rdev = map->regulator;
goto found;
}
@@ -1038,6 +1073,16 @@ struct regulator *regulator_get(struct device *dev, const char *id)
return regulator;
found:
+ if (rdev->exclusive) {
+ regulator = ERR_PTR(-EPERM);
+ goto out;
+ }
+
+ if (exclusive && rdev->open_count) {
+ regulator = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
if (!try_module_get(rdev->owner))
goto out;
@@ -1047,13 +1092,70 @@ found:
module_put(rdev->owner);
}
+ rdev->open_count++;
+ if (exclusive) {
+ rdev->exclusive = 1;
+
+ ret = _regulator_is_enabled(rdev);
+ if (ret > 0)
+ rdev->use_count = 1;
+ else
+ rdev->use_count = 0;
+ }
+
out:
mutex_unlock(&regulator_list_mutex);
+
return regulator;
}
+
+/**
+ * regulator_get - lookup and obtain a reference to a regulator.
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Returns a struct regulator corresponding to the regulator producer,
+ * or IS_ERR() condition containing errno.
+ *
+ * Use of supply names configured via regulator_set_device_supply() is
+ * strongly encouraged. It is recommended that the supply name used
+ * should match the name used for the supply and/or the relevant
+ * device pins in the datasheet.
+ */
+struct regulator *regulator_get(struct device *dev, const char *id)
+{
+ return _regulator_get(dev, id, 0);
+}
EXPORT_SYMBOL_GPL(regulator_get);
/**
+ * regulator_get_exclusive - obtain exclusive access to a regulator.
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Returns a struct regulator corresponding to the regulator producer,
+ * or IS_ERR() condition containing errno. Other consumers will be
+ * unable to obtain the regulator while this reference is held and the
+ * use count for the regulator will be initialised to reflect the
+ * current state of the regulator.
+ *
+ * This is intended for use by consumers which cannot tolerate shared
+ * use of the regulator such as those which need to force the
+ * regulator off for correct operation of the hardware they are
+ * controlling.
+ *
+ * Use of supply names configured via regulator_set_device_supply() is
+ * strongly encouraged. It is recommended that the supply name used
+ * should match the name used for the supply and/or the relevant
+ * device pins in the datasheet.
+ */
+struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return _regulator_get(dev, id, 1);
+}
+EXPORT_SYMBOL_GPL(regulator_get_exclusive);
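A minimal consumer sketch of the new exclusive API (hypothetical device pointer and "Vcore" supply name, not taken from any in-tree user): because the use count is initialised from the hardware state, the consumer may disable a supply that was left on by the bootloader without a prior regulator_enable() call.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_claim_and_disable(struct device *dev)
{
        struct regulator *reg;
        int ret = 0;

        reg = regulator_get_exclusive(dev, "Vcore");    /* hypothetical supply */
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        /* use_count already mirrors the hardware, so disable() is legal here */
        if (regulator_is_enabled(reg) > 0)
                ret = regulator_disable(reg);

        regulator_put(reg);
        return ret;
}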
+
+/**
* regulator_put - "free" the regulator source
* @regulator: regulator source
*
@@ -1081,21 +1183,29 @@ void regulator_put(struct regulator *regulator)
list_del(&regulator->list);
kfree(regulator);
+ rdev->open_count--;
+ rdev->exclusive = 0;
+
module_put(rdev->owner);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_put);
+static int _regulator_can_change_status(struct regulator_dev *rdev)
+{
+ if (!rdev->constraints)
+ return 0;
+
+ if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
+ return 1;
+ else
+ return 0;
+}
+
/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
- int ret = -EINVAL;
-
- if (!rdev->constraints) {
- printk(KERN_ERR "%s: %s has no constraints\n",
- __func__, rdev->desc->name);
- return ret;
- }
+ int ret;
/* do we need to enable the supply regulator first */
if (rdev->supply) {
@@ -1108,24 +1218,35 @@ static int _regulator_enable(struct regulator_dev *rdev)
}
/* check voltage and requested load before enabling */
- if (rdev->desc->ops->enable) {
-
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask &
- REGULATOR_CHANGE_DRMS))
- drms_uA_update(rdev);
-
- ret = rdev->desc->ops->enable(rdev);
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to enable %s: %d\n",
+ if (rdev->constraints &&
+ (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
+ drms_uA_update(rdev);
+
+ if (rdev->use_count == 0) {
+ /* The regulator may already be on if it's not switchable or was left on */
+ ret = _regulator_is_enabled(rdev);
+ if (ret == -EINVAL || ret == 0) {
+ if (!_regulator_can_change_status(rdev))
+ return -EPERM;
+
+ if (rdev->desc->ops->enable) {
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+ } else if (ret < 0) {
+ printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
__func__, rdev->desc->name, ret);
return ret;
}
- rdev->use_count++;
- return ret;
+ /* Fallthrough on positive return values - already enabled */
}
- return ret;
+ rdev->use_count++;
+
+ return 0;
}
/**
@@ -1165,7 +1286,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
if (rdev->use_count == 1 && !rdev->constraints->always_on) {
/* we are last user */
- if (rdev->desc->ops->disable) {
+ if (_regulator_can_change_status(rdev) &&
+ rdev->desc->ops->disable) {
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
printk(KERN_ERR "%s: failed to disable %s\n",
@@ -1265,20 +1387,11 @@ EXPORT_SYMBOL_GPL(regulator_force_disable);
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
- int ret;
-
- mutex_lock(&rdev->mutex);
-
/* sanity check */
- if (!rdev->desc->ops->is_enabled) {
- ret = -EINVAL;
- goto out;
- }
+ if (!rdev->desc->ops->is_enabled)
+ return -EINVAL;
- ret = rdev->desc->ops->is_enabled(rdev);
-out:
- mutex_unlock(&rdev->mutex);
- return ret;
+ return rdev->desc->ops->is_enabled(rdev);
}
/**
@@ -1295,7 +1408,13 @@ out:
*/
int regulator_is_enabled(struct regulator *regulator)
{
- return _regulator_is_enabled(regulator->rdev);
+ int ret;
+
+ mutex_lock(&regulator->rdev->mutex);
+ ret = _regulator_is_enabled(regulator->rdev);
+ mutex_unlock(&regulator->rdev->mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(regulator_is_enabled);
@@ -1350,6 +1469,35 @@ int regulator_list_voltage(struct regulator *regulator, unsigned selector)
EXPORT_SYMBOL_GPL(regulator_list_voltage);
/**
+ * regulator_is_supported_voltage - check if a voltage range can be supported
+ *
+ * @regulator: Regulator to check.
+ * @min_uV: Minimum required voltage in uV.
+ * @max_uV: Maximum required voltage in uV.
+ *
+ * Returns a boolean or a negative error code.
+ */
+int regulator_is_supported_voltage(struct regulator *regulator,
+ int min_uV, int max_uV)
+{
+ int i, voltages, ret;
+
+ ret = regulator_count_voltages(regulator);
+ if (ret < 0)
+ return ret;
+ voltages = ret;
+
+ for (i = 0; i < voltages; i++) {
+ ret = regulator_list_voltage(regulator, i);
+
+ if (ret >= min_uV && ret <= max_uV)
+ return 1;
+ }
+
+ return 0;
+}
+
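A hedged usage sketch for the new helper (the 1.2 V-1.3 V window is hypothetical): check that at least one selector falls inside the range before committing to it.

#include <linux/regulator/consumer.h>

static int example_pick_core_voltage(struct regulator *reg)
{
        int ret;

        /* returns 1 if a selector lies in the window, 0 if none, <0 on error */
        ret = regulator_is_supported_voltage(reg, 1200000, 1300000);
        if (ret < 0)
                return ret;
        if (!ret)
                return -EINVAL;

        return regulator_set_voltage(reg, 1200000, 1300000);
}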
+/**
* regulator_set_voltage - set regulator output voltage
* @regulator: regulator source
* @min_uV: Minimum required voltage in uV
@@ -2091,11 +2239,13 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev,
+ init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
for (--i; i >= 0; i--)
unset_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev);
+ init_data->consumer_supplies[i].dev_name,
+ init_data->consumer_supplies[i].dev);
goto scrub;
}
}
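A hedged machine-level sketch of the name-based mapping used above (the "spi0.0" consumer and "Vdd" supply are hypothetical, and it assumes the matching machine.h change adds the dev_name field this registration loop dereferences): naming the consumer removes the need to have its struct device available when the regulator is registered.

#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_consumers[] = {
        {
                .dev_name = "spi0.0",   /* matched against dev_name() of the consumer */
                .supply   = "Vdd",
        },
};

static struct regulator_init_data example_init_data = {
        .num_consumer_supplies  = ARRAY_SIZE(example_consumers),
        .consumer_supplies      = example_consumers,
};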
@@ -2130,6 +2280,7 @@ void regulator_unregister(struct regulator_dev *rdev)
return;
mutex_lock(&regulator_list_mutex);
+ WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
if (rdev->supply)
@@ -2277,14 +2428,14 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- if (c->name)
+ if (c && c->name)
name = c->name;
else if (rdev->desc->name)
name = rdev->desc->name;
else
name = "regulator";
- if (!ops->disable || c->always_on)
+ if (!ops->disable || (c && c->always_on))
continue;
mutex_lock(&rdev->mutex);
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index b8b89ef10a8..aa224d936e0 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -64,6 +64,14 @@
#define DA9034_MDTV2 (0x33)
#define DA9034_MVRC (0x34)
+/* DA9035 Registers. DA9034 Registers are compatible with DA9035. */
+#define DA9035_OVER3 (0x12)
+#define DA9035_VCC2 (0x1f)
+#define DA9035_3DTV1 (0x2c)
+#define DA9035_3DTV2 (0x2d)
+#define DA9035_3VRC (0x2e)
+#define DA9035_AUTOSKIP (0x2f)
+
struct da903x_regulator_info {
struct regulator_desc desc;
@@ -79,6 +87,10 @@ struct da903x_regulator_info {
int enable_bit;
};
+static int da9034_ldo12_data[] = { 1700, 1750, 1800, 1850, 1900, 1950,
+ 2000, 2050, 2700, 2750, 2800, 2850,
+ 2900, 2950, 3000, 3050 };
+
static inline struct device *to_da903x_dev(struct regulator_dev *rdev)
{
return rdev_get_dev(rdev)->parent->parent;
@@ -162,6 +174,17 @@ static int da903x_is_enabled(struct regulator_dev *rdev)
return !!(reg_val & (1 << info->enable_bit));
}
+static int da903x_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = info->min_uV + info->step_uV * selector;
+ if (ret > info->max_uV)
+ return -EINVAL;
+ return ret;
+}
+
/* DA9030 specific operations */
static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
@@ -278,7 +301,7 @@ static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
}
val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
- val = (val > 7 || val < 20) ? 8 : val - 12;
+ val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val);
val <<= info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
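As a worked check of the corrected mapping: a request for 2.7 V computes val = (2700000 - 1700000 + 49999) / 50000 = 20, which the new expression turns into register value 20 - 12 = 8, and da9034_ldo12_data[8] is indeed 2700 mV; raw values 8..19, which the LDO cannot produce, are bumped to register value 8.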
@@ -305,9 +328,18 @@ static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
return info->min_uV + info->step_uV * val;
}
+static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ if (selector >= ARRAY_SIZE(da9034_ldo12_data))
+ return -EINVAL;
+ return da9034_ldo12_data[selector] * 1000;
+}
+
static struct regulator_ops da903x_regulator_ldo_ops = {
.set_voltage = da903x_set_ldo_voltage,
.get_voltage = da903x_get_voltage,
+ .list_voltage = da903x_list_voltage,
.enable = da903x_enable,
.disable = da903x_disable,
.is_enabled = da903x_is_enabled,
@@ -317,6 +349,7 @@ static struct regulator_ops da903x_regulator_ldo_ops = {
static struct regulator_ops da9030_regulator_ldo14_ops = {
.set_voltage = da9030_set_ldo14_voltage,
.get_voltage = da9030_get_ldo14_voltage,
+ .list_voltage = da903x_list_voltage,
.enable = da903x_enable,
.disable = da903x_disable,
.is_enabled = da903x_is_enabled,
@@ -326,6 +359,7 @@ static struct regulator_ops da9030_regulator_ldo14_ops = {
static struct regulator_ops da9030_regulator_ldo1_15_ops = {
.set_voltage = da9030_set_ldo1_15_voltage,
.get_voltage = da903x_get_voltage,
+ .list_voltage = da903x_list_voltage,
.enable = da903x_enable,
.disable = da903x_disable,
.is_enabled = da903x_is_enabled,
@@ -334,6 +368,7 @@ static struct regulator_ops da9030_regulator_ldo1_15_ops = {
static struct regulator_ops da9034_regulator_dvc_ops = {
.set_voltage = da9034_set_dvc_voltage,
.get_voltage = da903x_get_voltage,
+ .list_voltage = da903x_list_voltage,
.enable = da903x_enable,
.disable = da903x_disable,
.is_enabled = da903x_is_enabled,
@@ -343,6 +378,7 @@ static struct regulator_ops da9034_regulator_dvc_ops = {
static struct regulator_ops da9034_regulator_ldo12_ops = {
.set_voltage = da9034_set_ldo12_voltage,
.get_voltage = da9034_get_ldo12_voltage,
+ .list_voltage = da9034_list_ldo12_voltage,
.enable = da903x_enable,
.disable = da903x_disable,
.is_enabled = da903x_is_enabled,
@@ -355,6 +391,7 @@ static struct regulator_ops da9034_regulator_ldo12_ops = {
.ops = &da903x_regulator_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = _pmic##_ID_LDO##_id, \
+ .n_voltages = (step) ? ((max - min) / step + 1) : 1, \
.owner = THIS_MODULE, \
}, \
.min_uV = (min) * 1000, \
@@ -367,24 +404,25 @@ static struct regulator_ops da9034_regulator_ldo12_ops = {
.enable_bit = (ebit), \
}
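As a quick check of the new n_voltages formula: DA9030_LDO(1, 1200, 3200, 100, ...) in the table below gives (3200 - 1200) / 100 + 1 = 21 selectable voltages, while a step of 0, as used for fixed entries such as LDO13, collapses to a single voltage.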
-#define DA9034_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+#define DA903x_DVC(_pmic, _id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
{ \
.desc = { \
.name = #_id, \
.ops = &da9034_regulator_dvc_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = DA9034_ID_##_id, \
+ .id = _pmic##_ID_##_id, \
+ .n_voltages = (step) ? ((max - min) / step + 1) : 1, \
.owner = THIS_MODULE, \
}, \
.min_uV = (min) * 1000, \
.max_uV = (max) * 1000, \
.step_uV = (step) * 1000, \
- .vol_reg = DA9034_##vreg, \
+ .vol_reg = _pmic##_##vreg, \
.vol_shift = (0), \
.vol_nbits = (nbits), \
- .update_reg = DA9034_##ureg, \
+ .update_reg = _pmic##_##ureg, \
.update_bit = (ubit), \
- .enable_reg = DA9034_##ereg, \
+ .enable_reg = _pmic##_##ereg, \
.enable_bit = (ebit), \
}
@@ -394,8 +432,22 @@ static struct regulator_ops da9034_regulator_ldo12_ops = {
#define DA9030_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
DA903x_LDO(DA9030, _id, min, max, step, vreg, shift, nbits, ereg, ebit)
+#define DA9030_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+ DA903x_DVC(DA9030, _id, min, max, step, vreg, nbits, ureg, ubit, \
+ ereg, ebit)
+
+#define DA9034_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+ DA903x_DVC(DA9034, _id, min, max, step, vreg, nbits, ureg, ubit, \
+ ereg, ebit)
+
+#define DA9035_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+ DA903x_DVC(DA9035, _id, min, max, step, vreg, nbits, ureg, ubit, \
+ ereg, ebit)
+
static struct da903x_regulator_info da903x_regulator_info[] = {
/* DA9030 */
+ DA9030_DVC(BUCK2, 850, 1625, 25, BUCK2DVM1, 5, BUCK2DVM1, 7, RCTL11, 0),
+
DA9030_LDO( 1, 1200, 3200, 100, LDO1, 0, 5, RCTL12, 1),
DA9030_LDO( 2, 1800, 3200, 100, LDO23, 0, 4, RCTL12, 2),
DA9030_LDO( 3, 1800, 3200, 100, LDO23, 4, 4, RCTL12, 3),
@@ -417,9 +469,9 @@ static struct da903x_regulator_info da903x_regulator_info[] = {
DA9030_LDO(13, 2100, 2100, 0, INVAL, 0, 0, RCTL11, 3), /* fixed @2.1V */
/* DA9034 */
- DA9034_DVC(BUCK1, 725, 1500, 25, ADTV1, 5, VCC1, 0, OVER1, 0),
- DA9034_DVC(BUCK2, 725, 1500, 25, CDTV1, 5, VCC1, 2, OVER1, 1),
- DA9034_DVC(LDO2, 725, 1500, 25, SDTV1, 5, VCC1, 4, OVER1, 2),
+ DA9034_DVC(BUCK1, 725, 1500, 25, ADTV2, 5, VCC1, 0, OVER1, 0),
+ DA9034_DVC(BUCK2, 725, 1500, 25, CDTV2, 5, VCC1, 2, OVER1, 1),
+ DA9034_DVC(LDO2, 725, 1500, 25, SDTV2, 5, VCC1, 4, OVER1, 2),
DA9034_DVC(LDO1, 1700, 2075, 25, MDTV1, 4, VCC1, 6, OVER3, 4),
DA9034_LDO( 3, 1800, 3300, 100, LDO643, 0, 4, OVER3, 5),
@@ -435,6 +487,9 @@ static struct da903x_regulator_info da903x_regulator_info[] = {
DA9034_LDO(14, 1800, 3300, 100, LDO1514, 0, 4, OVER3, 0),
DA9034_LDO(15, 1800, 3300, 100, LDO1514, 4, 4, OVER3, 1),
DA9034_LDO(5, 3100, 3100, 0, INVAL, 0, 0, OVER3, 7), /* fixed @3.1V */
+
+ /* DA9035 */
+ DA9035_DVC(BUCK3, 1800, 2200, 100, 3DTV1, 3, VCC2, 0, OVER3, 3),
};
static inline struct da903x_regulator_info *find_regulator_info(int id)
@@ -462,8 +517,10 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
}
/* Workaround for the weird LDO12 voltage setting */
- if (ri->desc.id == DA9034_ID_LDO12)
+ if (ri->desc.id == DA9034_ID_LDO12) {
ri->desc.ops = &da9034_regulator_ldo12_ops;
+ ri->desc.n_voltages = ARRAY_SIZE(da9034_ldo12_data);
+ }
if (ri->desc.id == DA9030_ID_LDO14)
ri->desc.ops = &da9030_regulator_ldo14_ops;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index cdc674fb46c..f8b295700d7 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -5,6 +5,9 @@
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
+ * Copyright (c) 2009 Nokia Corporation
+ * Roger Quadros <ext-roger.quadros@nokia.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
@@ -20,20 +23,45 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/fixed.h>
+#include <linux/gpio.h>
struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
int microvolts;
+ int gpio;
+ unsigned enable_high:1;
+ unsigned is_enabled:1;
};
static int fixed_voltage_is_enabled(struct regulator_dev *dev)
{
- return 1;
+ struct fixed_voltage_data *data = rdev_get_drvdata(dev);
+
+ return data->is_enabled;
}
static int fixed_voltage_enable(struct regulator_dev *dev)
{
+ struct fixed_voltage_data *data = rdev_get_drvdata(dev);
+
+ if (gpio_is_valid(data->gpio)) {
+ gpio_set_value_cansleep(data->gpio, data->enable_high);
+ data->is_enabled = 1;
+ }
+
+ return 0;
+}
+
+static int fixed_voltage_disable(struct regulator_dev *dev)
+{
+ struct fixed_voltage_data *data = rdev_get_drvdata(dev);
+
+ if (gpio_is_valid(data->gpio)) {
+ gpio_set_value_cansleep(data->gpio, !data->enable_high);
+ data->is_enabled = 0;
+ }
+
return 0;
}
@@ -58,6 +86,7 @@ static int fixed_voltage_list_voltage(struct regulator_dev *dev,
static struct regulator_ops fixed_voltage_ops = {
.is_enabled = fixed_voltage_is_enabled,
.enable = fixed_voltage_enable,
+ .disable = fixed_voltage_disable,
.get_voltage = fixed_voltage_get_voltage,
.list_voltage = fixed_voltage_list_voltage,
};
@@ -70,12 +99,14 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
if (drvdata == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate device data\n");
ret = -ENOMEM;
goto err;
}
drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
if (drvdata->desc.name == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate supply name\n");
ret = -ENOMEM;
goto err;
}
@@ -85,12 +116,62 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.n_voltages = 1;
drvdata->microvolts = config->microvolts;
+ drvdata->gpio = config->gpio;
+
+ if (gpio_is_valid(config->gpio)) {
+ drvdata->enable_high = config->enable_high;
+
+ /* FIXME: Remove the print warning below
+ *
+ * config->gpio must be set to -EINVAL by platform code if
+ * GPIO control is not required. However, early adopters
+ * not requiring GPIO control may forget to initialize
+ * config->gpio to -EINVAL. This will cause GPIO 0 to be used
+ * for GPIO control.
+ *
+ * This warning will be removed once there are a couple of users
+ * for this driver.
+ */
+ if (!config->gpio)
+ dev_warn(&pdev->dev,
+ "using GPIO 0 for regulator enable control\n");
+
+ ret = gpio_request(config->gpio, config->supply_name);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not obtain regulator enable GPIO %d: %d\n",
+ config->gpio, ret);
+ goto err_name;
+ }
+
+ /* set the output direction without changing state
+ * to prevent a glitch
+ */
+ drvdata->is_enabled = config->enabled_at_boot;
+ ret = drvdata->is_enabled ?
+ config->enable_high : !config->enable_high;
+
+ ret = gpio_direction_output(config->gpio, ret);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not configure regulator enable GPIO %d direction: %d\n",
+ config->gpio, ret);
+ goto err_gpio;
+ }
+
+ } else {
+ /* A regulator without GPIO control is considered
+ * to be always enabled
+ */
+ drvdata->is_enabled = 1;
+ }
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
config->init_data, drvdata);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
- goto err_name;
+ dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+ goto err_gpio;
}
platform_set_drvdata(pdev, drvdata);
@@ -100,6 +181,9 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
return 0;
+err_gpio:
+ if (gpio_is_valid(config->gpio))
+ gpio_free(config->gpio);
err_name:
kfree(drvdata->desc.name);
err:
@@ -115,6 +199,9 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev)
kfree(drvdata->desc.name);
+ if (gpio_is_valid(drvdata->gpio))
+ gpio_free(drvdata->gpio);
+
kfree(drvdata);
return 0;
}
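A minimal board-code sketch for the new GPIO-controlled mode, assuming the matching fixed.h change adds the gpio, enable_high and enabled_at_boot fields used by the probe above and that the driver still registers as "reg-fixed-voltage"; the GPIO number, supply name and consumer are hypothetical.

#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_fixed_consumers[] = {
        { .dev_name = "mmci-omap.0", .supply = "vmmc" },        /* hypothetical consumer */
};

static struct regulator_init_data example_fixed_init_data = {
        .constraints = {
                .valid_ops_mask         = REGULATOR_CHANGE_STATUS,
        },
        .num_consumer_supplies  = ARRAY_SIZE(example_fixed_consumers),
        .consumer_supplies      = example_fixed_consumers,
};

static struct fixed_voltage_config example_fixed_config = {
        .supply_name            = "V3.3_SD",
        .microvolts             = 3300000,
        .gpio                   = 42,   /* use -EINVAL when no GPIO controls the supply */
        .enable_high            = 1,
        .enabled_at_boot        = 1,
        .init_data              = &example_fixed_init_data,
};

static struct platform_device example_fixed_device = {
        .name   = "reg-fixed-voltage",  /* assumed platform driver name */
        .id     = 0,
        .dev    = { .platform_data = &example_fixed_config },
};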
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index a61018a2769..7803a320543 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -541,7 +541,7 @@ static struct i2c_driver lp3971_i2c_driver = {
static int __init lp3971_module_init(void)
{
- int ret = -ENODEV;
+ int ret;
ret = i2c_add_driver(&lp3971_i2c_driver);
if (ret != 0)
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 8e14900eb68..0803ffe6236 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -24,11 +24,12 @@
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/pmic.h>
-#define PCF50633_REGULATOR(_name, _id) \
+#define PCF50633_REGULATOR(_name, _id, _n) \
{ \
.name = _name, \
.id = _id, \
.ops = &pcf50633_regulator_ops, \
+ .n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}
@@ -149,33 +150,20 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
return pcf50633_reg_write(pcf, regnr, volt_bits);
}
-static int pcf50633_regulator_get_voltage(struct regulator_dev *rdev)
+static int pcf50633_regulator_voltage_value(enum pcf50633_regulator_id id,
+ u8 bits)
{
- struct pcf50633 *pcf;
- int regulator_id, millivolts, volt_bits;
- u8 regnr;
-
- pcf = rdev_get_drvdata(rdev);;
+ int millivolts;
- regulator_id = rdev_get_id(rdev);
- if (regulator_id >= PCF50633_NUM_REGULATORS)
- return -EINVAL;
-
- regnr = pcf50633_regulator_registers[regulator_id];
-
- volt_bits = pcf50633_reg_read(pcf, regnr);
- if (volt_bits < 0)
- return -1;
-
- switch (regulator_id) {
+ switch (id) {
case PCF50633_REGULATOR_AUTO:
- millivolts = auto_voltage_value(volt_bits);
+ millivolts = auto_voltage_value(bits);
break;
case PCF50633_REGULATOR_DOWN1:
- millivolts = down_voltage_value(volt_bits);
+ millivolts = down_voltage_value(bits);
break;
case PCF50633_REGULATOR_DOWN2:
- millivolts = down_voltage_value(volt_bits);
+ millivolts = down_voltage_value(bits);
break;
case PCF50633_REGULATOR_LDO1:
case PCF50633_REGULATOR_LDO2:
@@ -184,7 +172,7 @@ static int pcf50633_regulator_get_voltage(struct regulator_dev *rdev)
case PCF50633_REGULATOR_LDO5:
case PCF50633_REGULATOR_LDO6:
case PCF50633_REGULATOR_HCLDO:
- millivolts = ldo_voltage_value(volt_bits);
+ millivolts = ldo_voltage_value(bits);
break;
default:
return -EINVAL;
@@ -193,6 +181,49 @@ static int pcf50633_regulator_get_voltage(struct regulator_dev *rdev)
return millivolts * 1000;
}
+static int pcf50633_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct pcf50633 *pcf;
+ int regulator_id;
+ u8 volt_bits, regnr;
+
+ pcf = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+ if (regulator_id >= PCF50633_NUM_REGULATORS)
+ return -EINVAL;
+
+ regnr = pcf50633_regulator_registers[regulator_id];
+
+ volt_bits = pcf50633_reg_read(pcf, regnr);
+
+ return pcf50633_regulator_voltage_value(regulator_id, volt_bits);
+}
+
+static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned int index)
+{
+ struct pcf50633 *pcf;
+ int regulator_id;
+
+ pcf = rdev_get_drvdata(rdev);
+
+ regulator_id = rdev_get_id(rdev);
+
+ switch (regulator_id) {
+ case PCF50633_REGULATOR_AUTO:
+ index += 0x2f;
+ break;
+ case PCF50633_REGULATOR_HCLDO:
+ index += 0x01;
+ break;
+ default:
+ break;
+ }
+
+ return pcf50633_regulator_voltage_value(regulator_id, index);
+}
+
static int pcf50633_regulator_enable(struct regulator_dev *rdev)
{
struct pcf50633 *pcf = rdev_get_drvdata(rdev);
@@ -246,6 +277,7 @@ static int pcf50633_regulator_is_enabled(struct regulator_dev *rdev)
static struct regulator_ops pcf50633_regulator_ops = {
.set_voltage = pcf50633_regulator_set_voltage,
.get_voltage = pcf50633_regulator_get_voltage,
+ .list_voltage = pcf50633_regulator_list_voltage,
.enable = pcf50633_regulator_enable,
.disable = pcf50633_regulator_disable,
.is_enabled = pcf50633_regulator_is_enabled,
@@ -253,27 +285,27 @@ static struct regulator_ops pcf50633_regulator_ops = {
static struct regulator_desc regulators[] = {
[PCF50633_REGULATOR_AUTO] =
- PCF50633_REGULATOR("auto", PCF50633_REGULATOR_AUTO),
+ PCF50633_REGULATOR("auto", PCF50633_REGULATOR_AUTO, 80),
[PCF50633_REGULATOR_DOWN1] =
- PCF50633_REGULATOR("down1", PCF50633_REGULATOR_DOWN1),
+ PCF50633_REGULATOR("down1", PCF50633_REGULATOR_DOWN1, 95),
[PCF50633_REGULATOR_DOWN2] =
- PCF50633_REGULATOR("down2", PCF50633_REGULATOR_DOWN2),
+ PCF50633_REGULATOR("down2", PCF50633_REGULATOR_DOWN2, 95),
[PCF50633_REGULATOR_LDO1] =
- PCF50633_REGULATOR("ldo1", PCF50633_REGULATOR_LDO1),
+ PCF50633_REGULATOR("ldo1", PCF50633_REGULATOR_LDO1, 27),
[PCF50633_REGULATOR_LDO2] =
- PCF50633_REGULATOR("ldo2", PCF50633_REGULATOR_LDO2),
+ PCF50633_REGULATOR("ldo2", PCF50633_REGULATOR_LDO2, 27),
[PCF50633_REGULATOR_LDO3] =
- PCF50633_REGULATOR("ldo3", PCF50633_REGULATOR_LDO3),
+ PCF50633_REGULATOR("ldo3", PCF50633_REGULATOR_LDO3, 27),
[PCF50633_REGULATOR_LDO4] =
- PCF50633_REGULATOR("ldo4", PCF50633_REGULATOR_LDO4),
+ PCF50633_REGULATOR("ldo4", PCF50633_REGULATOR_LDO4, 27),
[PCF50633_REGULATOR_LDO5] =
- PCF50633_REGULATOR("ldo5", PCF50633_REGULATOR_LDO5),
+ PCF50633_REGULATOR("ldo5", PCF50633_REGULATOR_LDO5, 27),
[PCF50633_REGULATOR_LDO6] =
- PCF50633_REGULATOR("ldo6", PCF50633_REGULATOR_LDO6),
+ PCF50633_REGULATOR("ldo6", PCF50633_REGULATOR_LDO6, 27),
[PCF50633_REGULATOR_HCLDO] =
- PCF50633_REGULATOR("hcldo", PCF50633_REGULATOR_HCLDO),
+ PCF50633_REGULATOR("hcldo", PCF50633_REGULATOR_HCLDO, 26),
[PCF50633_REGULATOR_MEMLDO] =
- PCF50633_REGULATOR("memldo", PCF50633_REGULATOR_MEMLDO),
+ PCF50633_REGULATOR("memldo", PCF50633_REGULATOR_MEMLDO, 0),
};
static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
new file mode 100644
index 00000000000..07fda0a75ad
--- /dev/null
+++ b/drivers/regulator/tps65023-regulator.c
@@ -0,0 +1,632 @@
+/*
+ * tps65023-regulator.c
+ *
+ * Supports TPS65023 Regulator
+ *
+ * Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+/* Register definitions */
+#define TPS65023_REG_VERSION 0
+#define TPS65023_REG_PGOODZ 1
+#define TPS65023_REG_MASK 2
+#define TPS65023_REG_REG_CTRL 3
+#define TPS65023_REG_CON_CTRL 4
+#define TPS65023_REG_CON_CTRL2 5
+#define TPS65023_REG_DEF_CORE 6
+#define TPS65023_REG_DEFSLEW 7
+#define TPS65023_REG_LDO_CTRL 8
+
+/* PGOODZ bitfields */
+#define TPS65023_PGOODZ_PWRFAILZ BIT(7)
+#define TPS65023_PGOODZ_LOWBATTZ BIT(6)
+#define TPS65023_PGOODZ_VDCDC1 BIT(5)
+#define TPS65023_PGOODZ_VDCDC2 BIT(4)
+#define TPS65023_PGOODZ_VDCDC3 BIT(3)
+#define TPS65023_PGOODZ_LDO2 BIT(2)
+#define TPS65023_PGOODZ_LDO1 BIT(1)
+
+/* MASK bitfields */
+#define TPS65023_MASK_PWRFAILZ BIT(7)
+#define TPS65023_MASK_LOWBATTZ BIT(6)
+#define TPS65023_MASK_VDCDC1 BIT(5)
+#define TPS65023_MASK_VDCDC2 BIT(4)
+#define TPS65023_MASK_VDCDC3 BIT(3)
+#define TPS65023_MASK_LDO2 BIT(2)
+#define TPS65023_MASK_LDO1 BIT(1)
+
+/* REG_CTRL bitfields */
+#define TPS65023_REG_CTRL_VDCDC1_EN BIT(5)
+#define TPS65023_REG_CTRL_VDCDC2_EN BIT(4)
+#define TPS65023_REG_CTRL_VDCDC3_EN BIT(3)
+#define TPS65023_REG_CTRL_LDO2_EN BIT(2)
+#define TPS65023_REG_CTRL_LDO1_EN BIT(1)
+
+/* LDO_CTRL bitfields */
+#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4)
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0xF0 >> ((ldo_id)*4))
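With ldo_id counted from zero, TPS65023_LDO_CTRL_LDOx_SHIFT() places the LDO1 selector in bits 3:0 and the LDO2 selector in bits 7:4 of LDO_CTRL, while TPS65023_LDO_CTRL_LDOx_MASK() evaluates to 0xF0 for LDO1 and 0x0F for LDO2, i.e. the bits belonging to the other LDO that tps65023_ldo_set_voltage() below preserves when rewriting the register.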
+
+/* Number of step-down converters available */
+#define TPS65023_NUM_DCDC 3
+/* Number of LDO voltage regulators available */
+#define TPS65023_NUM_LDO 2
+/* Number of total regulators available */
+#define TPS65023_NUM_REGULATOR (TPS65023_NUM_DCDC + TPS65023_NUM_LDO)
+
+/* DCDCs */
+#define TPS65023_DCDC_1 0
+#define TPS65023_DCDC_2 1
+#define TPS65023_DCDC_3 2
+/* LDOs */
+#define TPS65023_LDO_1 3
+#define TPS65023_LDO_2 4
+
+#define TPS65023_MAX_REG_ID TPS65023_LDO_2
+
+/* Supported voltage values for regulators */
+static const u16 VDCDC1_VSEL_table[] = {
+ 800, 825, 850, 875,
+ 900, 925, 950, 975,
+ 1000, 1025, 1050, 1075,
+ 1100, 1125, 1150, 1175,
+ 1200, 1225, 1250, 1275,
+ 1300, 1325, 1350, 1375,
+ 1400, 1425, 1450, 1475,
+ 1500, 1525, 1550, 1600,
+};
+
+static const u16 LDO1_VSEL_table[] = {
+ 1000, 1100, 1300, 1800,
+ 2200, 2600, 2800, 3150,
+};
+
+static const u16 LDO2_VSEL_table[] = {
+ 1050, 1200, 1300, 1800,
+ 2500, 2800, 3000, 3300,
+};
+
+static unsigned int num_voltages[] = {ARRAY_SIZE(VDCDC1_VSEL_table),
+ 0, 0, ARRAY_SIZE(LDO1_VSEL_table),
+ ARRAY_SIZE(LDO2_VSEL_table)};
+
+/* Regulator specific details */
+struct tps_info {
+ const char *name;
+ unsigned min_uV;
+ unsigned max_uV;
+ bool fixed;
+ u8 table_len;
+ const u16 *table;
+};
+
+/* PMIC details */
+struct tps_pmic {
+ struct regulator_desc desc[TPS65023_NUM_REGULATOR];
+ struct i2c_client *client;
+ struct regulator_dev *rdev[TPS65023_NUM_REGULATOR];
+ const struct tps_info *info[TPS65023_NUM_REGULATOR];
+ struct mutex io_lock;
+};
+
+static inline int tps_65023_read(struct tps_pmic *tps, u8 reg)
+{
+ return i2c_smbus_read_byte_data(tps->client, reg);
+}
+
+static inline int tps_65023_write(struct tps_pmic *tps, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(tps->client, reg, val);
+}
+
+static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+{
+ int err, data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps_65023_read(tps, reg);
+ if (data < 0) {
+ dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+
+ data |= mask;
+ err = tps_65023_write(tps, reg, data);
+ if (err)
+ dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps->io_lock);
+ return err;
+}
+
+static int tps_65023_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+{
+ int err, data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps_65023_read(tps, reg);
+ if (data < 0) {
+ dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+
+ data &= ~mask;
+
+ err = tps_65023_write(tps, reg, data);
+ if (err)
+ dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps->io_lock);
+ return err;
+
+}
+
+static int tps_65023_reg_read(struct tps_pmic *tps, u8 reg)
+{
+ int data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps_65023_read(tps, reg);
+ if (data < 0)
+ dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps->io_lock);
+ return data;
+}
+
+static int tps_65023_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&tps->io_lock);
+
+ err = tps_65023_write(tps, reg, val);
+ if (err < 0)
+ dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps->io_lock);
+ return err;
+}
+
+static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, dcdc = rdev_get_id(dev);
+ u8 shift;
+
+ if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
+ return -EINVAL;
+
+ shift = TPS65023_NUM_REGULATOR - dcdc;
+ data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+
+ if (data < 0)
+ return data;
+ else
+ return (data & 1<<shift) ? 1 : 0;
+}
+
+static int tps65023_ldo_is_enabled(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, ldo = rdev_get_id(dev);
+ u8 shift;
+
+ if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
+ return -EINVAL;
+
+ shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
+ data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+
+ if (data < 0)
+ return data;
+ else
+ return (data & 1<<shift) ? 1 : 0;
+}
+
+static int tps65023_dcdc_enable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+ u8 shift;
+
+ if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
+ return -EINVAL;
+
+ shift = TPS65023_NUM_REGULATOR - dcdc;
+ return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+}
+
+static int tps65023_dcdc_disable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+ u8 shift;
+
+ if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
+ return -EINVAL;
+
+ shift = TPS65023_NUM_REGULATOR - dcdc;
+ return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+}
+
+static int tps65023_ldo_enable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev);
+ u8 shift;
+
+ if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
+ return -EINVAL;
+
+ shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
+ return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+}
+
+static int tps65023_ldo_disable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev);
+ u8 shift;
+
+ if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
+ return -EINVAL;
+
+ shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
+ return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+}
+
+static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, dcdc = rdev_get_id(dev);
+
+ if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
+ return -EINVAL;
+
+ if (dcdc == TPS65023_DCDC_1) {
+ data = tps_65023_reg_read(tps, TPS65023_REG_DEF_CORE);
+ if (data < 0)
+ return data;
+ data &= (tps->info[dcdc]->table_len - 1);
+ return tps->info[dcdc]->table[data] * 1000;
+ } else
+ return tps->info[dcdc]->min_uV;
+}
+
+static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+ int vsel;
+
+ if (dcdc != TPS65023_DCDC_1)
+ return -EINVAL;
+
+ if (min_uV < tps->info[dcdc]->min_uV
+ || min_uV > tps->info[dcdc]->max_uV)
+ return -EINVAL;
+ if (max_uV < tps->info[dcdc]->min_uV
+ || max_uV > tps->info[dcdc]->max_uV)
+ return -EINVAL;
+
+ for (vsel = 0; vsel < tps->info[dcdc]->table_len; vsel++) {
+ int mV = tps->info[dcdc]->table[vsel];
+ int uV = mV * 1000;
+
+ /* Break at the first in-range value */
+ if (min_uV <= uV && uV <= max_uV)
+ break;
+ }
+
+ /* write to the register in case we found a match */
+ if (vsel == tps->info[dcdc]->table_len)
+ return -EINVAL;
+ else
+ return tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+}
+
+static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, ldo = rdev_get_id(dev);
+
+ if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
+ return -EINVAL;
+
+ data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
+ if (data < 0)
+ return data;
+
+ data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
+ data &= (tps->info[ldo]->table_len - 1);
+ return tps->info[ldo]->table[data] * 1000;
+}
+
+static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, vsel, ldo = rdev_get_id(dev);
+
+ if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
+ return -EINVAL;
+
+ if (min_uV < tps->info[ldo]->min_uV || min_uV > tps->info[ldo]->max_uV)
+ return -EINVAL;
+ if (max_uV < tps->info[ldo]->min_uV || max_uV > tps->info[ldo]->max_uV)
+ return -EINVAL;
+
+ for (vsel = 0; vsel < tps->info[ldo]->table_len; vsel++) {
+ int mV = tps->info[ldo]->table[vsel];
+ int uV = mV * 1000;
+
+ /* Break at the first in-range value */
+ if (min_uV <= uV && uV <= max_uV)
+ break;
+ }
+
+ if (vsel == tps->info[ldo]->table_len)
+ return -EINVAL;
+
+ data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
+ if (data < 0)
+ return data;
+
+ data &= TPS65023_LDO_CTRL_LDOx_MASK(ldo - TPS65023_LDO_1);
+ data |= (vsel << (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)));
+ return tps_65023_reg_write(tps, TPS65023_REG_LDO_CTRL, data);
+}
+
+static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+
+ if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
+ return -EINVAL;
+
+ if (dcdc == TPS65023_DCDC_1) {
+ if (selector >= tps->info[dcdc]->table_len)
+ return -EINVAL;
+ else
+ return tps->info[dcdc]->table[selector] * 1000;
+ } else
+ return tps->info[dcdc]->min_uV;
+}
+
+static int tps65023_ldo_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev);
+
+ if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
+ return -EINVAL;
+
+ if (selector >= tps->info[ldo]->table_len)
+ return -EINVAL;
+ else
+ return tps->info[ldo]->table[selector] * 1000;
+}
+
+/* Operations permitted on VDCDCx */
+static struct regulator_ops tps65023_dcdc_ops = {
+ .is_enabled = tps65023_dcdc_is_enabled,
+ .enable = tps65023_dcdc_enable,
+ .disable = tps65023_dcdc_disable,
+ .get_voltage = tps65023_dcdc_get_voltage,
+ .set_voltage = tps65023_dcdc_set_voltage,
+ .list_voltage = tps65023_dcdc_list_voltage,
+};
+
+/* Operations permitted on LDOx */
+static struct regulator_ops tps65023_ldo_ops = {
+ .is_enabled = tps65023_ldo_is_enabled,
+ .enable = tps65023_ldo_enable,
+ .disable = tps65023_ldo_disable,
+ .get_voltage = tps65023_ldo_get_voltage,
+ .set_voltage = tps65023_ldo_set_voltage,
+ .list_voltage = tps65023_ldo_list_voltage,
+};
+
+static
+int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ static int desc_id;
+ const struct tps_info *info = (void *)id->driver_data;
+ struct regulator_init_data *init_data;
+ struct regulator_dev *rdev;
+ struct tps_pmic *tps;
+ int i;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ /*
+ * init_data points to an array of regulator_init_data structures
+ * coming from the board-evm file.
+ */
+ init_data = client->dev.platform_data;
+
+ if (!init_data)
+ return -EIO;
+
+ tps = kzalloc(sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ mutex_init(&tps->io_lock);
+
+ /* common for all regulators */
+ tps->client = client;
+
+ for (i = 0; i < TPS65023_NUM_REGULATOR; i++, info++, init_data++) {
+ /* Store regulator specific information */
+ tps->info[i] = info;
+
+ tps->desc[i].name = info->name;
+ tps->desc[i].id = desc_id++;
+ tps->desc[i].n_voltages = num_voltages[i];
+ tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
+ &tps65023_ldo_ops : &tps65023_dcdc_ops);
+ tps->desc[i].type = REGULATOR_VOLTAGE;
+ tps->desc[i].owner = THIS_MODULE;
+
+ /* Register the regulators */
+ rdev = regulator_register(&tps->desc[i], &client->dev,
+ init_data, tps);
+ if (IS_ERR(rdev)) {
+ dev_err(&client->dev, "failed to register %s\n",
+ id->name);
+
+ /* Unregister */
+ while (i)
+ regulator_unregister(tps->rdev[--i]);
+
+ tps->client = NULL;
+
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+ kfree(tps);
+ return PTR_ERR(rdev);
+ }
+
+ /* Save regulator for cleanup */
+ tps->rdev[i] = rdev;
+ }
+
+ i2c_set_clientdata(client, tps);
+
+ return 0;
+}
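A hedged board-file sketch of the platform data the probe above expects: an array of one regulator_init_data entry per output, in VDCDC1, VDCDC2, VDCDC3, LDO1, LDO2 order, passed through the I2C board info. The constraints and the 0x48 slave address are hypothetical.

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data example_tps65023_data[5] = {
        [0] = { /* VDCDC1 */
                .constraints = {
                        .min_uV         = 800000,
                        .max_uV         = 1600000,
                        .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
                },
        },
        /* [1]..[4]: VDCDC2, VDCDC3, LDO1 and LDO2 in the same order */
};

static struct i2c_board_info example_pmic_board_info __initdata = {
        I2C_BOARD_INFO("tps65023", 0x48),       /* hypothetical bus address */
        .platform_data = example_tps65023_data,
};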
+
+/**
+ * tps_65023_remove - TPS65023 driver i2c remove handler
+ * @client: i2c driver client device structure
+ *
+ * Unregister TPS driver as an i2c client device driver
+ */
+static int __devexit tps_65023_remove(struct i2c_client *client)
+{
+ struct tps_pmic *tps = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
+ regulator_unregister(tps->rdev[i]);
+
+ tps->client = NULL;
+
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+ kfree(tps);
+
+ return 0;
+}
+
+static const struct tps_info tps65023_regs[] = {
+ {
+ .name = "VDCDC1",
+ .min_uV = 800000,
+ .max_uV = 1600000,
+ .table_len = ARRAY_SIZE(VDCDC1_VSEL_table),
+ .table = VDCDC1_VSEL_table,
+ },
+ {
+ .name = "VDCDC2",
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .fixed = 1,
+ },
+ {
+ .name = "VDCDC3",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .fixed = 1,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3150000,
+ .table_len = ARRAY_SIZE(LDO1_VSEL_table),
+ .table = LDO1_VSEL_table,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 1050000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO2_VSEL_table),
+ .table = LDO2_VSEL_table,
+ },
+};
+
+static const struct i2c_device_id tps_65023_id[] = {
+ {.name = "tps65023",
+ .driver_data = (unsigned long) tps65023_regs,},
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, tps_65023_id);
+
+static struct i2c_driver tps_65023_i2c_driver = {
+ .driver = {
+ .name = "tps65023",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps_65023_probe,
+ .remove = __devexit_p(tps_65023_remove),
+ .id_table = tps_65023_id,
+};
+
+/**
+ * tps_65023_init
+ *
+ * Module init function
+ */
+static int __init tps_65023_init(void)
+{
+ return i2c_add_driver(&tps_65023_i2c_driver);
+}
+subsys_initcall(tps_65023_init);
+
+/**
+ * tps_65023_cleanup
+ *
+ * Module exit function
+ */
+static void __exit tps_65023_cleanup(void)
+{
+ i2c_del_driver(&tps_65023_i2c_driver);
+}
+module_exit(tps_65023_cleanup);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("TPS65023 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
new file mode 100644
index 00000000000..f8a6dfbef75
--- /dev/null
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -0,0 +1,714 @@
+/*
+ * tps6507x-regulator.c
+ *
+ * Regulator driver for TPS65073 PMIC
+ *
+ * Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+/* Register definitions */
+#define TPS6507X_REG_PPATH1 0X01
+#define TPS6507X_REG_INT 0X02
+#define TPS6507X_REG_CHGCONFIG0 0X03
+#define TPS6507X_REG_CHGCONFIG1 0X04
+#define TPS6507X_REG_CHGCONFIG2 0X05
+#define TPS6507X_REG_CHGCONFIG3 0X06
+#define TPS6507X_REG_REG_ADCONFIG 0X07
+#define TPS6507X_REG_TSCMODE 0X08
+#define TPS6507X_REG_ADRESULT_1 0X09
+#define TPS6507X_REG_ADRESULT_2 0X0A
+#define TPS6507X_REG_PGOOD 0X0B
+#define TPS6507X_REG_PGOODMASK 0X0C
+#define TPS6507X_REG_CON_CTRL1 0X0D
+#define TPS6507X_REG_CON_CTRL2 0X0E
+#define TPS6507X_REG_CON_CTRL3 0X0F
+#define TPS6507X_REG_DEFDCDC1 0X10
+#define TPS6507X_REG_DEFDCDC2_LOW 0X11
+#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
+#define TPS6507X_REG_DEFDCDC3_LOW 0X13
+#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
+#define TPS6507X_REG_DEFSLEW 0X15
+#define TPS6507X_REG_LDO_CTRL1 0X16
+#define TPS6507X_REG_DEFLDO2 0X17
+#define TPS6507X_REG_WLED_CTRL1 0X18
+#define TPS6507X_REG_WLED_CTRL2 0X19
+
+/* CON_CTRL1 bitfields */
+#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
+#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
+#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
+#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
+#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
+
+/* DEFDCDC1 bitfields */
+#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
+#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
+
+/* DEFDCDC2_LOW bitfields */
+#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
+
+/* DEFDCDC2_HIGH bitfields */
+#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
+
+/* DEFDCDC3_LOW bitfields */
+#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
+
+/* DEFDCDC3_HIGH bitfields */
+#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
+
+/* TPS6507X_REG_LDO_CTRL1 bitfields */
+#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
+
+/* TPS6507X_REG_DEFLDO2 bitfields */
+#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
+
+/* VDCDC MASK */
+#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
+
+/* DCDC's */
+#define TPS6507X_DCDC_1 0
+#define TPS6507X_DCDC_2 1
+#define TPS6507X_DCDC_3 2
+/* LDOs */
+#define TPS6507X_LDO_1 3
+#define TPS6507X_LDO_2 4
+
+#define TPS6507X_MAX_REG_ID TPS6507X_LDO_2
+
+/* Number of step-down converters available */
+#define TPS6507X_NUM_DCDC 3
+/* Number of LDO voltage regulators available */
+#define TPS6507X_NUM_LDO 2
+/* Number of total regulators available */
+#define TPS6507X_NUM_REGULATOR (TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO)
+
+/* Supported voltage values for regulators (in milliVolts) */
+static const u16 VDCDCx_VSEL_table[] = {
+ 725, 750, 775, 800,
+ 825, 850, 875, 900,
+ 925, 950, 975, 1000,
+ 1025, 1050, 1075, 1100,
+ 1125, 1150, 1175, 1200,
+ 1225, 1250, 1275, 1300,
+ 1325, 1350, 1375, 1400,
+ 1425, 1450, 1475, 1500,
+ 1550, 1600, 1650, 1700,
+ 1750, 1800, 1850, 1900,
+ 1950, 2000, 2050, 2100,
+ 2150, 2200, 2250, 2300,
+ 2350, 2400, 2450, 2500,
+ 2550, 2600, 2650, 2700,
+ 2750, 2800, 2850, 2900,
+ 3000, 3100, 3200, 3300,
+};
+
+static const u16 LDO1_VSEL_table[] = {
+ 1000, 1100, 1200, 1250,
+ 1300, 1350, 1400, 1500,
+ 1600, 1800, 2500, 2750,
+ 2800, 3000, 3100, 3300,
+};
+
+static const u16 LDO2_VSEL_table[] = {
+ 725, 750, 775, 800,
+ 825, 850, 875, 900,
+ 925, 950, 975, 1000,
+ 1025, 1050, 1075, 1100,
+ 1125, 1150, 1175, 1200,
+ 1225, 1250, 1275, 1300,
+ 1325, 1350, 1375, 1400,
+ 1425, 1450, 1475, 1500,
+ 1550, 1600, 1650, 1700,
+ 1750, 1800, 1850, 1900,
+ 1950, 2000, 2050, 2100,
+ 2150, 2200, 2250, 2300,
+ 2350, 2400, 2450, 2500,
+ 2550, 2600, 2650, 2700,
+ 2750, 2800, 2850, 2900,
+ 3000, 3100, 3200, 3300,
+};
+
+static unsigned int num_voltages[] = {ARRAY_SIZE(VDCDCx_VSEL_table),
+ ARRAY_SIZE(VDCDCx_VSEL_table),
+ ARRAY_SIZE(VDCDCx_VSEL_table),
+ ARRAY_SIZE(LDO1_VSEL_table),
+ ARRAY_SIZE(LDO2_VSEL_table)};
+
+struct tps_info {
+ const char *name;
+ unsigned min_uV;
+ unsigned max_uV;
+ u8 table_len;
+ const u16 *table;
+};
+
+struct tps_pmic {
+ struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
+ struct i2c_client *client;
+ struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
+ const struct tps_info *info[TPS6507X_NUM_REGULATOR];
+ struct mutex io_lock;
+};
+
+static inline int tps_6507x_read(struct tps_pmic *tps, u8 reg)
+{
+ return i2c_smbus_read_byte_data(tps->client, reg);
+}
+
+static inline int tps_6507x_write(struct tps_pmic *tps, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(tps->client, reg, val);
+}
+
+static int tps_6507x_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+{
+ int err, data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps_6507x_read(tps, reg);
+ if (data < 0) {
+ dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+
+ data |= mask;
+ err = tps_6507x_write(tps, reg, data);
+ if (err)
+ dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps->io_lock);
+ return err;
+}
+
+static int tps_6507x_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+{
+ int err, data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps_6507x_read(tps, reg);
+ if (data < 0) {
+ dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ err = data;
+ goto out;
+ }
+
+ data &= ~mask;
+ err = tps_6507x_write(tps, reg, data);
+ if (err)
+ dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps->io_lock);
+ return err;
+}
+
+static int tps_6507x_reg_read(struct tps_pmic *tps, u8 reg)
+{
+ int data;
+
+ mutex_lock(&tps->io_lock);
+
+ data = tps_6507x_read(tps, reg);
+ if (data < 0)
+ dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps->io_lock);
+ return data;
+}
+
+static int tps_6507x_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&tps->io_lock);
+
+ err = tps_6507x_write(tps, reg, val);
+ if (err < 0)
+ dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps->io_lock);
+ return err;
+}
+
+static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, dcdc = rdev_get_id(dev);
+ u8 shift;
+
+ if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
+ return -EINVAL;
+
+ shift = TPS6507X_MAX_REG_ID - dcdc;
+ data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1);
+
+ if (data < 0)
+ return data;
+ else
+ return (data & 1<<shift) ? 1 : 0;
+}
+
+static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, ldo = rdev_get_id(dev);
+ u8 shift;
+
+ if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
+ return -EINVAL;
+
+ shift = TPS6507X_MAX_REG_ID - ldo;
+ data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1);
+
+ if (data < 0)
+ return data;
+ else
+ return (data & 1<<shift) ? 1 : 0;
+}
+
+static int tps6507x_dcdc_enable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+ u8 shift;
+
+ if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
+ return -EINVAL;
+
+ shift = TPS6507X_MAX_REG_ID - dcdc;
+ return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+}
+
+static int tps6507x_dcdc_disable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+ u8 shift;
+
+ if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
+ return -EINVAL;
+
+ shift = TPS6507X_MAX_REG_ID - dcdc;
+ return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+}
+
+static int tps6507x_ldo_enable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev);
+ u8 shift;
+
+ if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
+ return -EINVAL;
+
+ shift = TPS6507X_MAX_REG_ID - ldo;
+ return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+}
+
+static int tps6507x_ldo_disable(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev);
+ u8 shift;
+
+ if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
+ return -EINVAL;
+
+ shift = TPS6507X_MAX_REG_ID - ldo;
+ return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+}
+
+static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, dcdc = rdev_get_id(dev);
+ u8 reg;
+
+ switch (dcdc) {
+ case TPS6507X_DCDC_1:
+ reg = TPS6507X_REG_DEFDCDC1;
+ break;
+ case TPS6507X_DCDC_2:
+ reg = TPS6507X_REG_DEFDCDC2_LOW;
+ break;
+ case TPS6507X_DCDC_3:
+ reg = TPS6507X_REG_DEFDCDC3_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data = tps_6507x_reg_read(tps, reg);
+ if (data < 0)
+ return data;
+
+ data &= TPS6507X_DEFDCDCX_DCDC_MASK;
+ return tps->info[dcdc]->table[data] * 1000;
+}
+
+static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, vsel, dcdc = rdev_get_id(dev);
+ u8 reg;
+
+ switch (dcdc) {
+ case TPS6507X_DCDC_1:
+ reg = TPS6507X_REG_DEFDCDC1;
+ break;
+ case TPS6507X_DCDC_2:
+ reg = TPS6507X_REG_DEFDCDC2_LOW;
+ break;
+ case TPS6507X_DCDC_3:
+ reg = TPS6507X_REG_DEFDCDC3_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (min_uV < tps->info[dcdc]->min_uV
+ || min_uV > tps->info[dcdc]->max_uV)
+ return -EINVAL;
+ if (max_uV < tps->info[dcdc]->min_uV
+ || max_uV > tps->info[dcdc]->max_uV)
+ return -EINVAL;
+
+ for (vsel = 0; vsel < tps->info[dcdc]->table_len; vsel++) {
+ int mV = tps->info[dcdc]->table[vsel];
+ int uV = mV * 1000;
+
+ /* Break at the first in-range value */
+ if (min_uV <= uV && uV <= max_uV)
+ break;
+ }
+
+ /* write to the register in case we found a match */
+ if (vsel == tps->info[dcdc]->table_len)
+ return -EINVAL;
+
+ data = tps_6507x_reg_read(tps, reg);
+ if (data < 0)
+ return data;
+
+ data &= ~TPS6507X_DEFDCDCX_DCDC_MASK;
+ data |= vsel;
+
+ return tps_6507x_reg_write(tps, reg, data);
+}
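As a worked example of the selection loop: a request for 1150000-1200000 uV on any of the DCDCs walks VDCDCx_VSEL_table until it reaches 1150 mV at vsel = 17, and that selector is merged into the low six bits of the corresponding DEFDCDC register.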
+
+static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, ldo = rdev_get_id(dev);
+ u8 reg, mask;
+
+ if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
+ return -EINVAL;
+ else {
+ reg = (ldo == TPS6507X_LDO_1 ?
+ TPS6507X_REG_LDO_CTRL1 : TPS6507X_REG_DEFLDO2);
+ mask = (ldo == TPS6507X_LDO_1 ?
+ TPS6507X_REG_LDO_CTRL1_LDO1_MASK :
+ TPS6507X_REG_DEFLDO2_LDO2_MASK);
+ }
+
+ data = tps_6507x_reg_read(tps, reg);
+ if (data < 0)
+ return data;
+
+ data &= mask;
+ return tps->info[ldo]->table[data] * 1000;
+}
+
+static int tps6507x_ldo_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int data, vsel, ldo = rdev_get_id(dev);
+ u8 reg, mask;
+
+ if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
+ return -EINVAL;
+ else {
+ reg = (ldo == TPS6507X_LDO_1 ?
+ TPS6507X_REG_LDO_CTRL1 : TPS6507X_REG_DEFLDO2);
+ mask = (ldo == TPS6507X_LDO_1 ?
+ TPS6507X_REG_LDO_CTRL1_LDO1_MASK :
+ TPS6507X_REG_DEFLDO2_LDO2_MASK);
+ }
+
+ if (min_uV < tps->info[ldo]->min_uV || min_uV > tps->info[ldo]->max_uV)
+ return -EINVAL;
+ if (max_uV < tps->info[ldo]->min_uV || max_uV > tps->info[ldo]->max_uV)
+ return -EINVAL;
+
+ for (vsel = 0; vsel < tps->info[ldo]->table_len; vsel++) {
+ int mV = tps->info[ldo]->table[vsel];
+ int uV = mV * 1000;
+
+ /* Break at the first in-range value */
+ if (min_uV <= uV && uV <= max_uV)
+ break;
+ }
+
+ if (vsel == tps->info[ldo]->table_len)
+ return -EINVAL;
+
+ data = tps_6507x_reg_read(tps, reg);
+ if (data < 0)
+ return data;
+
+ data &= ~mask;
+ data |= vsel;
+
+ return tps_6507x_reg_write(tps, reg, data);
+}
+
+static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int dcdc = rdev_get_id(dev);
+
+ if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
+ return -EINVAL;
+
+ if (selector >= tps->info[dcdc]->table_len)
+ return -EINVAL;
+ else
+ return tps->info[dcdc]->table[selector] * 1000;
+}
+
+static int tps6507x_ldo_list_voltage(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ldo = rdev_get_id(dev);
+
+ if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
+ return -EINVAL;
+
+ if (selector >= tps->info[ldo]->table_len)
+ return -EINVAL;
+ else
+ return tps->info[ldo]->table[selector] * 1000;
+}
+
+/* Operations permitted on VDCDCx */
+static struct regulator_ops tps6507x_dcdc_ops = {
+ .is_enabled = tps6507x_dcdc_is_enabled,
+ .enable = tps6507x_dcdc_enable,
+ .disable = tps6507x_dcdc_disable,
+ .get_voltage = tps6507x_dcdc_get_voltage,
+ .set_voltage = tps6507x_dcdc_set_voltage,
+ .list_voltage = tps6507x_dcdc_list_voltage,
+};
+
+/* Operations permitted on LDOx */
+static struct regulator_ops tps6507x_ldo_ops = {
+ .is_enabled = tps6507x_ldo_is_enabled,
+ .enable = tps6507x_ldo_enable,
+ .disable = tps6507x_ldo_disable,
+ .get_voltage = tps6507x_ldo_get_voltage,
+ .set_voltage = tps6507x_ldo_set_voltage,
+ .list_voltage = tps6507x_ldo_list_voltage,
+};
+
+static
+int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ static int desc_id;
+ const struct tps_info *info = (void *)id->driver_data;
+ struct regulator_init_data *init_data;
+ struct regulator_dev *rdev;
+ struct tps_pmic *tps;
+ int i;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ /*
+ * init_data points to an array of regulator_init_data structures
+ * coming from the board-evm file.
+ */
+ init_data = client->dev.platform_data;
+
+ if (!init_data)
+ return -EIO;
+
+ tps = kzalloc(sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ mutex_init(&tps->io_lock);
+
+ /* common for all regulators */
+ tps->client = client;
+
+ for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
+ /* Register the regulators */
+ tps->info[i] = info;
+ tps->desc[i].name = info->name;
+ tps->desc[i].id = desc_id++;
+ tps->desc[i].n_voltages = num_voltages[i];
+ tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
+ &tps6507x_ldo_ops : &tps6507x_dcdc_ops);
+ tps->desc[i].type = REGULATOR_VOLTAGE;
+ tps->desc[i].owner = THIS_MODULE;
+
+ rdev = regulator_register(&tps->desc[i],
+ &client->dev, init_data, tps);
+ if (IS_ERR(rdev)) {
+ dev_err(&client->dev, "failed to register %s\n",
+ id->name);
+
+ /* Unregister */
+ while (i)
+ regulator_unregister(tps->rdev[--i]);
+
+ tps->client = NULL;
+
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+
+ kfree(tps);
+ return PTR_ERR(rdev);
+ }
+
+ /* Save regulator for cleanup */
+ tps->rdev[i] = rdev;
+ }
+
+ i2c_set_clientdata(client, tps);
+
+ return 0;
+}
+
+/**
+ * tps_6507x_remove - TPS6507x driver i2c remove handler
+ * @client: i2c driver client device structure
+ *
+ * Unregister TPS driver as an i2c client device driver
+ */
+static int __devexit tps_6507x_remove(struct i2c_client *client)
+{
+ struct tps_pmic *tps = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
+ regulator_unregister(tps->rdev[i]);
+
+ tps->client = NULL;
+
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+ kfree(tps);
+
+ return 0;
+}
+
+static const struct tps_info tps6507x_regs[] = {
+ {
+ .name = "VDCDC1",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "VDCDC2",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "VDCDC3",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO1_VSEL_table),
+ .table = LDO1_VSEL_table,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO2_VSEL_table),
+ .table = LDO2_VSEL_table,
+ },
+};
+
+static const struct i2c_device_id tps_6507x_id[] = {
+ {.name = "tps6507x",
+ .driver_data = (unsigned long) tps6507x_regs,},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, tps_6507x_id);
+
+static struct i2c_driver tps_6507x_i2c_driver = {
+ .driver = {
+ .name = "tps6507x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps_6507x_probe,
+ .remove = __devexit_p(tps_6507x_remove),
+ .id_table = tps_6507x_id,
+};
+
+/**
+ * tps_6507x_init
+ *
+ * Module init function
+ */
+static int __init tps_6507x_init(void)
+{
+ return i2c_add_driver(&tps_6507x_i2c_driver);
+}
+subsys_initcall(tps_6507x_init);
+
+/**
+ * tps_6507x_cleanup
+ *
+ * Module exit function
+ */
+static void __exit tps_6507x_cleanup(void)
+{
+ i2c_del_driver(&tps_6507x_i2c_driver);
+}
+module_exit(tps_6507x_cleanup);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 06d2fa96a8b..44917da4ac9 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -93,16 +93,21 @@ static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(name, 0444, reg_show_name, NULL);
static DEVICE_ATTR(state, 0644, reg_show_state, reg_set_state);
-static struct device_attribute *attributes[] = {
- &dev_attr_name,
- &dev_attr_state,
+static struct attribute *attributes[] = {
+ &dev_attr_name.attr,
+ &dev_attr_state.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = attributes,
};
static int regulator_userspace_consumer_probe(struct platform_device *pdev)
{
struct regulator_userspace_consumer_data *pdata;
struct userspace_consumer_data *drvdata;
- int ret, i;
+ int ret;
pdata = pdev->dev.platform_data;
if (!pdata)
@@ -125,31 +130,29 @@ static int regulator_userspace_consumer_probe(struct platform_device *pdev)
goto err_alloc_supplies;
}
- for (i = 0; i < ARRAY_SIZE(attributes); i++) {
- ret = device_create_file(&pdev->dev, attributes[i]);
- if (ret != 0)
- goto err_create_attrs;
- }
+ ret = sysfs_create_group(&pdev->dev.kobj, &attr_group);
+ if (ret != 0)
+ goto err_create_attrs;
- if (pdata->init_on)
+ if (pdata->init_on) {
ret = regulator_bulk_enable(drvdata->num_supplies,
drvdata->supplies);
-
- drvdata->enabled = pdata->init_on;
-
- if (ret) {
- dev_err(&pdev->dev, "Failed to set initial state: %d\n", ret);
- goto err_create_attrs;
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set initial state: %d\n", ret);
+ goto err_enable;
+ }
}
+ drvdata->enabled = pdata->init_on;
platform_set_drvdata(pdev, drvdata);
return 0;
-err_create_attrs:
- for (i = 0; i < ARRAY_SIZE(attributes); i++)
- device_remove_file(&pdev->dev, attributes[i]);
+err_enable:
+ sysfs_remove_group(&pdev->dev.kobj, &attr_group);
+err_create_attrs:
regulator_bulk_free(drvdata->num_supplies, drvdata->supplies);
err_alloc_supplies:
@@ -160,10 +163,8 @@ err_alloc_supplies:
static int regulator_userspace_consumer_remove(struct platform_device *pdev)
{
struct userspace_consumer_data *data = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < ARRAY_SIZE(attributes); i++)
- device_remove_file(&pdev->dev, attributes[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &attr_group);
if (data->enabled)
regulator_bulk_disable(data->num_supplies, data->supplies);
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index e7db5664722..addc032c84b 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -27,71 +27,81 @@ struct virtual_consumer_data {
unsigned int mode;
};
-static void update_voltage_constraints(struct virtual_consumer_data *data)
+static void update_voltage_constraints(struct device *dev,
+ struct virtual_consumer_data *data)
{
int ret;
if (data->min_uV && data->max_uV
&& data->min_uV <= data->max_uV) {
+ dev_dbg(dev, "Requesting %d-%duV\n",
+ data->min_uV, data->max_uV);
ret = regulator_set_voltage(data->regulator,
- data->min_uV, data->max_uV);
+ data->min_uV, data->max_uV);
if (ret != 0) {
- printk(KERN_ERR "regulator_set_voltage() failed: %d\n",
- ret);
+ dev_err(dev,
+ "regulator_set_voltage() failed: %d\n", ret);
return;
}
}
if (data->min_uV && data->max_uV && !data->enabled) {
+ dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
data->enabled = 1;
else
- printk(KERN_ERR "regulator_enable() failed: %d\n",
+ dev_err(dev, "regulator_enable() failed: %d\n",
ret);
}
if (!(data->min_uV && data->max_uV) && data->enabled) {
+ dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
data->enabled = 0;
else
- printk(KERN_ERR "regulator_disable() failed: %d\n",
+ dev_err(dev, "regulator_disable() failed: %d\n",
ret);
}
}
-static void update_current_limit_constraints(struct virtual_consumer_data
- *data)
+static void update_current_limit_constraints(struct device *dev,
+ struct virtual_consumer_data *data)
{
int ret;
if (data->max_uA
&& data->min_uA <= data->max_uA) {
+ dev_dbg(dev, "Requesting %d-%duA\n",
+ data->min_uA, data->max_uA);
ret = regulator_set_current_limit(data->regulator,
data->min_uA, data->max_uA);
if (ret != 0) {
- pr_err("regulator_set_current_limit() failed: %d\n",
- ret);
+ dev_err(dev,
+ "regulator_set_current_limit() failed: %d\n",
+ ret);
return;
}
}
if (data->max_uA && !data->enabled) {
+ dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
data->enabled = 1;
else
- printk(KERN_ERR "regulator_enable() failed: %d\n",
+ dev_err(dev, "regulator_enable() failed: %d\n",
ret);
}
if (!(data->min_uA && data->max_uA) && data->enabled) {
+ dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
data->enabled = 0;
else
- printk(KERN_ERR "regulator_disable() failed: %d\n",
+ dev_err(dev, "regulator_disable() failed: %d\n",
ret);
}
}
@@ -115,7 +125,7 @@ static ssize_t set_min_uV(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->min_uV = val;
- update_voltage_constraints(data);
+ update_voltage_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -141,7 +151,7 @@ static ssize_t set_max_uV(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->max_uV = val;
- update_voltage_constraints(data);
+ update_voltage_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -167,7 +177,7 @@ static ssize_t set_min_uA(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->min_uA = val;
- update_current_limit_constraints(data);
+ update_current_limit_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -193,7 +203,7 @@ static ssize_t set_max_uA(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->lock);
data->max_uA = val;
- update_current_limit_constraints(data);
+ update_current_limit_constraints(dev, data);
mutex_unlock(&data->lock);
@@ -276,8 +286,7 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
if (drvdata == NULL) {
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
mutex_init(&drvdata->lock);
@@ -285,13 +294,18 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
drvdata->regulator = regulator_get(&pdev->dev, reg_id);
if (IS_ERR(drvdata->regulator)) {
ret = PTR_ERR(drvdata->regulator);
+ dev_err(&pdev->dev, "Failed to obtain supply '%s': %d\n",
+ reg_id, ret);
goto err;
}
for (i = 0; i < ARRAY_SIZE(attributes); i++) {
ret = device_create_file(&pdev->dev, attributes[i]);
- if (ret != 0)
- goto err;
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to create attr %d: %d\n",
+ i, ret);
+ goto err_regulator;
+ }
}
drvdata->mode = regulator_get_mode(drvdata->regulator);
@@ -300,6 +314,8 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
return 0;
+err_regulator:
+ regulator_put(drvdata->regulator);
err:
for (i = 0; i < ARRAY_SIZE(attributes); i++)
device_remove_file(&pdev->dev, attributes[i]);
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 17a00b0fafd..768bd0e5b48 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1419,6 +1419,8 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
{
struct platform_device *pdev;
int ret;
+ if (reg < 0 || reg >= NUM_WM8350_REGULATORS)
+ return -EINVAL;
if (wm8350->pmic.pdev[reg])
return -EBUSY;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 73771b09fbd..3c20dae43ce 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -378,6 +378,15 @@ config RTC_DRV_DS3234
This driver can also be built as a module. If so, the module
will be called rtc-ds3234.
+config RTC_DRV_PCF2123
+ tristate "NXP PCF2123"
+ help
+ If you say yes here you get support for the NXP PCF2123
+ RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf2123.
+
endif # SPI_MASTER
comment "Platform RTC drivers"
@@ -500,6 +509,17 @@ config RTC_DRV_M48T59
This driver can also be built as a module, if so, the module
will be called "rtc-m48t59".
+config RTC_MXC
+ tristate "Freescale MXC Real Time Clock"
+ depends on ARCH_MXC
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the Freescale MXC
+ RTC module.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mxc".
+
config RTC_DRV_BQ4802
tristate "TI BQ4802"
help
@@ -778,4 +798,33 @@ config RTC_DRV_PS3
This driver can also be built as a module. If so, the module
will be called rtc-ps3.
+config RTC_DRV_COH901331
+ tristate "ST-Ericsson COH 901 331 RTC"
+ depends on ARCH_U300
+ help
+ If you say Y here you will get support for the ST-Ericsson
+ COH 901 331 RTC clock found in some ST-Ericsson mobile
+ platforms.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-coh901331".
+
+config RTC_DRV_STMP
+ tristate "Freescale STMP3xxx RTC"
+ depends on ARCH_STMP3XXX
+ help
+ If you say yes here you will get support for the onboard
+ STMP3xxx RTC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-stmp3xxx.
+
+config RTC_DRV_PCAP
+ tristate "PCAP RTC"
+ depends on EZX_PCAP
+ help
+ If you say Y here you will get support for the RTC found on
+ the PCAP2 ASIC used on some Motorola phones.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 5e152ffe505..aa3fbd5517a 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -23,7 +23,9 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
+obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
@@ -40,24 +42,26 @@ obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o
obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
+obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
-obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
-obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
-obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
+obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
+obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
+obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
+obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
-obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
+obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
@@ -69,7 +73,10 @@ obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
+obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
+obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
+obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
@@ -78,5 +85,3 @@ obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
-obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
-obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b5bf9370691..bc8bbca9a2e 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -289,7 +289,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
AT91_RTC_CALEV);
ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
- IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
@@ -340,7 +340,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
static u32 at91_rtc_imr;
-static int at91_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int at91_rtc_suspend(struct device *dev)
{
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
@@ -348,7 +348,7 @@ static int at91_rtc_suspend(struct platform_device *pdev, pm_message_t state)
at91_rtc_imr = at91_sys_read(AT91_RTC_IMR)
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
enable_irq_wake(AT91_ID_SYS);
else
at91_sys_write(AT91_RTC_IDR, at91_rtc_imr);
@@ -356,28 +356,34 @@ static int at91_rtc_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int at91_rtc_resume(struct platform_device *pdev)
+static int at91_rtc_resume(struct device *dev)
{
if (at91_rtc_imr) {
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
disable_irq_wake(AT91_ID_SYS);
else
at91_sys_write(AT91_RTC_IER, at91_rtc_imr);
}
return 0;
}
+
+static const struct dev_pm_ops at91_rtc_pm = {
+ .suspend = at91_rtc_suspend,
+ .resume = at91_rtc_resume,
+};
+
+#define at91_rtc_pm_ptr &at91_rtc_pm
+
#else
-#define at91_rtc_suspend NULL
-#define at91_rtc_resume NULL
+#define at91_rtc_pm_ptr NULL
#endif
static struct platform_driver at91_rtc_driver = {
.remove = __exit_p(at91_rtc_remove),
- .suspend = at91_rtc_suspend,
- .resume = at91_rtc_resume,
.driver = {
.name = "at91_rtc",
.owner = THIS_MODULE,
+ .pm = at91_rtc_pm_ptr,
},
};
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index a118eb0f1e6..b11485b9f21 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -383,7 +383,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
}
/* Grab the IRQ and init the hardware */
- ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
+ ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, 0, pdev->name, dev);
if (unlikely(ret))
goto err_reg;
/* sometimes the bootloader touched things, but the write complete was not
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
new file mode 100644
index 00000000000..7fe1fa26c52
--- /dev/null
+++ b/drivers/rtc/rtc-coh901331.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Real Time Clock interface for ST-Ericsson AB COH 901 331 RTC.
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Based on rtc-pl031.c by Deepak Saxena <dsaxena@plexity.net>
+ * Copyright 2006 (c) MontaVista Software, Inc.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * Registers in the COH 901 331
+ */
+/* Alarm value 32bit (R/W) */
+#define COH901331_ALARM 0x00U
+/* Used to set current time 32bit (R/W) */
+#define COH901331_SET_TIME 0x04U
+/* Indication if current time is valid 32bit (R/-) */
+#define COH901331_VALID 0x08U
+/* Read the current time 32bit (R/-) */
+#define COH901331_CUR_TIME 0x0cU
+/* Event register for the "alarm" interrupt */
+#define COH901331_IRQ_EVENT 0x10U
+/* Mask register for the "alarm" interrupt */
+#define COH901331_IRQ_MASK 0x14U
+/* Force register for the "alarm" interrupt */
+#define COH901331_IRQ_FORCE 0x18U
+
+/*
+ * Reference to RTC block clock
+ * Notice that the frequent clk_enable()/clk_disable() calls on this
+ * clock are mainly there to be able to turn on/off other clocks in the
+ * hierarchy as needed; the RTC clock itself is always on anyway.
+ */
+struct coh901331_port {
+ struct rtc_device *rtc;
+ struct clk *clk;
+ u32 phybase;
+ u32 physize;
+ void __iomem *virtbase;
+ int irq;
+#ifdef CONFIG_PM
+ u32 irqmaskstore;
+#endif
+};
+
+static irqreturn_t coh901331_interrupt(int irq, void *data)
+{
+ struct coh901331_port *rtap = data;
+
+ clk_enable(rtap->clk);
+ /* Ack IRQ */
+ writel(1, rtap->virtbase + COH901331_IRQ_EVENT);
+ clk_disable(rtap->clk);
+ /* Set alarm flag */
+ rtc_update_irq(rtap->rtc, 1, RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static int coh901331_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(dev);
+
+ clk_enable(rtap->clk);
+ /* Check if the time is valid */
+ if (readl(rtap->virtbase + COH901331_VALID)) {
+ rtc_time_to_tm(readl(rtap->virtbase + COH901331_CUR_TIME), tm);
+ clk_disable(rtap->clk);
+ return rtc_valid_tm(tm);
+ }
+ clk_disable(rtap->clk);
+ return -EINVAL;
+}
+
+static int coh901331_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(dev);
+
+ clk_enable(rtap->clk);
+ writel(secs, rtap->virtbase + COH901331_SET_TIME);
+ clk_disable(rtap->clk);
+
+ return 0;
+}
+
+static int coh901331_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(dev);
+
+ clk_enable(rtap->clk);
+ rtc_time_to_tm(readl(rtap->virtbase + COH901331_ALARM), &alarm->time);
+ alarm->pending = readl(rtap->virtbase + COH901331_IRQ_EVENT) & 1U;
+ alarm->enabled = readl(rtap->virtbase + COH901331_IRQ_MASK) & 1U;
+ clk_disable(rtap->clk);
+
+ return 0;
+}
+
+static int coh901331_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(dev);
+ unsigned long time;
+
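+ /* the alarm register holds the alarm time as a plain seconds count */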
+ rtc_tm_to_time(&alarm->time, &time);
+ clk_enable(rtap->clk);
+ writel(time, rtap->virtbase + COH901331_ALARM);
+ writel(alarm->enabled, rtap->virtbase + COH901331_IRQ_MASK);
+ clk_disable(rtap->clk);
+
+ return 0;
+}
+
+static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(dev);
+
+ clk_enable(rtap->clk);
+ if (enabled)
+ writel(1, rtap->virtbase + COH901331_IRQ_MASK);
+ else
+ writel(0, rtap->virtbase + COH901331_IRQ_MASK);
+ clk_disable(rtap->clk);
+
+ return 0;
+}
+
+static struct rtc_class_ops coh901331_ops = {
+ .read_time = coh901331_read_time,
+ .set_mmss = coh901331_set_mmss,
+ .read_alarm = coh901331_read_alarm,
+ .set_alarm = coh901331_set_alarm,
+ .alarm_irq_enable = coh901331_alarm_irq_enable,
+};
+
+static int __exit coh901331_remove(struct platform_device *pdev)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
+
+ if (rtap) {
+ free_irq(rtap->irq, rtap);
+ rtc_device_unregister(rtap->rtc);
+ clk_put(rtap->clk);
+ iounmap(rtap->virtbase);
+ release_mem_region(rtap->phybase, rtap->physize);
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtap);
+ }
+
+ return 0;
+}
+
+
+static int __init coh901331_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct coh901331_port *rtap;
+ struct resource *res;
+
+ rtap = kzalloc(sizeof(struct coh901331_port), GFP_KERNEL);
+ if (!rtap)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_resource;
+ }
+ rtap->phybase = res->start;
+ rtap->physize = resource_size(res);
+
+ if (request_mem_region(rtap->phybase, rtap->physize,
+ "rtc-coh901331") == NULL) {
+ ret = -EBUSY;
+ goto out_no_memregion;
+ }
+
+ rtap->virtbase = ioremap(rtap->phybase, rtap->physize);
+ if (!rtap->virtbase) {
+ ret = -ENOMEM;
+ goto out_no_remap;
+ }
+
+ rtap->irq = platform_get_irq(pdev, 0);
+ if (request_irq(rtap->irq, coh901331_interrupt, IRQF_DISABLED,
+ "RTC COH 901 331 Alarm", rtap)) {
+ ret = -EIO;
+ goto out_no_irq;
+ }
+
+ rtap->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rtap->clk)) {
+ ret = PTR_ERR(rtap->clk);
+ dev_err(&pdev->dev, "could not get clock\n");
+ goto out_no_clk;
+ }
+
+ /* We enable/disable the clock only to assure it works */
+ ret = clk_enable(rtap->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ goto out_no_clk_enable;
+ }
+ clk_disable(rtap->clk);
+
+ rtap->rtc = rtc_device_register("coh901331", &pdev->dev, &coh901331_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtap->rtc)) {
+ ret = PTR_ERR(rtap->rtc);
+ goto out_no_rtc;
+ }
+
+ platform_set_drvdata(pdev, rtap);
+
+ return 0;
+
+ out_no_rtc:
+ out_no_clk_enable:
+ clk_put(rtap->clk);
+ out_no_clk:
+ free_irq(rtap->irq, rtap);
+ out_no_irq:
+ iounmap(rtap->virtbase);
+ out_no_remap:
+ platform_set_drvdata(pdev, NULL);
+ out_no_memregion:
+ release_mem_region(rtap->phybase, rtap->physize);
+ out_no_resource:
+ kfree(rtap);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int coh901331_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
+
+ /*
+ * If this RTC alarm will be used for waking the system up,
+ * don't disable it of course. Else we just disable the alarm
+ * and await suspension.
+ */
+ if (device_may_wakeup(&pdev->dev)) {
+ enable_irq_wake(rtap->irq);
+ } else {
+ clk_enable(rtap->clk);
+ rtap->irqmaskstore = readl(rtap->virtbase + COH901331_IRQ_MASK);
+ writel(0, rtap->virtbase + COH901331_IRQ_MASK);
+ clk_disable(rtap->clk);
+ }
+ return 0;
+}
+
+static int coh901331_resume(struct platform_device *pdev)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
+
+ if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(rtap->irq);
+ } else {
+ clk_enable(rtap->clk);
+ writel(rtap->irqmaskstore, rtap->virtbase + COH901331_IRQ_MASK);
+ clk_disable(rtap->clk);
+ }
+ return 0;
+}
+#else
+#define coh901331_suspend NULL
+#define coh901331_resume NULL
+#endif
+
+static void coh901331_shutdown(struct platform_device *pdev)
+{
+ struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
+
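+ /* mask the alarm IRQ so it cannot fire once the system is shut down */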
+ clk_enable(rtap->clk);
+ writel(0, rtap->virtbase + COH901331_IRQ_MASK);
+ clk_disable(rtap->clk);
+}
+
+static struct platform_driver coh901331_driver = {
+ .driver = {
+ .name = "rtc-coh901331",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(coh901331_remove),
+ .suspend = coh901331_suspend,
+ .resume = coh901331_resume,
+ .shutdown = coh901331_shutdown,
+};
+
+static int __init coh901331_init(void)
+{
+ return platform_driver_probe(&coh901331_driver, coh901331_probe);
+}
+
+static void __exit coh901331_exit(void)
+{
+ platform_driver_unregister(&coh901331_driver);
+}
+
+module_init(coh901331_init);
+module_exit(coh901331_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 331 RTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 8f410e59d9f..2736b11a1b1 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -841,3 +841,4 @@ module_exit(ds1305_exit);
MODULE_DESCRIPTION("RTC driver for DS1305 and DS1306 chips");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-ds1305");
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 47a93c022d9..eb99ee4fa0f 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -896,8 +896,7 @@ read_rtc:
return 0;
exit_irq:
- if (ds1307->rtc)
- rtc_device_unregister(ds1307->rtc);
+ rtc_device_unregister(ds1307->rtc);
exit_free:
kfree(ds1307);
return err;
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index e01b955db07..cdb70505709 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -189,3 +189,4 @@ module_exit(ds1390_exit);
MODULE_DESCRIPTION("Dallas/Maxim DS1390/93/94 SPI RTC driver");
MODULE_AUTHOR("Mark Jackson <mpfj@mimc.co.uk>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-ds1390");
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
index c51589ede5b..a774ca35b5f 100644
--- a/drivers/rtc/rtc-ds3234.c
+++ b/drivers/rtc/rtc-ds3234.c
@@ -188,3 +188,4 @@ module_exit(ds3234_exit);
MODULE_DESCRIPTION("DS3234 SPI RTC driver");
MODULE_AUTHOR("Dennis Aberilla <denzzzhome@yahoo.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ds3234");
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 551332e4ed0..9da02d108b7 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -128,12 +128,16 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
- return -ENXIO;
+ if (res == NULL) {
+ err = -ENXIO;
+ goto fail_free;
+ }
res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (res == NULL)
- return -EBUSY;
+ if (res == NULL) {
+ err = -EBUSY;
+ goto fail_free;
+ }
ep93xx_rtc->mmio_base = ioremap(res->start, resource_size(res));
if (ep93xx_rtc->mmio_base == NULL) {
@@ -169,6 +173,8 @@ fail:
pdev->dev.platform_data = NULL;
}
release_mem_region(res->start, resource_size(res));
+fail_free:
+ kfree(ep93xx_rtc);
return err;
}
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index c3a18c58daf..c8c97a4169d 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -171,3 +171,4 @@ module_exit(m41t94_exit);
MODULE_AUTHOR("Kim B. Heino <Kim.Heino@bluegiga.com>");
MODULE_DESCRIPTION("Driver for ST M41T94 SPI RTC");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-m41t94");
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 36a8ea9ed8b..657403ebd54 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -175,3 +175,4 @@ module_exit(max6902_exit);
MODULE_DESCRIPTION ("max6902 spi RTC driver");
MODULE_AUTHOR ("Raphael Assenat");
MODULE_LICENSE ("GPL");
+MODULE_ALIAS("spi:rtc-max6902");
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
new file mode 100644
index 00000000000..6bd5072d4eb
--- /dev/null
+++ b/drivers/rtc/rtc-mxc.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/io.h>
+#include <linux/rtc.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+
+#define RTC_INPUT_CLK_32768HZ (0x00 << 5)
+#define RTC_INPUT_CLK_32000HZ (0x01 << 5)
+#define RTC_INPUT_CLK_38400HZ (0x02 << 5)
+
+#define RTC_SW_BIT (1 << 0)
+#define RTC_ALM_BIT (1 << 2)
+#define RTC_1HZ_BIT (1 << 4)
+#define RTC_2HZ_BIT (1 << 7)
+#define RTC_SAM0_BIT (1 << 8)
+#define RTC_SAM1_BIT (1 << 9)
+#define RTC_SAM2_BIT (1 << 10)
+#define RTC_SAM3_BIT (1 << 11)
+#define RTC_SAM4_BIT (1 << 12)
+#define RTC_SAM5_BIT (1 << 13)
+#define RTC_SAM6_BIT (1 << 14)
+#define RTC_SAM7_BIT (1 << 15)
+#define PIT_ALL_ON (RTC_2HZ_BIT | RTC_SAM0_BIT | RTC_SAM1_BIT | \
+ RTC_SAM2_BIT | RTC_SAM3_BIT | RTC_SAM4_BIT | \
+ RTC_SAM5_BIT | RTC_SAM6_BIT | RTC_SAM7_BIT)
+
+#define RTC_ENABLE_BIT (1 << 7)
+
+#define MAX_PIE_NUM 9
+#define MAX_PIE_FREQ 512
+static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = {
+ { 2, RTC_2HZ_BIT },
+ { 4, RTC_SAM0_BIT },
+ { 8, RTC_SAM1_BIT },
+ { 16, RTC_SAM2_BIT },
+ { 32, RTC_SAM3_BIT },
+ { 64, RTC_SAM4_BIT },
+ { 128, RTC_SAM5_BIT },
+ { 256, RTC_SAM6_BIT },
+ { MAX_PIE_FREQ, RTC_SAM7_BIT },
+};
+
+/* Those are the bits from a classic RTC we want to mimic */
+#define RTC_IRQF 0x80 /* any of the following 3 is active */
+#define RTC_PF 0x40 /* Periodic interrupt */
+#define RTC_AF 0x20 /* Alarm interrupt */
+#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
+
+#define MXC_RTC_TIME 0
+#define MXC_RTC_ALARM 1
+
+#define RTC_HOURMIN 0x00 /* 32bit rtc hour/min counter reg */
+#define RTC_SECOND 0x04 /* 32bit rtc seconds counter reg */
+#define RTC_ALRM_HM 0x08 /* 32bit rtc alarm hour/min reg */
+#define RTC_ALRM_SEC 0x0C /* 32bit rtc alarm seconds reg */
+#define RTC_RTCCTL 0x10 /* 32bit rtc control reg */
+#define RTC_RTCISR 0x14 /* 32bit rtc interrupt status reg */
+#define RTC_RTCIENR 0x18 /* 32bit rtc interrupt enable reg */
+#define RTC_STPWCH 0x1C /* 32bit rtc stopwatch min reg */
+#define RTC_DAYR 0x20 /* 32bit rtc days counter reg */
+#define RTC_DAYALARM 0x24 /* 32bit rtc day alarm reg */
+#define RTC_TEST1 0x28 /* 32bit rtc test reg 1 */
+#define RTC_TEST2 0x2C /* 32bit rtc test reg 2 */
+#define RTC_TEST3 0x30 /* 32bit rtc test reg 3 */
+
+struct rtc_plat_data {
+ struct rtc_device *rtc;
+ void __iomem *ioaddr;
+ int irq;
+ struct clk *clk;
+ unsigned int irqen;
+ int alrm_sec;
+ int alrm_min;
+ int alrm_hour;
+ int alrm_mday;
+ struct timespec mxc_rtc_delta;
+ struct rtc_time g_rtc_alarm;
+};
+
+/*
+ * This function is used to obtain the RTC time or the alarm value in
+ * second.
+ */
+static u32 get_alarm_or_time(struct device *dev, int time_alarm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 day = 0, hr = 0, min = 0, sec = 0, hr_min = 0;
+
+ switch (time_alarm) {
+ case MXC_RTC_TIME:
+ day = readw(ioaddr + RTC_DAYR);
+ hr_min = readw(ioaddr + RTC_HOURMIN);
+ sec = readw(ioaddr + RTC_SECOND);
+ break;
+ case MXC_RTC_ALARM:
+ day = readw(ioaddr + RTC_DAYALARM);
+ hr_min = readw(ioaddr + RTC_ALRM_HM) & 0xffff;
+ sec = readw(ioaddr + RTC_ALRM_SEC);
+ break;
+ }
+
+ hr = hr_min >> 8;
+ min = hr_min & 0xff;
+
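+ /* fold days, hours, minutes and seconds into a single seconds count */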
+ return (((day * 24 + hr) * 60) + min) * 60 + sec;
+}
+
+/*
+ * This function sets the RTC alarm value or the time value.
+ */
+static void set_alarm_or_time(struct device *dev, int time_alarm, u32 time)
+{
+ u32 day, hr, min, sec, temp;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ day = time / 86400;
+ time -= day * 86400;
+
+ /* time is within a day now */
+ hr = time / 3600;
+ time -= hr * 3600;
+
+ /* time is within an hour now */
+ min = time / 60;
+ sec = time - min * 60;
+
+ temp = (hr << 8) + min;
+
+ switch (time_alarm) {
+ case MXC_RTC_TIME:
+ writew(day, ioaddr + RTC_DAYR);
+ writew(sec, ioaddr + RTC_SECOND);
+ writew(temp, ioaddr + RTC_HOURMIN);
+ break;
+ case MXC_RTC_ALARM:
+ writew(day, ioaddr + RTC_DAYALARM);
+ writew(sec, ioaddr + RTC_ALRM_SEC);
+ writew(temp, ioaddr + RTC_ALRM_HM);
+ break;
+ }
+}
+
+/*
+ * This function updates the RTC alarm registers and then clears all the
+ * interrupt status bits.
+ */
+static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
+{
+ struct rtc_time alarm_tm, now_tm;
+ unsigned long now, time;
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ now = get_alarm_or_time(dev, MXC_RTC_TIME);
+ rtc_time_to_tm(now, &now_tm);
+ alarm_tm.tm_year = now_tm.tm_year;
+ alarm_tm.tm_mon = now_tm.tm_mon;
+ alarm_tm.tm_mday = now_tm.tm_mday;
+ alarm_tm.tm_hour = alrm->tm_hour;
+ alarm_tm.tm_min = alrm->tm_min;
+ alarm_tm.tm_sec = alrm->tm_sec;
+ rtc_tm_to_time(&now_tm, &now);
+ rtc_tm_to_time(&alarm_tm, &time);
+
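+ /* if the alarm time has already passed today, move it to the next day */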
+ if (time < now) {
+ time += 60 * 60 * 24;
+ rtc_time_to_tm(time, &alarm_tm);
+ }
+
+ ret = rtc_tm_to_time(&alarm_tm, &time);
+
+ /* clear all the interrupt status bits */
+ writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
+ set_alarm_or_time(dev, MXC_RTC_ALARM, time);
+
+ return ret;
+}
+
+/* This function is the RTC interrupt service routine. */
+static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 status;
+ u32 events = 0;
+
+ spin_lock_irq(&pdata->rtc->irq_lock);
+ status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
+ /* clear interrupt sources */
+ writew(status, ioaddr + RTC_RTCISR);
+
+ /*
+ * The interrupt sources were already acknowledged by writing the
+ * status back to RTC_RTCISR above, so the alarm bit is kept in
+ * 'status' to report the event and to re-arm the pending alarm below.
+ */
+
+ /* update irq data & counter */
+ if (status & RTC_ALM_BIT)
+ events |= (RTC_AF | RTC_IRQF);
+
+ if (status & RTC_1HZ_BIT)
+ events |= (RTC_UF | RTC_IRQF);
+
+ if (status & PIT_ALL_ON)
+ events |= (RTC_PF | RTC_IRQF);
+
+ if ((status & RTC_ALM_BIT) && rtc_valid_tm(&pdata->g_rtc_alarm))
+ rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
+
+ rtc_update_irq(pdata->rtc, 1, events);
+ spin_unlock_irq(&pdata->rtc->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Clear all interrupts and release the IRQ
+ */
+static void mxc_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ spin_lock_irq(&pdata->rtc->irq_lock);
+
+ /* Disable all rtc interrupts */
+ writew(0, ioaddr + RTC_RTCIENR);
+
+ /* Clear all interrupt status */
+ writew(0xffffffff, ioaddr + RTC_RTCISR);
+
+ spin_unlock_irq(&pdata->rtc->irq_lock);
+}
+
+static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 reg;
+
+ spin_lock_irq(&pdata->rtc->irq_lock);
+ reg = readw(ioaddr + RTC_RTCIENR);
+
+ if (enabled)
+ reg |= bit;
+ else
+ reg &= ~bit;
+
+ writew(reg, ioaddr + RTC_RTCIENR);
+ spin_unlock_irq(&pdata->rtc->irq_lock);
+}
+
+static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled);
+ return 0;
+}
+
+static int mxc_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ mxc_rtc_irq_enable(dev, RTC_1HZ_BIT, enabled);
+ return 0;
+}
+
+/*
+ * This function reads the current RTC time into tm in Gregorian date.
+ */
+static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u32 val;
+
+ /* Avoid roll-over from reading the different registers */
+ do {
+ val = get_alarm_or_time(dev, MXC_RTC_TIME);
+ } while (val != get_alarm_or_time(dev, MXC_RTC_TIME));
+
+ rtc_time_to_tm(val, tm);
+
+ return 0;
+}
+
+/*
+ * This function sets the internal RTC time based on tm in Gregorian date.
+ */
+static int mxc_rtc_set_mmss(struct device *dev, unsigned long time)
+{
+ /* Avoid roll-over from reading the different registers */
+ do {
+ set_alarm_or_time(dev, MXC_RTC_TIME, time);
+ } while (time != get_alarm_or_time(dev, MXC_RTC_TIME));
+
+ return 0;
+}
+
+/*
+ * This function reads the current alarm value into the passed in 'alrm'
+ * argument. It updates the alrm's pending field based on whether an
+ * alarm interrupt has occurred.
+ */
+static int mxc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ rtc_time_to_tm(get_alarm_or_time(dev, MXC_RTC_ALARM), &alrm->time);
+ alrm->pending = ((readw(ioaddr + RTC_RTCISR) & RTC_ALM_BIT)) ? 1 : 0;
+
+ return 0;
+}
+
+/*
+ * This function sets the RTC alarm based on passed in alrm.
+ */
+static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ int ret;
+
+ if (rtc_valid_tm(&alrm->time)) {
+ if (alrm->time.tm_sec > 59 ||
+ alrm->time.tm_hour > 23 ||
+ alrm->time.tm_min > 59)
+ return -EINVAL;
+
+ ret = rtc_update_alarm(dev, &alrm->time);
+ } else {
+ ret = rtc_valid_tm(&alrm->time);
+ if (ret)
+ return ret;
+
+ ret = rtc_update_alarm(dev, &alrm->time);
+ }
+
+ if (ret)
+ return ret;
+
+ memcpy(&pdata->g_rtc_alarm, &alrm->time, sizeof(struct rtc_time));
+ mxc_rtc_irq_enable(dev, RTC_ALM_BIT, alrm->enabled);
+
+ return 0;
+}
+
+/* RTC layer */
+static struct rtc_class_ops mxc_rtc_ops = {
+ .release = mxc_rtc_release,
+ .read_time = mxc_rtc_read_time,
+ .set_mmss = mxc_rtc_set_mmss,
+ .read_alarm = mxc_rtc_read_alarm,
+ .set_alarm = mxc_rtc_set_alarm,
+ .alarm_irq_enable = mxc_rtc_alarm_irq_enable,
+ .update_irq_enable = mxc_rtc_update_irq_enable,
+};
+
+static int __init mxc_rtc_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ struct rtc_device *rtc;
+ struct rtc_plat_data *pdata = NULL;
+ u32 reg;
+ int ret, rate;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->ioaddr = ioremap(res->start, resource_size(res));
+
+ clk = clk_get(&pdev->dev, "ckil");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
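+ /* program the RTC for the reference clock frequency it is actually fed */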
+ if (rate == 32768)
+ reg = RTC_INPUT_CLK_32768HZ;
+ else if (rate == 32000)
+ reg = RTC_INPUT_CLK_32000HZ;
+ else if (rate == 38400)
+ reg = RTC_INPUT_CLK_38400HZ;
+ else {
+ dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n",
+ clk_get_rate(clk));
+ ret = -EINVAL;
+ goto exit_free_pdata;
+ }
+
+ reg |= RTC_ENABLE_BIT;
+ writew(reg, (pdata->ioaddr + RTC_RTCCTL));
+ if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
+ dev_err(&pdev->dev, "hardware module can't be enabled!\n");
+ ret = -EIO;
+ goto exit_free_pdata;
+ }
+
+ pdata->clk = clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(pdata->clk)) {
+ dev_err(&pdev->dev, "unable to get clock!\n");
+ ret = PTR_ERR(pdata->clk);
+ goto exit_free_pdata;
+ }
+
+ clk_enable(pdata->clk);
+
+ rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto exit_put_clk;
+ }
+
+ pdata->rtc = rtc;
+ platform_set_drvdata(pdev, pdata);
+
+ /* Configure and enable the RTC */
+ pdata->irq = platform_get_irq(pdev, 0);
+
+ if (pdata->irq >= 0 &&
+ request_irq(pdata->irq, mxc_rtc_interrupt, IRQF_SHARED,
+ pdev->name, pdev) < 0) {
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+
+ return 0;
+
+exit_put_clk:
+ clk_put(pdata->clk);
+
+exit_free_pdata:
+ kfree(pdata);
+
+ return ret;
+}
+
+static int __exit mxc_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(pdata->rtc);
+
+ if (pdata->irq >= 0)
+ free_irq(pdata->irq, pdev);
+
+ clk_disable(pdata->clk);
+ clk_put(pdata->clk);
+ kfree(pdata);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mxc_rtc_driver = {
+ .driver = {
+ .name = "mxc_rtc",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(mxc_rtc_remove),
+};
+
+static int __init mxc_rtc_init(void)
+{
+ return platform_driver_probe(&mxc_rtc_driver, mxc_rtc_probe);
+}
+
+static void __exit mxc_rtc_exit(void)
+{
+ platform_driver_unregister(&mxc_rtc_driver);
+}
+
+module_init(mxc_rtc_init);
+module_exit(mxc_rtc_exit);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("RTC driver for Freescale MXC");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index bd1ce8e2bc1..0587d53987f 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -430,7 +430,7 @@ fail:
static int __exit omap_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);;
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
new file mode 100644
index 00000000000..a99c28992d2
--- /dev/null
+++ b/drivers/rtc/rtc-pcap.c
@@ -0,0 +1,224 @@
+/*
+ * pcap rtc code for Motorola EZX phones
+ *
+ * Copyright (c) 2008 guiming zhuo <gmzhuo@gmail.com>
+ * Copyright (c) 2009 Daniel Ribeiro <drwyrm@gmail.com>
+ *
+ * Based on Motorola's rtc.c Copyright (c) 2003-2005 Motorola
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mfd/ezx-pcap.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+
+struct pcap_rtc {
+ struct pcap_chip *pcap;
+ struct rtc_device *rtc;
+};
+
+static irqreturn_t pcap_rtc_irq(int irq, void *_pcap_rtc)
+{
+ struct pcap_rtc *pcap_rtc = _pcap_rtc;
+ unsigned long rtc_events;
+
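+ /* map the PCAP 1Hz tick and time-of-day alarm onto RTC update/alarm events */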
+ if (irq == pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ))
+ rtc_events = RTC_IRQF | RTC_UF;
+ else if (irq == pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA))
+ rtc_events = RTC_IRQF | RTC_AF;
+ else
+ rtc_events = 0;
+
+ rtc_update_irq(pcap_rtc->rtc, 1, rtc_events);
+ return IRQ_HANDLED;
+}
+
+static int pcap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned long secs;
+ u32 tod; /* time of day, seconds since midnight */
+ u32 days; /* days since 1/1/1970 */
+
+ ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TODA, &tod);
+ secs = tod & PCAP_RTC_TOD_MASK;
+
+ ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, &days);
+ secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
+
+ rtc_time_to_tm(secs, tm);
+
+ return 0;
+}
+
+static int pcap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned long secs;
+ u32 tod, days;
+
+ rtc_tm_to_time(tm, &secs);
+
+ tod = secs % SEC_PER_DAY;
+ ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TODA, tod);
+
+ days = secs / SEC_PER_DAY;
+ ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAYA, days);
+
+ return 0;
+}
+
+static int pcap_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ unsigned long secs;
+ u32 tod, days;
+
+ ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_TOD, &tod);
+ secs = tod & PCAP_RTC_TOD_MASK;
+
+ ezx_pcap_read(pcap_rtc->pcap, PCAP_REG_RTC_DAY, &days);
+ secs += (days & PCAP_RTC_DAY_MASK) * SEC_PER_DAY;
+
+ rtc_time_to_tm(secs, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int pcap_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+ u32 tod, days;
+
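+ /* the PCAP stores seconds-of-day and a day counter in separate registers */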
+ tod = secs % SEC_PER_DAY;
+ ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_TOD, tod);
+
+ days = secs / SEC_PER_DAY;
+ ezx_pcap_write(pcap_rtc->pcap, PCAP_REG_RTC_DAY, days);
+
+ return 0;
+}
+
+static int pcap_rtc_irq_enable(struct device *dev, int pirq, unsigned int en)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+
+ if (en)
+ enable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));
+ else
+ disable_irq(pcap_to_irq(pcap_rtc->pcap, pirq));
+
+ return 0;
+}
+
+static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
+{
+ return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en);
+}
+
+static int pcap_rtc_update_irq_enable(struct device *dev, unsigned int en)
+{
+ return pcap_rtc_irq_enable(dev, PCAP_IRQ_1HZ, en);
+}
+
+static const struct rtc_class_ops pcap_rtc_ops = {
+ .read_time = pcap_rtc_read_time,
+ .read_alarm = pcap_rtc_read_alarm,
+ .set_alarm = pcap_rtc_set_alarm,
+ .set_mmss = pcap_rtc_set_mmss,
+ .alarm_irq_enable = pcap_rtc_alarm_irq_enable,
+ .update_irq_enable = pcap_rtc_update_irq_enable,
+};
+
+static int __devinit pcap_rtc_probe(struct platform_device *pdev)
+{
+ struct pcap_rtc *pcap_rtc;
+ int timer_irq, alarm_irq;
+ int err = -ENOMEM;
+
+ pcap_rtc = kmalloc(sizeof(struct pcap_rtc), GFP_KERNEL);
+ if (!pcap_rtc)
+ return err;
+
+ pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent);
+
+ pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev,
+ &pcap_rtc_ops, THIS_MODULE);
+ if (IS_ERR(pcap_rtc->rtc)) {
+ err = PTR_ERR(pcap_rtc->rtc);
+ goto fail_rtc;
+ }
+
+ platform_set_drvdata(pdev, pcap_rtc);
+
+ timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
+ alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);
+
+ err = request_irq(timer_irq, pcap_rtc_irq, 0, "RTC Timer", pcap_rtc);
+ if (err)
+ goto fail_timer;
+
+ err = request_irq(alarm_irq, pcap_rtc_irq, 0, "RTC Alarm", pcap_rtc);
+ if (err)
+ goto fail_alarm;
+
+ return 0;
+fail_alarm:
+ free_irq(timer_irq, pcap_rtc);
+fail_timer:
+ rtc_device_unregister(pcap_rtc->rtc);
+fail_rtc:
+ kfree(pcap_rtc);
+ return err;
+}
+
+static int __devexit pcap_rtc_remove(struct platform_device *pdev)
+{
+ struct pcap_rtc *pcap_rtc = platform_get_drvdata(pdev);
+
+ free_irq(pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ), pcap_rtc);
+ free_irq(pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA), pcap_rtc);
+ rtc_device_unregister(pcap_rtc->rtc);
+ kfree(pcap_rtc);
+
+ return 0;
+}
+
+static struct platform_driver pcap_rtc_driver = {
+ .remove = __devexit_p(pcap_rtc_remove),
+ .driver = {
+ .name = "pcap-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rtc_pcap_init(void)
+{
+ return platform_driver_probe(&pcap_rtc_driver, pcap_rtc_probe);
+}
+
+static void __exit rtc_pcap_exit(void)
+{
+ platform_driver_unregister(&pcap_rtc_driver);
+}
+
+module_init(rtc_pcap_init);
+module_exit(rtc_pcap_exit);
+
+MODULE_DESCRIPTION("Motorola pcap rtc driver");
+MODULE_AUTHOR("guiming zhuo <gmzhuo@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
new file mode 100644
index 00000000000..e75df9d50e2
--- /dev/null
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -0,0 +1,364 @@
+/*
+ * An SPI driver for the Philips PCF2123 RTC
+ * Copyright 2009 Cyber Switching, Inc.
+ *
+ * Author: Chris Verges <chrisv@cyberswitching.com>
+ * Maintainers: http://www.cyberswitching.com
+ *
+ * based on the RS5C348 driver in this same directory.
+ *
+ * Thanks to Christian Pellegrin <chripell@fsfe.org> for
+ * the sysfs contributions to this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Please note that the CS is active high, so platform data
+ * should look something like:
+ *
+ * static struct spi_board_info ek_spi_devices[] = {
+ * ...
+ * {
+ * .modalias = "rtc-pcf2123",
+ * .chip_select = 1,
+ * .controller_data = (void *)AT91_PIN_PA10,
+ * .max_speed_hz = 1000 * 1000,
+ * .mode = SPI_CS_HIGH,
+ * .bus_num = 0,
+ * },
+ * ...
+ *};
+ *
+ */
+
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION "0.6"
+
+#define PCF2123_REG_CTRL1 (0x00) /* Control Register 1 */
+#define PCF2123_REG_CTRL2 (0x01) /* Control Register 2 */
+#define PCF2123_REG_SC (0x02) /* datetime */
+#define PCF2123_REG_MN (0x03)
+#define PCF2123_REG_HR (0x04)
+#define PCF2123_REG_DM (0x05)
+#define PCF2123_REG_DW (0x06)
+#define PCF2123_REG_MO (0x07)
+#define PCF2123_REG_YR (0x08)
+
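+/* a command byte combines the R/W flag and subaddress bit with a register address */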
+#define PCF2123_SUBADDR (1 << 4)
+#define PCF2123_WRITE ((0 << 7) | PCF2123_SUBADDR)
+#define PCF2123_READ ((1 << 7) | PCF2123_SUBADDR)
+
+static struct spi_driver pcf2123_driver;
+
+struct pcf2123_sysfs_reg {
+ struct device_attribute attr;
+ char name[2];
+};
+
+struct pcf2123_plat_data {
+ struct rtc_device *rtc;
+ struct pcf2123_sysfs_reg regs[16];
+};
+
+/*
+ * Causes a 30 nanosecond delay to ensure that the PCF2123 chip select
+ * is released properly after an SPI write. This function should be
+ * called after EVERY read/write call over SPI.
+ */
+static inline void pcf2123_delay_trec(void)
+{
+ ndelay(30);
+}
+
+static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
+ char *buffer)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct pcf2123_sysfs_reg *r;
+ u8 txbuf[1], rxbuf[1];
+ unsigned long reg;
+ int ret;
+
+ r = container_of(attr, struct pcf2123_sysfs_reg, attr);
+
+ if (strict_strtoul(r->name, 16, &reg))
+ return -EINVAL;
+
+ txbuf[0] = PCF2123_READ | reg;
+ ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
+ if (ret < 0)
+ return -EIO;
+ pcf2123_delay_trec();
+ return sprintf(buffer, "0x%x\n", rxbuf[0]);
+}
+
+static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct pcf2123_sysfs_reg *r;
+ u8 txbuf[2];
+ unsigned long reg;
+ unsigned long val;
+
+ int ret;
+
+ r = container_of(attr, struct pcf2123_sysfs_reg, attr);
+
+ if (strict_strtoul(r->name, 16, &reg)
+ || strict_strtoul(buffer, 10, &val))
+ return -EINVAL;
+
+ txbuf[0] = PCF2123_WRITE | reg;
+ txbuf[1] = val;
+ ret = spi_write(spi, txbuf, sizeof(txbuf));
+ if (ret < 0)
+ return -EIO;
+ pcf2123_delay_trec();
+ return count;
+}
+
+static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ u8 txbuf[1], rxbuf[7];
+ int ret;
+
+ txbuf[0] = PCF2123_READ | PCF2123_REG_SC;
+ ret = spi_write_then_read(spi, txbuf, sizeof(txbuf),
+ rxbuf, sizeof(rxbuf));
+ if (ret < 0)
+ return ret;
+ pcf2123_delay_trec();
+
+ tm->tm_sec = bcd2bin(rxbuf[0] & 0x7F);
+ tm->tm_min = bcd2bin(rxbuf[1] & 0x7F);
+ tm->tm_hour = bcd2bin(rxbuf[2] & 0x3F); /* rtc hr 0-23 */
+ tm->tm_mday = bcd2bin(rxbuf[3] & 0x3F);
+ tm->tm_wday = rxbuf[4] & 0x07;
+ tm->tm_mon = bcd2bin(rxbuf[5] & 0x1F) - 1; /* rtc mn 1-12 */
+ tm->tm_year = bcd2bin(rxbuf[6]);
+ if (tm->tm_year < 70)
+ tm->tm_year += 100; /* assume we are in 1970...2069 */
+
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ /* the clock can give out invalid datetime, but we cannot return
+ * -EINVAL otherwise hwclock will refuse to set the time on bootup.
+ */
+ if (rtc_valid_tm(tm) < 0)
+ dev_err(dev, "retrieved date/time is not valid.\n");
+
+ return 0;
+}
+
+static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ u8 txbuf[8];
+ int ret;
+
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+ /* Stop the counter first */
+ txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
+ txbuf[1] = 0x20;
+ ret = spi_write(spi, txbuf, 2);
+ if (ret < 0)
+ return ret;
+ pcf2123_delay_trec();
+
+ /* Set the new time */
+ txbuf[0] = PCF2123_WRITE | PCF2123_REG_SC;
+ txbuf[1] = bin2bcd(tm->tm_sec & 0x7F);
+ txbuf[2] = bin2bcd(tm->tm_min & 0x7F);
+ txbuf[3] = bin2bcd(tm->tm_hour & 0x3F);
+ txbuf[4] = bin2bcd(tm->tm_mday & 0x3F);
+ txbuf[5] = tm->tm_wday & 0x07;
+ txbuf[6] = bin2bcd((tm->tm_mon + 1) & 0x1F); /* rtc mn 1-12 */
+ txbuf[7] = bin2bcd(tm->tm_year < 100 ? tm->tm_year : tm->tm_year - 100);
+
+ ret = spi_write(spi, txbuf, sizeof(txbuf));
+ if (ret < 0)
+ return ret;
+ pcf2123_delay_trec();
+
+ /* Start the counter */
+ txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
+ txbuf[1] = 0x00;
+ ret = spi_write(spi, txbuf, 2);
+ if (ret < 0)
+ return ret;
+ pcf2123_delay_trec();
+
+ return 0;
+}
+
+static const struct rtc_class_ops pcf2123_rtc_ops = {
+ .read_time = pcf2123_rtc_read_time,
+ .set_time = pcf2123_rtc_set_time,
+};
+
+static int __devinit pcf2123_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ struct pcf2123_plat_data *pdata;
+ u8 txbuf[2], rxbuf[2];
+ int ret, i;
+
+ pdata = kzalloc(sizeof(struct pcf2123_plat_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ spi->dev.platform_data = pdata;
+
+ /* Send a software reset command */
+ txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
+ txbuf[1] = 0x58;
+ dev_dbg(&spi->dev, "resetting RTC (0x%02X 0x%02X)\n",
+ txbuf[0], txbuf[1]);
+ ret = spi_write(spi, txbuf, 2 * sizeof(u8));
+ if (ret < 0)
+ goto kfree_exit;
+ pcf2123_delay_trec();
+
+ /* Stop the counter */
+ txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
+ txbuf[1] = 0x20;
+ dev_dbg(&spi->dev, "stopping RTC (0x%02X 0x%02X)\n",
+ txbuf[0], txbuf[1]);
+ ret = spi_write(spi, txbuf, 2 * sizeof(u8));
+ if (ret < 0)
+ goto kfree_exit;
+ pcf2123_delay_trec();
+
+ /* See if the counter was actually stopped */
+ txbuf[0] = PCF2123_READ | PCF2123_REG_CTRL1;
+ dev_dbg(&spi->dev, "checking for presence of RTC (0x%02X)\n",
+ txbuf[0]);
+ ret = spi_write_then_read(spi, txbuf, 1 * sizeof(u8),
+ rxbuf, 2 * sizeof(u8));
+ dev_dbg(&spi->dev, "received data from RTC (0x%02X 0x%02X)\n",
+ rxbuf[0], rxbuf[1]);
+ if (ret < 0)
+ goto kfree_exit;
+ pcf2123_delay_trec();
+
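+ /* the STOP bit written above must read back if a PCF2123 is really present */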
+ if (!(rxbuf[0] & 0x20)) {
+ dev_err(&spi->dev, "chip not found\n");
+ ret = -ENODEV;
+ goto kfree_exit;
+ }
+
+ dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
+ dev_info(&spi->dev, "spiclk %u KHz.\n",
+ (spi->max_speed_hz + 500) / 1000);
+
+ /* Start the counter */
+ txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
+ txbuf[1] = 0x00;
+ ret = spi_write(spi, txbuf, sizeof(txbuf));
+ if (ret < 0)
+ goto kfree_exit;
+ pcf2123_delay_trec();
+
+ /* Finalize the initialization */
+ rtc = rtc_device_register(pcf2123_driver.driver.name, &spi->dev,
+ &pcf2123_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc)) {
+ dev_err(&spi->dev, "failed to register.\n");
+ ret = PTR_ERR(rtc);
+ goto kfree_exit;
+ }
+
+ pdata->rtc = rtc;
+
+ for (i = 0; i < 16; i++) {
+ sprintf(pdata->regs[i].name, "%1x", i);
+ pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
+ pdata->regs[i].attr.attr.name = pdata->regs[i].name;
+ pdata->regs[i].attr.show = pcf2123_show;
+ pdata->regs[i].attr.store = pcf2123_store;
+ ret = device_create_file(&spi->dev, &pdata->regs[i].attr);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to create sysfs %s\n",
+ pdata->regs[i].name);
+ goto sysfs_exit;
+ }
+ }
+
+ return 0;
+
+sysfs_exit:
+ for (i--; i >= 0; i--)
+ device_remove_file(&spi->dev, &pdata->regs[i].attr);
+
+kfree_exit:
+ kfree(pdata);
+ spi->dev.platform_data = NULL;
+ return ret;
+}
+
+static int pcf2123_remove(struct spi_device *spi)
+{
+ struct pcf2123_plat_data *pdata = spi->dev.platform_data;
+ int i;
+
+ if (pdata) {
+ struct rtc_device *rtc = pdata->rtc;
+
+ if (rtc)
+ rtc_device_unregister(rtc);
+ for (i = 0; i < 16; i++)
+ if (pdata->regs[i].name[0])
+ device_remove_file(&spi->dev,
+ &pdata->regs[i].attr);
+ kfree(pdata);
+ }
+
+ return 0;
+}
+
+static struct spi_driver pcf2123_driver = {
+ .driver = {
+ .name = "rtc-pcf2123",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = pcf2123_probe,
+ .remove = __devexit_p(pcf2123_remove),
+};
+
+static int __init pcf2123_init(void)
+{
+ return spi_register_driver(&pcf2123_driver);
+}
+
+static void __exit pcf2123_exit(void)
+{
+ spi_unregister_driver(&pcf2123_driver);
+}
+
+MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>");
+MODULE_DESCRIPTION("NXP PCF2123 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pcf2123_init);
+module_exit(pcf2123_exit);
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index bb8cc05605a..747ca194fad 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -438,34 +438,37 @@ static int __exit pxa_rtc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int pxa_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int pxa_rtc_suspend(struct device *dev)
{
- struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
+ struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
enable_irq_wake(pxa_rtc->irq_Alrm);
return 0;
}
-static int pxa_rtc_resume(struct platform_device *pdev)
+static int pxa_rtc_resume(struct device *dev)
{
- struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
+ struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
disable_irq_wake(pxa_rtc->irq_Alrm);
return 0;
}
-#else
-#define pxa_rtc_suspend NULL
-#define pxa_rtc_resume NULL
+
+static struct dev_pm_ops pxa_rtc_pm_ops = {
+ .suspend = pxa_rtc_suspend,
+ .resume = pxa_rtc_resume,
+};
#endif
static struct platform_driver pxa_rtc_driver = {
.remove = __exit_p(pxa_rtc_remove),
- .suspend = pxa_rtc_suspend,
- .resume = pxa_rtc_resume,
.driver = {
- .name = "pxa-rtc",
+ .name = "pxa-rtc",
+#ifdef CONFIG_PM
+ .pm = &pxa_rtc_pm_ops,
+#endif
},
};
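
The rtc-pxa hunk above, and the rtc-sa1100 and pxa serial hunks further down, all perform the same conversion from the legacy platform_driver suspend/resume callbacks to struct dev_pm_ops. For reference, a minimal sketch of the pattern in isolation; the "foo" driver, its private data, and its wake IRQ are illustrative only, not part of this patch:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

struct foo_priv {
	int irq;		/* hypothetical wake-capable interrupt */
};

/* dev_pm_ops callbacks take a struct device, not a platform_device */
static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);	/* was platform_get_drvdata() */

	if (device_may_wakeup(dev))
		enable_irq_wake(priv->irq);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(priv->irq);
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,		/* replaces .suspend/.resume in the driver struct */
	},
};

The pm_message_t argument disappears because dev_pm_ops has distinct callbacks per transition; the drivers touched here only need plain suspend/resume.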
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 42028f233be..9beba49c3c5 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -174,3 +174,4 @@ module_exit(r9701_exit);
MODULE_DESCRIPTION("r9701 spi RTC driver");
MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-r9701");
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index dd1e2bc7a47..2099037cb3e 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -251,3 +251,4 @@ MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Ricoh RS5C348 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("spi:rtc-rs5c348");
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 021b2928f0b..29f98a70586 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -393,31 +393,34 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int sa1100_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int sa1100_rtc_suspend(struct device *dev)
{
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
enable_irq_wake(IRQ_RTCAlrm);
return 0;
}
-static int sa1100_rtc_resume(struct platform_device *pdev)
+static int sa1100_rtc_resume(struct device *dev)
{
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(dev))
disable_irq_wake(IRQ_RTCAlrm);
return 0;
}
-#else
-#define sa1100_rtc_suspend NULL
-#define sa1100_rtc_resume NULL
+
+static struct dev_pm_ops sa1100_rtc_pm_ops = {
+ .suspend = sa1100_rtc_suspend,
+ .resume = sa1100_rtc_resume,
+};
#endif
static struct platform_driver sa1100_rtc_driver = {
.probe = sa1100_rtc_probe,
.remove = sa1100_rtc_remove,
- .suspend = sa1100_rtc_suspend,
- .resume = sa1100_rtc_resume,
.driver = {
- .name = "sa1100-rtc",
+ .name = "sa1100-rtc",
+#ifdef CONFIG_PM
+ .pm = &sa1100_rtc_pm_ops,
+#endif
},
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
new file mode 100644
index 00000000000..d7ce1a5c857
--- /dev/null
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -0,0 +1,304 @@
+/*
+ * Freescale STMP37XX/STMP378X Real Time Clock driver
+ *
+ * Copyright (c) 2007 Sigmatel, Inc.
+ * Peter Hartley, <peter.hartley@sigmatel.com>
+ *
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+
+#include <mach/platform.h>
+#include <mach/stmp3xxx.h>
+#include <mach/regs-rtc.h>
+
+struct stmp3xxx_rtc_data {
+ struct rtc_device *rtc;
+ unsigned irq_count;
+ void __iomem *io;
+ int irq_alarm, irq_1msec;
+};
+
+static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+{
+ /*
+ * The datasheet doesn't say which way round the
+ * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
+ * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+ */
+ while (__raw_readl(rtc_data->io + HW_RTC_STAT) &
+ BF(0x80, RTC_STAT_STALE_REGS))
+ cpu_relax();
+}
+
+/* Time read/write */
+static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ stmp3xxx_wait_time(rtc_data);
+ rtc_time_to_tm(__raw_readl(rtc_data->io + HW_RTC_SECONDS), rtc_tm);
+ return 0;
+}
+
+static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ __raw_writel(t, rtc_data->io + HW_RTC_SECONDS);
+ stmp3xxx_wait_time(rtc_data);
+ return 0;
+}
+
+/* interrupt(s) handler */
+static irqreturn_t stmp3xxx_rtc_interrupt(int irq, void *dev_id)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev_id);
+ u32 status;
+ u32 events = 0;
+
+ status = __raw_readl(rtc_data->io + HW_RTC_CTRL) &
+ (BM_RTC_CTRL_ALARM_IRQ | BM_RTC_CTRL_ONEMSEC_IRQ);
+
+ if (status & BM_RTC_CTRL_ALARM_IRQ) {
+ stmp3xxx_clearl(BM_RTC_CTRL_ALARM_IRQ,
+ rtc_data->io + HW_RTC_CTRL);
+ events |= RTC_AF | RTC_IRQF;
+ }
+
+ if (status & BM_RTC_CTRL_ONEMSEC_IRQ) {
+ stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ,
+ rtc_data->io + HW_RTC_CTRL);
+ if (++rtc_data->irq_count % 1000 == 0) {
+ events |= RTC_UF | RTC_IRQF;
+ rtc_data->irq_count = 0;
+ }
+ }
+
+ if (events)
+ rtc_update_irq(rtc_data->rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int stmp3xxx_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+ void __iomem *p = rtc_data->io + HW_RTC_PERSISTENT0,
+ *ctl = rtc_data->io + HW_RTC_CTRL;
+
+ if (enabled) {
+ stmp3xxx_setl(BM_RTC_PERSISTENT0_ALARM_EN |
+ BM_RTC_PERSISTENT0_ALARM_WAKE_EN, p);
+ stmp3xxx_setl(BM_RTC_CTRL_ALARM_IRQ_EN, ctl);
+ } else {
+ stmp3xxx_clearl(BM_RTC_PERSISTENT0_ALARM_EN |
+ BM_RTC_PERSISTENT0_ALARM_WAKE_EN, p);
+ stmp3xxx_clearl(BM_RTC_CTRL_ALARM_IRQ_EN, ctl);
+ }
+ return 0;
+}
+
+static int stmp3xxx_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ if (enabled)
+ stmp3xxx_setl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
+ rtc_data->io + HW_RTC_CTRL);
+ else
+ stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
+ rtc_data->io + HW_RTC_CTRL);
+ return 0;
+}
+
+static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(__raw_readl(rtc_data->io + HW_RTC_ALARM), &alm->time);
+ return 0;
+}
+
+static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned long t;
+ struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+ rtc_tm_to_time(&alm->time, &t);
+ __raw_writel(t, rtc_data->io + HW_RTC_ALARM);
+ return 0;
+}
+
+static struct rtc_class_ops stmp3xxx_rtc_ops = {
+ .alarm_irq_enable = stmp3xxx_alarm_irq_enable,
+ .update_irq_enable = stmp3xxx_update_irq_enable,
+ .read_time = stmp3xxx_rtc_gettime,
+ .set_mmss = stmp3xxx_rtc_set_mmss,
+ .read_alarm = stmp3xxx_rtc_read_alarm,
+ .set_alarm = stmp3xxx_rtc_set_alarm,
+};
+
+static int stmp3xxx_rtc_remove(struct platform_device *pdev)
+{
+ struct stmp3xxx_rtc_data *rtc_data = platform_get_drvdata(pdev);
+
+ if (!rtc_data)
+ return 0;
+
+ stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN | BM_RTC_CTRL_ALARM_IRQ_EN,
+ rtc_data->io + HW_RTC_CTRL);
+ free_irq(rtc_data->irq_alarm, &pdev->dev);
+ free_irq(rtc_data->irq_1msec, &pdev->dev);
+ rtc_device_unregister(rtc_data->rtc);
+ iounmap(rtc_data->io);
+ kfree(rtc_data);
+
+ return 0;
+}
+
+static int stmp3xxx_rtc_probe(struct platform_device *pdev)
+{
+ struct stmp3xxx_rtc_data *rtc_data;
+ struct resource *r;
+ int err;
+
+ rtc_data = kzalloc(sizeof *rtc_data, GFP_KERNEL);
+ if (!rtc_data)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get resource\n");
+ err = -ENXIO;
+ goto out_free;
+ }
+
+ rtc_data->io = ioremap(r->start, resource_size(r));
+ if (!rtc_data->io) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto out_free;
+ }
+
+ rtc_data->irq_alarm = platform_get_irq(pdev, 0);
+ rtc_data->irq_1msec = platform_get_irq(pdev, 1);
+
+ if (!(__raw_readl(HW_RTC_STAT + rtc_data->io) &
+ BM_RTC_STAT_RTC_PRESENT)) {
+ dev_err(&pdev->dev, "no device onboard\n");
+ err = -ENODEV;
+ goto out_remap;
+ }
+
+ stmp3xxx_reset_block(rtc_data->io, true);
+ stmp3xxx_clearl(BM_RTC_PERSISTENT0_ALARM_EN |
+ BM_RTC_PERSISTENT0_ALARM_WAKE_EN |
+ BM_RTC_PERSISTENT0_ALARM_WAKE,
+ rtc_data->io + HW_RTC_PERSISTENT0);
+ rtc_data->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &stmp3xxx_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc_data->rtc)) {
+ err = PTR_ERR(rtc_data->rtc);
+ goto out_remap;
+ }
+
+ rtc_data->irq_count = 0;
+ err = request_irq(rtc_data->irq_alarm, stmp3xxx_rtc_interrupt,
+ IRQF_DISABLED, "RTC alarm", &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ%d\n",
+ rtc_data->irq_alarm);
+ goto out_irq_alarm;
+ }
+ err = request_irq(rtc_data->irq_1msec, stmp3xxx_rtc_interrupt,
+ IRQF_DISABLED, "RTC tick", &pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ%d\n",
+ rtc_data->irq_1msec);
+ goto out_irq1;
+ }
+
+ platform_set_drvdata(pdev, rtc_data);
+
+ return 0;
+
+out_irq1:
+ free_irq(rtc_data->irq_alarm, &pdev->dev);
+out_irq_alarm:
+ stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN | BM_RTC_CTRL_ALARM_IRQ_EN,
+ rtc_data->io + HW_RTC_CTRL);
+ rtc_device_unregister(rtc_data->rtc);
+out_remap:
+ iounmap(rtc_data->io);
+out_free:
+ kfree(rtc_data);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static int stmp3xxx_rtc_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int stmp3xxx_rtc_resume(struct platform_device *dev)
+{
+ struct stmp3xxx_rtc_data *rtc_data = platform_get_drvdata(dev);
+
+ stmp3xxx_reset_block(rtc_data->io, true);
+ stmp3xxx_clearl(BM_RTC_PERSISTENT0_ALARM_EN |
+ BM_RTC_PERSISTENT0_ALARM_WAKE_EN |
+ BM_RTC_PERSISTENT0_ALARM_WAKE,
+ rtc_data->io + HW_RTC_PERSISTENT0);
+ return 0;
+}
+#else
+#define stmp3xxx_rtc_suspend NULL
+#define stmp3xxx_rtc_resume NULL
+#endif
+
+static struct platform_driver stmp3xxx_rtcdrv = {
+ .probe = stmp3xxx_rtc_probe,
+ .remove = stmp3xxx_rtc_remove,
+ .suspend = stmp3xxx_rtc_suspend,
+ .resume = stmp3xxx_rtc_resume,
+ .driver = {
+ .name = "stmp3xxx-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init stmp3xxx_rtc_init(void)
+{
+ return platform_driver_register(&stmp3xxx_rtcdrv);
+}
+
+static void __exit stmp3xxx_rtc_exit(void)
+{
+ platform_driver_unregister(&stmp3xxx_rtcdrv);
+}
+
+module_init(stmp3xxx_rtc_init);
+module_exit(stmp3xxx_rtc_exit);
+
+MODULE_DESCRIPTION("STMP3xxx RTC Driver");
+MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 2531ce4c9db..7dd23a6fc82 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -102,6 +102,19 @@ rtc_sysfs_set_max_user_freq(struct device *dev, struct device_attribute *attr,
return n;
}
+static ssize_t
+rtc_sysfs_show_hctosys(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+ if (strcmp(dev_name(&to_rtc_device(dev)->dev),
+ CONFIG_RTC_HCTOSYS_DEVICE) == 0)
+ return sprintf(buf, "1\n");
+ else
+#endif
+ return sprintf(buf, "0\n");
+}
+
static struct device_attribute rtc_attrs[] = {
__ATTR(name, S_IRUGO, rtc_sysfs_show_name, NULL),
__ATTR(date, S_IRUGO, rtc_sysfs_show_date, NULL),
@@ -109,6 +122,7 @@ static struct device_attribute rtc_attrs[] = {
__ATTR(since_epoch, S_IRUGO, rtc_sysfs_show_since_epoch, NULL),
__ATTR(max_user_freq, S_IRUGO | S_IWUSR, rtc_sysfs_show_max_user_freq,
rtc_sysfs_set_max_user_freq),
+ __ATTR(hctosys, S_IRUGO, rtc_sysfs_show_hctosys, NULL),
{ },
};
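
The new read-only hctosys attribute reports "1" for the RTC named by CONFIG_RTC_HCTOSYS_DEVICE (the clock used to set system time at boot) and "0" for every other RTC. A small userspace sketch that checks it; the rtc0 device name is just an example:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/rtc/rtc0/hctosys", "r");
	int c;

	if (!f) {
		perror("hctosys");
		return 1;
	}
	c = fgetc(f);
	fclose(f);
	printf("rtc0 %s the boot-time clock source\n",
	       c == '1' ? "is" : "is not");
	return 0;
}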
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e109da4583a..dad0449475b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2146,7 +2146,7 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-struct block_device_operations
+const struct block_device_operations
dasd_device_operations = {
.owner = THIS_MODULE,
.open = dasd_open,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a1ce573648a..ab352175558 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -706,7 +706,7 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
uid->ssid = private->gneq->subsystemID;
- uid->real_unit_addr = private->ned->unit_addr;;
+ uid->real_unit_addr = private->ned->unit_addr;
if (private->sneq) {
uid->type = private->sneq->sua_flags;
if (uid->type == UA_BASE_PAV_ALIAS)
@@ -935,6 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
+ memset(&private->features, 0, sizeof(struct dasd_rssd_features));
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
@@ -982,7 +983,9 @@ static int dasd_eckd_read_features(struct dasd_device *device)
features = (struct dasd_rssd_features *) (prssdp + 1);
memcpy(&private->features, features,
sizeof(struct dasd_rssd_features));
- }
+ } else
+ dev_warn(&device->cdev->dev, "Reading device feature codes"
+ " failed with rc=%d\n", rc);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -1144,9 +1147,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
}
/* Read Feature Codes */
- rc = dasd_eckd_read_features(device);
- if (rc)
- goto out_err3;
+ dasd_eckd_read_features(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -3241,9 +3242,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
}
/* Read Feature Codes */
- rc = dasd_eckd_read_features(device);
- if (rc)
- goto out_err;
+ dasd_eckd_read_features(device);
/* Read Device Characteristics */
memset(&private->rdc_data, 0, sizeof(private->rdc_data));
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 5e47a1ee52b..8afd9fa0087 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -540,7 +540,7 @@ dasd_check_blocksize(int bsize)
extern debug_info_t *dasd_debug_area;
extern struct dasd_profile_info_t dasd_global_profile;
extern unsigned int dasd_profile_level;
-extern struct block_device_operations dasd_device_operations;
+extern const struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index d34617682a6..f76f4bd82b9 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -34,7 +34,7 @@ static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
static int dcssblk_major;
-static struct block_device_operations dcssblk_devops = {
+static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
.open = dcssblk_open,
.release = dcssblk_release,
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index ee604e92a5f..116d1b3eeb1 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -244,7 +244,7 @@ static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static struct block_device_operations xpram_devops =
+static const struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
.getgeo = xpram_getgeo,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 4cb9e70507a..64f57ef2763 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -50,7 +50,7 @@ static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
-static struct block_device_operations tapeblock_fops = {
+static const struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index c431198bdbc..82daa3c1dc9 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
-#include <linux/utsname.h>
#include <linux/debugfs.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 393c73c47f8..91c25706fa8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -31,8 +31,7 @@
#include "chp.h"
int css_init_done = 0;
-static int need_reprobe = 0;
-static int max_ssid = 0;
+int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
int ret;
struct subchannel *sch;
- sch = css_alloc_subchannel(schid);
- if (IS_ERR(sch))
- return PTR_ERR(sch);
+ if (cio_is_console(schid))
+ sch = cio_get_console_subchannel();
+ else {
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ return PTR_ERR(sch);
+ }
ret = css_register_subchannel(sch);
- if (ret)
- put_device(&sch->dev);
+ if (ret) {
+ if (!cio_is_console(schid))
+ put_device(&sch->dev);
+ }
return ret;
}
@@ -409,10 +414,14 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
spin_lock_init(&slow_subchannel_lock);
+ atomic_set(&css_eval_scheduled, 0);
+ init_waitqueue_head(&css_eval_wq);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
@@ -468,9 +477,17 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
static void css_slow_path_func(struct work_struct *unused)
{
+ unsigned long flags;
+
CIO_TRACE_EVENT(4, "slowpath");
for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
NULL);
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ if (idset_is_empty(slow_subchannel_set)) {
+ atomic_set(&css_eval_scheduled, 0);
+ wake_up(&css_eval_wq);
+ }
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -482,6 +499,7 @@ void css_schedule_eval(struct subchannel_id schid)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
+ atomic_set(&css_eval_scheduled, 1);
queue_work(slow_path_wq, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -492,80 +510,53 @@ void css_schedule_eval_all(void)
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
+ atomic_set(&css_eval_scheduled, 1);
queue_work(slow_path_wq, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-void css_wait_for_slow_path(void)
+static int __unset_registered(struct device *dev, void *data)
{
- flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
-{
- int ret;
-
- CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
- schid.ssid, schid.sch_no);
- if (need_reprobe)
- return -EAGAIN;
-
- ret = css_probe_device(schid);
- switch (ret) {
- case 0:
- break;
- case -ENXIO:
- case -ENOMEM:
- case -EIO:
- /* These should abort looping */
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
-static void reprobe_after_idle(struct work_struct *unused)
-{
- /* Make sure initial subchannel scan is done. */
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
- if (need_reprobe)
- css_schedule_reprobe();
+ idset_sch_del(set, sch->schid);
+ return 0;
}
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
+void css_schedule_eval_all_unreg(void)
{
- int ret;
-
- CIO_MSG_EVENT(4, "reprobe start\n");
+ unsigned long flags;
+ struct idset *unreg_set;
- /* Make sure initial subchannel scan is done. */
- if (atomic_read(&ccw_device_init_count) != 0) {
- queue_work(ccw_device_work, &reprobe_idle_work);
+ /* Find unregistered subchannels. */
+ unreg_set = idset_sch_new();
+ if (!unreg_set) {
+ /* Fallback. */
+ css_schedule_eval_all();
return;
}
- need_reprobe = 0;
- ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
- CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
- need_reprobe);
+ idset_fill(unreg_set);
+ bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+ /* Apply to slow_subchannel_set. */
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_add_set(slow_subchannel_set, unreg_set);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_work(slow_path_wq, &slow_path_work);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+ idset_free(unreg_set);
}
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+void css_wait_for_slow_path(void)
+{
+ flush_workqueue(slow_path_wq);
+}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
- need_reprobe = 1;
- queue_work(slow_path_wq, &css_reprobe_work);
+ css_schedule_eval_all_unreg();
}
-
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
@@ -601,49 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
css_evaluate_subchannel(mchk_schid, 0);
}
-static int __init
-__init_channel_subsystem(struct subchannel_id schid, void *data)
-{
- struct subchannel *sch;
- int ret;
-
- if (cio_is_console(schid))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(schid);
- if (IS_ERR(sch))
- ret = PTR_ERR(sch);
- else
- ret = 0;
- switch (ret) {
- case 0:
- break;
- case -ENOMEM:
- panic("Out of memory in init_channel_subsystem\n");
- /* -ENXIO: no more subchannels. */
- case -ENXIO:
- return ret;
- /* -EIO: this subchannel set not supported. */
- case -EIO:
- return ret;
- default:
- return 0;
- }
- }
- /*
- * We register ALL valid subchannels in ioinfo, even those
- * that have been present before init_channel_subsystem.
- * These subchannels can't have been registered yet (kmalloc
- * not working) so we do it now. This is true e.g. for the
- * console subchannel.
- */
- if (css_register_subchannel(sch)) {
- if (!cio_is_console(schid))
- put_device(&sch->dev);
- }
- return 0;
-}
-
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
@@ -854,19 +802,30 @@ static struct notifier_block css_power_notifier = {
* The struct subchannel's are created during probing (except for the
* static console subchannel).
*/
-static int __init
-init_channel_subsystem (void)
+static int __init css_bus_init(void)
{
int ret, i;
ret = chsc_determine_css_characteristics();
if (ret == -ENOMEM)
- goto out; /* No need to continue. */
+ goto out;
ret = chsc_alloc_sei_area();
if (ret)
goto out;
+ /* Try to enable MSS. */
+ ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+ switch (ret) {
+ case 0: /* Success. */
+ max_ssid = __MAX_SSID;
+ break;
+ case -ENOMEM:
+ goto out;
+ default:
+ max_ssid = 0;
+ }
+
ret = slow_subchannel_init();
if (ret)
goto out;
@@ -878,17 +837,6 @@ init_channel_subsystem (void)
if ((ret = bus_register(&css_bus_type)))
goto out;
- /* Try to enable MSS. */
- ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
- switch (ret) {
- case 0: /* Success. */
- max_ssid = __MAX_SSID;
- break;
- case -ENOMEM:
- goto out_bus;
- default:
- max_ssid = 0;
- }
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
@@ -934,7 +882,6 @@ init_channel_subsystem (void)
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
- for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
out_file:
if (css_chsc_characteristics.secm)
@@ -955,17 +902,76 @@ out_unregister:
&dev_attr_cm_enable);
device_unregister(&css->device);
}
-out_bus:
bus_unregister(&css_bus_type);
out:
crw_unregister_handler(CRW_RSC_CSS);
chsc_free_sei_area();
- kfree(slow_subchannel_set);
+ idset_free(slow_subchannel_set);
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
}
+static void __init css_bus_cleanup(void)
+{
+ struct channel_subsystem *css;
+ int i;
+
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
+ css->pseudo_subchannel = NULL;
+ if (css_chsc_characteristics.secm)
+ device_remove_file(&css->device, &dev_attr_cm_enable);
+ device_unregister(&css->device);
+ }
+ bus_unregister(&css_bus_type);
+ crw_unregister_handler(CRW_RSC_CSS);
+ chsc_free_sei_area();
+ idset_free(slow_subchannel_set);
+ isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+ int ret;
+
+ ret = css_bus_init();
+ if (ret)
+ return ret;
+
+ ret = io_subchannel_init();
+ if (ret)
+ css_bus_cleanup();
+
+ return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+ struct css_driver *cssdrv = to_cssdriver(drv);
+
+ if (cssdrv->settle)
+ cssdrv->settle();
+ return 0;
+}
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup before the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+ /* Wait for the evaluation of subchannels to finish. */
+ wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+ /* Wait for the subchannel type specific initialization to finish */
+ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
int sch_is_pseudo_sch(struct subchannel *sch)
{
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -1135,7 +1141,5 @@ void css_driver_unregister(struct css_driver *cdrv)
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
-subsys_initcall(init_channel_subsystem);
-
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 9763eeec745..68d6b0bf151 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -75,6 +75,7 @@ struct chp_link;
* @freeze: callback for freezing during hibernation snapshotting
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
* @name: name of the device driver
*/
struct css_driver {
@@ -92,6 +93,7 @@ struct css_driver {
int (*freeze)(struct subchannel *);
int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *);
+ void (*settle)(void);
const char *name;
};
@@ -109,6 +111,7 @@ extern void css_sch_device_unregister(struct subchannel *);
extern int css_probe_device(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
+extern int max_ssid;
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6527f3f3449..f780bdd3a04 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -131,6 +131,10 @@ static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
+static void recovery_func(unsigned long data);
+struct workqueue_struct *ccw_device_work;
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -151,6 +155,13 @@ static int io_subchannel_prepare(struct subchannel *sch)
return 0;
}
+static void io_subchannel_settle(void)
+{
+ wait_event(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ flush_workqueue(ccw_device_work);
+}
+
static struct css_driver io_subchannel_driver = {
.owner = THIS_MODULE,
.subchannel_type = io_subchannel_ids,
@@ -162,16 +173,10 @@ static struct css_driver io_subchannel_driver = {
.remove = io_subchannel_remove,
.shutdown = io_subchannel_shutdown,
.prepare = io_subchannel_prepare,
+ .settle = io_subchannel_settle,
};
-struct workqueue_struct *ccw_device_work;
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
-
-static void recovery_func(unsigned long data);
-
-static int __init
-init_ccw_bus_type (void)
+int __init io_subchannel_init(void)
{
int ret;
@@ -181,10 +186,10 @@ init_ccw_bus_type (void)
ccw_device_work = create_singlethread_workqueue("cio");
if (!ccw_device_work)
- return -ENOMEM; /* FIXME: better errno ? */
+ return -ENOMEM;
slow_path_wq = create_singlethread_workqueue("kslowcrw");
if (!slow_path_wq) {
- ret = -ENOMEM; /* FIXME: better errno ? */
+ ret = -ENOMEM;
goto out_err;
}
if ((ret = bus_register (&ccw_bus_type)))
@@ -194,9 +199,6 @@ init_ccw_bus_type (void)
if (ret)
goto out_err;
- wait_event(ccw_device_init_wq,
- atomic_read(&ccw_device_init_count) == 0);
- flush_workqueue(ccw_device_work);
return 0;
out_err:
if (ccw_device_work)
@@ -206,16 +208,6 @@ out_err:
return ret;
}
-static void __exit
-cleanup_ccw_bus_type (void)
-{
- css_driver_unregister(&io_subchannel_driver);
- bus_unregister(&ccw_bus_type);
- destroy_workqueue(ccw_device_work);
-}
-
-subsys_initcall(init_ccw_bus_type);
-module_exit(cleanup_ccw_bus_type);
/************************ device handling **************************/
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index e3975107a57..ed39a2caaf4 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ dev_fsm_final_state(struct ccw_device *cdev)
extern struct workqueue_struct *ccw_device_work;
extern wait_queue_head_t ccw_device_init_wq;
extern atomic_t ccw_device_init_count;
+int __init io_subchannel_init(void);
void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index cf8f24a4b5e..4d10981c7cc 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -78,7 +78,7 @@ static inline int idset_get_first(struct idset *set, int *ssid, int *id)
struct idset *idset_sch_new(void)
{
- return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+ return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
}
void idset_sch_add(struct idset *set, struct subchannel_id schid)
@@ -110,3 +110,23 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
}
return rc;
}
+
+int idset_is_empty(struct idset *set)
+{
+ int bitnum;
+
+ bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+ if (bitnum >= set->num_ssid * set->num_id)
+ return 1;
+ return 0;
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+ unsigned long i, len;
+
+ len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
+ __BITOPS_WORDS(from->num_ssid * from->num_id));
+ for (i = 0; i < len ; i++)
+ to->bitmap[i] |= from->bitmap[i];
+}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 528065cb502..7543da4529f 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -21,5 +21,7 @@ void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9aef402a5f1..4be6e84b959 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -401,7 +401,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
if ((!q->is_input_q &&
(q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
qdio_perf_stat_inc(&perf_stats.outbound_target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
return;
}
@@ -418,7 +418,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
{
int new;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
@@ -455,6 +455,8 @@ static inline void inbound_primed(struct qdio_q *q, int count)
count--;
if (!count)
return;
+ /* need to change ALL buffers to get more interrupts */
+ set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -545,7 +547,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
} else
@@ -565,11 +567,10 @@ static void qdio_kick_handler(struct qdio_q *q)
if (q->is_input_q) {
qdio_perf_stat_inc(&perf_stats.inbound_handler);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
- }
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+ } else
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
@@ -633,7 +634,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
@@ -1481,10 +1482,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
get_buf_state(q, prev_buf(bufnr), &state, 0);
if (state != SLSB_CU_OUTPUT_PRIMED)
rc = qdio_kick_outbound_q(q);
- else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
+ else
qdio_perf_stat_inc(&perf_stats.fast_requeue);
- }
+
out:
tasklet_schedule(&q->tasklet);
return rc;
@@ -1510,12 +1510,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
if (!irq_ptr)
return -ENODEV;
- if (callflags & QDIO_FLAG_SYNC_INPUT)
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
- else
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
- DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+ "do%02x b:%02x c:%02x", callflags, bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 090b32a339c..1294876bf7b 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -60,6 +60,7 @@ static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(void *unused1, void *unused2);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
+static int ap_select_domain(void);
/*
* Module description.
@@ -109,6 +110,10 @@ static unsigned long long poll_timeout = 250000;
/* Suspend flag */
static int ap_suspend_flag;
+/* Flag to check if domain was set through module parameter domain=. This is
+ * important when suspend and resume are done in a z/VM environment where the
+ * domain might change. */
+static int user_set_domain = 0;
static struct bus_type ap_bus_type;
/**
@@ -643,6 +648,7 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
destroy_workqueue(ap_work_queue);
ap_work_queue = NULL;
}
+
tasklet_disable(&ap_tasklet);
}
/* Poll on the device until all requests are finished. */
@@ -653,7 +659,10 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
spin_unlock_bh(&ap_dev->lock);
} while ((flags & 1) || (flags & 2));
- ap_device_remove(dev);
+ spin_lock_bh(&ap_dev->lock);
+ ap_dev->unregistered = 1;
+ spin_unlock_bh(&ap_dev->lock);
+
return 0;
}
@@ -666,11 +675,10 @@ static int ap_bus_resume(struct device *dev)
ap_suspend_flag = 0;
if (!ap_interrupts_available())
ap_interrupt_indicator = NULL;
- ap_device_probe(dev);
- ap_reset(ap_dev);
- setup_timer(&ap_dev->timeout, ap_request_timeout,
- (unsigned long) ap_dev);
- ap_scan_bus(NULL);
+ if (!user_set_domain) {
+ ap_domain_index = -1;
+ ap_select_domain();
+ }
init_timer(&ap_config_timer);
ap_config_timer.function = ap_config_timeout;
ap_config_timer.data = 0;
@@ -686,12 +694,14 @@ static int ap_bus_resume(struct device *dev)
tasklet_schedule(&ap_tasklet);
if (ap_thread_flag)
rc = ap_poll_thread_start();
- } else {
- ap_device_probe(dev);
- ap_reset(ap_dev);
- setup_timer(&ap_dev->timeout, ap_request_timeout,
- (unsigned long) ap_dev);
}
+ if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
+ spin_lock_bh(&ap_dev->lock);
+ ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
+ ap_domain_index);
+ spin_unlock_bh(&ap_dev->lock);
+ }
+ queue_work(ap_work_queue, &ap_config_work);
return rc;
}
@@ -1079,6 +1089,8 @@ static void ap_scan_bus(struct work_struct *unused)
spin_lock_bh(&ap_dev->lock);
if (rc || ap_dev->unregistered) {
spin_unlock_bh(&ap_dev->lock);
+ if (ap_dev->unregistered)
+ i--;
device_unregister(dev);
put_device(dev);
continue;
@@ -1586,6 +1598,12 @@ int __init ap_module_init(void)
ap_domain_index);
return -EINVAL;
}
+ /* In the resume callback we need to know whether the user set the domain.
+ * If so, we cannot just reset it.
+ */
+ if (ap_domain_index >= 0)
+ user_set_domain = 1;
+
if (ap_instructions_available() != 0) {
pr_warning("The hardware system does not support "
"AP instructions\n");
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index a4b2c576144..c84eadd3602 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2113,7 +2113,7 @@ static ssize_t remove_write (struct device_driver *drv,
IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= IFNAMSIZ)
- count = IFNAMSIZ - 1;;
+ count = IFNAMSIZ - 1;
for (i = 0, p = buf; i < count && *p; i++, p++) {
if (*p == '\n' || *p == ' ')
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 3ff726afafc..0e1a34627a2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -102,7 +102,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
!(status & ZFCP_STATUS_COMMON_RUNNING))) {
zfcp_scsi_command_fail(scpnt, DID_ERROR);
- return 0;;
+ return 0;
}
ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 6d465168468..869a30b49ed 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -452,7 +452,7 @@ static const struct file_operations jsf_fops = {
static struct miscdevice jsf_dev = { JSF_MINOR, "jsflash", &jsf_fops };
-static struct block_device_operations jsfd_fops = {
+static const struct block_device_operations jsfd_fops = {
.owner = THIS_MODULE,
};
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index e6f2bb7365e..8dfb59d5899 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -5223,7 +5223,7 @@ ahc_chip_init(struct ahc_softc *ahc)
/*
* Setup the allowed SCSI Sequences based on operational mode.
- * If we are a target, we'll enalbe select in operations once
+ * If we are a target, we'll enable select in operations once
* we've had a lun enabled.
*/
scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 906cef5cda8..41e1b0e7e2e 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1340,7 +1340,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
resp_hdr->opcode = login->op_code;
resp_hdr->flags = login->response_flags;
resp_hdr->max_version = login->version_max;
- resp_hdr->active_version = login->version_active;;
+ resp_hdr->active_version = login->version_active;
resp_hdr->hlength = 0;
hton24(resp_hdr->dlength, login->data_length);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9df7ed38e1b..9a1bd9534d7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1207,7 +1207,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
vport->ct_flags &= ~FC_CT_RFF_ID;
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFF_ID);
- CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);;
+ CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
CtReq->un.rff.type_code = FC_FCP_DATA;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7dc3d1894b1..a39addc3a59 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -718,7 +718,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
* megasas_build_ldio - Prepares IOs to logical devices
* @instance: Adapter soft state
* @scp: SCSI command
- * @cmd: Command to to be prepared
+ * @cmd: Command to be prepared
*
* Frames (and accompanying SGLs) for regular SCSI IOs use this function.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 40e3cafb3a9..83c8b5e4fc8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1422,7 +1422,7 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.
- * @index: index into to the active_array
+ * @index: index into the active_array
*
* This routine removes and returns the srb at the specified index
**/
@@ -1500,7 +1500,7 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
/**
* qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
- * @ha: pointer to to HBA
+ * @ha: pointer to HBA
* @t: target id
* @l: lun id
*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a89c421dab5..8dd96dcd716 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -956,7 +956,7 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
}
#endif
-static struct block_device_operations sd_fops = {
+static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
.release = sd_release,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4968c4ced38..848b5946685 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2233,7 +2233,7 @@ static struct file_operations dev_fops = {
.open = sg_proc_open_dev,
.release = seq_release,
};
-static struct seq_operations dev_seq_ops = {
+static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
@@ -2246,7 +2246,7 @@ static struct file_operations devstrs_fops = {
.open = sg_proc_open_devstrs,
.release = seq_release,
};
-static struct seq_operations devstrs_seq_ops = {
+static const struct seq_operations devstrs_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
@@ -2259,7 +2259,7 @@ static struct file_operations debug_fops = {
.open = sg_proc_open_debug,
.release = seq_release,
};
-static struct seq_operations debug_seq_ops = {
+static const struct seq_operations debug_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index cce0fe4c8a3..eb61f7a70e1 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -525,7 +525,7 @@ static int sr_block_media_changed(struct gendisk *disk)
return cdrom_media_changed(&cd->cdi);
}
-static struct block_device_operations sr_bdops =
+static const struct block_device_operations sr_bdops =
{
.owner = THIS_MODULE,
.open = sr_block_open,
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 8d349b23249..300cea768d7 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -649,7 +649,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
u8 *p;
int count;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
- struct circ_buf *xmit = &port->info->xmit;
+ struct circ_buf *xmit = &port->state->xmit;
/* Handle xon/xoff */
if (port->x_char) {
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 7be52fe288e..31f172397af 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -18,6 +18,7 @@ static char *serial_version = "$Revision: 1.25 $";
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
+#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c
index 75ab00631c4..3c30c56aa2e 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/serial/max3100.c
@@ -925,3 +925,4 @@ module_exit(max3100_exit);
MODULE_DESCRIPTION("MAX3100 driver");
MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:max3100");
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 6443b7ff274..b8629d74f6a 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -726,9 +726,10 @@ static struct uart_driver serial_pxa_reg = {
.cons = PXA_CONSOLE,
};
-static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM
+static int serial_pxa_suspend(struct device *dev)
{
- struct uart_pxa_port *sport = platform_get_drvdata(dev);
+ struct uart_pxa_port *sport = dev_get_drvdata(dev);
if (sport)
uart_suspend_port(&serial_pxa_reg, &sport->port);
@@ -736,9 +737,9 @@ static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
-static int serial_pxa_resume(struct platform_device *dev)
+static int serial_pxa_resume(struct device *dev)
{
- struct uart_pxa_port *sport = platform_get_drvdata(dev);
+ struct uart_pxa_port *sport = dev_get_drvdata(dev);
if (sport)
uart_resume_port(&serial_pxa_reg, &sport->port);
@@ -746,6 +747,12 @@ static int serial_pxa_resume(struct platform_device *dev)
return 0;
}
+static struct dev_pm_ops serial_pxa_pm_ops = {
+ .suspend = serial_pxa_suspend,
+ .resume = serial_pxa_resume,
+};
+#endif
+
static int serial_pxa_probe(struct platform_device *dev)
{
struct uart_pxa_port *sport;
@@ -825,11 +832,12 @@ static struct platform_driver serial_pxa_driver = {
.probe = serial_pxa_probe,
.remove = serial_pxa_remove,
- .suspend = serial_pxa_suspend,
- .resume = serial_pxa_resume,
.driver = {
.name = "pxa2xx-uart",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &serial_pxa_pm_ops,
+#endif
},
};
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 2514d00c0f6..1689bda1d13 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -2426,7 +2426,7 @@ struct tty_driver *uart_console_device(struct console *co, int *index)
/**
* uart_add_one_port - attach a driver-defined port structure
* @drv: pointer to the uart low level driver structure for this port
- * @port: uart port structure to use for this port.
+ * @uport: uart port structure to use for this port.
*
* This allows the driver to register its own uart_port structure
* with the core driver. The main purpose is to allow the low
@@ -2499,7 +2499,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
/**
* uart_remove_one_port - detach a driver defined port structure
* @drv: pointer to the uart low level driver structure for this port
- * @port: uart port structure for this port
+ * @uport: uart port structure for this port
*
* This unhooks (and hangs up) the specified port structure from the
* core driver. No further calls will be made to the low-level code
diff --git a/drivers/sfi/Kconfig b/drivers/sfi/Kconfig
new file mode 100644
index 00000000000..dd115121e0b
--- /dev/null
+++ b/drivers/sfi/Kconfig
@@ -0,0 +1,17 @@
+#
+# SFI Configuration
+#
+
+menuconfig SFI
+ bool "SFI (Simple Firmware Interface) Support"
+ ---help---
+ The Simple Firmware Interface (SFI) provides a lightweight method
+ for platform firmware to pass information to the operating system
+ via static tables in memory. Kernel SFI support is required to
+ boot on SFI-only platforms. Currently, all SFI-only platforms are
+ based on the 2nd generation Intel Atom processor platform,
+ code-named Moorestown.
+
+ For more information, see http://simplefirmware.org
+
+ Say 'Y' here to enable the kernel to boot on SFI-only platforms.
diff --git a/drivers/sfi/Makefile b/drivers/sfi/Makefile
new file mode 100644
index 00000000000..2343732aefe
--- /dev/null
+++ b/drivers/sfi/Makefile
@@ -0,0 +1,3 @@
+obj-y += sfi_acpi.o
+obj-y += sfi_core.o
+
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
new file mode 100644
index 00000000000..34aba30eb84
--- /dev/null
+++ b/drivers/sfi/sfi_acpi.c
@@ -0,0 +1,175 @@
+/* sfi_acpi.c Simple Firmware Interface - ACPI extensions */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#define KMSG_COMPONENT "SFI"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <acpi/acpi.h>
+
+#include <linux/sfi.h>
+#include "sfi_core.h"
+
+/*
+ * SFI can access ACPI-defined tables via an optional ACPI XSDT.
+ *
+ * This allows re-use, and avoids re-definition, of standard tables.
+ * For example, the "MCFG" table is defined by PCI, reserved by ACPI,
+ * and is expected to be present on many SFI-only systems.
+ */
+
+static struct acpi_table_xsdt *xsdt_va __read_mostly;
+
+#define XSDT_GET_NUM_ENTRIES(ptable, entry_type) \
+ ((ptable->header.length - sizeof(struct acpi_table_header)) / \
+ (sizeof(entry_type)))
+
+static inline struct sfi_table_header *acpi_to_sfi_th(
+ struct acpi_table_header *th)
+{
+ return (struct sfi_table_header *)th;
+}
+
+static inline struct acpi_table_header *sfi_to_acpi_th(
+ struct sfi_table_header *th)
+{
+ return (struct acpi_table_header *)th;
+}
+
+/*
+ * sfi_acpi_parse_xsdt()
+ *
+ * Parse the ACPI XSDT for later access by sfi_acpi_table_parse().
+ */
+static int __init sfi_acpi_parse_xsdt(struct sfi_table_header *th)
+{
+ struct sfi_table_key key = SFI_ANY_KEY;
+ int tbl_cnt, i;
+ void *ret;
+
+ xsdt_va = (struct acpi_table_xsdt *)th;
+ tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
+ for (i = 0; i < tbl_cnt; i++) {
+ ret = sfi_check_table(xsdt_va->table_offset_entry[i], &key);
+ if (IS_ERR(ret)) {
+ disable_sfi();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int __init sfi_acpi_init(void)
+{
+ struct sfi_table_key xsdt_key = { .sig = SFI_SIG_XSDT };
+
+ sfi_table_parse(SFI_SIG_XSDT, NULL, NULL, sfi_acpi_parse_xsdt);
+
+ /* Call sfi_get_table() here only to keep the XSDT mapped */
+ xsdt_va = (struct acpi_table_xsdt *)sfi_get_table(&xsdt_key);
+ return 0;
+}
+
+static struct acpi_table_header *sfi_acpi_get_table(struct sfi_table_key *key)
+{
+ u32 tbl_cnt, i;
+ void *ret;
+
+ tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
+ for (i = 0; i < tbl_cnt; i++) {
+ ret = sfi_check_table(xsdt_va->table_offset_entry[i], key);
+ if (!IS_ERR(ret) && ret)
+ return sfi_to_acpi_th(ret);
+ }
+
+ return NULL;
+}
+
+static void sfi_acpi_put_table(struct acpi_table_header *table)
+{
+ sfi_put_table(acpi_to_sfi_th(table));
+}
+
+/*
+ * sfi_acpi_table_parse()
+ *
+ * Find the specified table in the XSDT, run the handler on it, and return its result.
+ */
+int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id,
+ int(*handler)(struct acpi_table_header *))
+{
+ struct acpi_table_header *table = NULL;
+ struct sfi_table_key key;
+ int ret = 0;
+
+ if (sfi_disabled)
+ return -1;
+
+ key.sig = signature;
+ key.oem_id = oem_id;
+ key.oem_table_id = oem_table_id;
+
+ table = sfi_acpi_get_table(&key);
+ if (!table)
+ return -EINVAL;
+
+ ret = handler(table);
+ sfi_acpi_put_table(table);
+ return ret;
+}
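
sfi_acpi_table_parse() mirrors acpi_table_parse(): it looks the table up by key, runs the handler while the table is mapped, and drops the mapping afterwards. A hedged sketch of a caller, assuming the declaration of sfi_acpi_table_parse() is visible (e.g. code built next to sfi_acpi.c); the MCFG signature and the handler are only an example:

static int __init sketch_handle_mcfg(struct acpi_table_header *table)
{
	pr_info("MCFG: %u bytes, revision %u\n",
		table->length, table->revision);
	return 0;
}

static int __init sketch_find_mcfg(void)
{
	return sfi_acpi_table_parse("MCFG", NULL, NULL, sketch_handle_mcfg);
}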
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
new file mode 100644
index 00000000000..d3b49680047
--- /dev/null
+++ b/drivers/sfi/sfi_core.c
@@ -0,0 +1,407 @@
+/* sfi_core.c Simple Firmware Interface - core internals */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#define KMSG_COMPONENT "SFI"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/bootmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+
+#include "sfi_core.h"
+
+#define ON_SAME_PAGE(addr1, addr2) \
+ (((unsigned long)(addr1) & PAGE_MASK) == \
+ ((unsigned long)(addr2) & PAGE_MASK))
+#define TABLE_ON_PAGE(page, table, size) (ON_SAME_PAGE(page, table) && \
+ ON_SAME_PAGE(page, table + size))
+
+int sfi_disabled __read_mostly;
+EXPORT_SYMBOL(sfi_disabled);
+
+static u64 syst_pa __read_mostly;
+static struct sfi_table_simple *syst_va __read_mostly;
+
+/*
+ * FW creates and saves the SFI tables in memory. When these tables get
+ * used, they may need to be mapped to virtual address space, and the mapping
+ * can happen before or after the ioremap() is ready, so a flag is needed
+ * to indicate this.
+ */
+static u32 sfi_use_ioremap __read_mostly;
+
+static void __iomem *sfi_map_memory(u64 phys, u32 size)
+{
+ if (!phys || !size)
+ return NULL;
+
+ if (sfi_use_ioremap)
+ return ioremap(phys, size);
+ else
+ return early_ioremap(phys, size);
+}
+
+static void sfi_unmap_memory(void __iomem *virt, u32 size)
+{
+ if (!virt || !size)
+ return;
+
+ if (sfi_use_ioremap)
+ iounmap(virt);
+ else
+ early_iounmap(virt, size);
+}
+
+static void sfi_print_table_header(unsigned long long pa,
+ struct sfi_table_header *header)
+{
+ pr_info("%4.4s %llX, %04X (v%d %6.6s %8.8s)\n",
+ header->sig, pa,
+ header->len, header->rev, header->oem_id,
+ header->oem_table_id);
+}
+
+/*
+ * sfi_verify_table()
+ * Sanity check the table length, then verify the checksum
+ */
+static __init int sfi_verify_table(struct sfi_table_header *table)
+{
+
+ u8 checksum = 0;
+ u8 *puchar = (u8 *)table;
+ u32 length = table->len;
+
+ /* Sanity check table length against arbitrary 1MB limit */
+ if (length > 0x100000) {
+ pr_err("Invalid table length 0x%x\n", length);
+ return -1;
+ }
+
+ while (length--)
+ checksum += *puchar++;
+
+ if (checksum) {
+ pr_err("Checksum %2.2X should be %2.2X\n",
+ table->csum, table->csum - checksum);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * sfi_map_table()
+ *
+ * Return address of mapped table
+ * Check for common case that we can re-use mapping to SYST,
+ * which requires syst_pa, syst_va to be initialized.
+ */
+struct sfi_table_header *sfi_map_table(u64 pa)
+{
+ struct sfi_table_header *th;
+ u32 length;
+
+ if (!TABLE_ON_PAGE(syst_pa, pa, sizeof(struct sfi_table_header)))
+ th = sfi_map_memory(pa, sizeof(struct sfi_table_header));
+ else
+ th = (void *)syst_va + (pa - syst_pa);
+
+ /* If table fits on same page as its header, we are done */
+ if (TABLE_ON_PAGE(th, th, th->len))
+ return th;
+
+ /* Entire table does not fit on same page as SYST */
+ length = th->len;
+ if (!TABLE_ON_PAGE(syst_pa, pa, sizeof(struct sfi_table_header)))
+ sfi_unmap_memory(th, sizeof(struct sfi_table_header));
+
+ return sfi_map_memory(pa, length);
+}
+
+/*
+ * sfi_unmap_table()
+ *
+ * Undoes effect of sfi_map_table() by unmapping table
+ * if it did not completely fit on same page as SYST.
+ */
+void sfi_unmap_table(struct sfi_table_header *th)
+{
+ if (!TABLE_ON_PAGE(syst_va, th, th->len))
+ sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
+ sizeof(*th) : th->len);
+}
+
+static int sfi_table_check_key(struct sfi_table_header *th,
+ struct sfi_table_key *key)
+{
+
+ if (strncmp(th->sig, key->sig, SFI_SIGNATURE_SIZE)
+ || (key->oem_id && strncmp(th->oem_id,
+ key->oem_id, SFI_OEM_ID_SIZE))
+ || (key->oem_table_id && strncmp(th->oem_table_id,
+ key->oem_table_id, SFI_OEM_TABLE_ID_SIZE)))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * This function is used in two cases:
+ * 1. to enumerate and verify the tables addressed by SYST/XSDT,
+ *    in which case no signature is given (during the kernel boot phase)
+ * 2. to parse one specific table, in which case a signature must be given;
+ *    the mapped virtual address is returned, and the mapping is released
+ *    by a later call to sfi_put_table()
+ *
+ * Return value:
+ * NULL:                when no table matching the key is found
+ * ERR_PTR(error): error value
+ * virt table address: when a matched table is found
+ */
+struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key)
+{
+ struct sfi_table_header *th;
+ void *ret = NULL;
+
+ th = sfi_map_table(pa);
+ if (!th)
+ return ERR_PTR(-ENOMEM);
+
+ if (!key->sig) {
+ sfi_print_table_header(pa, th);
+ if (sfi_verify_table(th))
+ ret = ERR_PTR(-EINVAL);
+ } else {
+ if (!sfi_table_check_key(th, key))
+ return th; /* Success */
+ }
+
+ sfi_unmap_table(th);
+ return ret;
+}
+
+/*
+ * sfi_get_table()
+ *
+ * Search SYST for the specified table with the signature in
+ * the key, and return the mapped table
+ */
+struct sfi_table_header *sfi_get_table(struct sfi_table_key *key)
+{
+ struct sfi_table_header *th;
+ u32 tbl_cnt, i;
+
+ tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
+ for (i = 0; i < tbl_cnt; i++) {
+ th = sfi_check_table(syst_va->pentry[i], key);
+ if (!IS_ERR(th) && th)
+ return th;
+ }
+
+ return NULL;
+}
+
+void sfi_put_table(struct sfi_table_header *th)
+{
+ sfi_unmap_table(th);
+}
+
+/* Find table with signature, run handler on it */
+int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
+ sfi_table_handler handler)
+{
+ struct sfi_table_header *table = NULL;
+ struct sfi_table_key key;
+ int ret = -EINVAL;
+
+ if (sfi_disabled || !handler || !signature)
+ goto exit;
+
+ key.sig = signature;
+ key.oem_id = oem_id;
+ key.oem_table_id = oem_table_id;
+
+ table = sfi_get_table(&key);
+ if (!table)
+ goto exit;
+
+ ret = handler(table);
+ sfi_put_table(table);
+exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sfi_table_parse);
+
+/*
+ * sfi_parse_syst()
+ * Checksum all the tables in SYST and print their headers
+ *
+ * success: set syst_va, return 0
+ */
+static int __init sfi_parse_syst(void)
+{
+ struct sfi_table_key key = SFI_ANY_KEY;
+ int tbl_cnt, i;
+ void *ret;
+
+ syst_va = sfi_map_memory(syst_pa, sizeof(struct sfi_table_simple));
+ if (!syst_va)
+ return -ENOMEM;
+
+ tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
+ for (i = 0; i < tbl_cnt; i++) {
+ ret = sfi_check_table(syst_va->pentry[i], &key);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
+ return 0;
+}
+
+/*
+ * The OS finds the System Table by searching 16-byte boundaries between
+ * physical address 0x000E0000 and 0x000FFFFF. The OS shall search this region
+ * starting at the low address and shall stop searching when the 1st valid SFI
+ * System Table is found.
+ *
+ * success: set syst_pa, return 0
+ * fail: return -1
+ */
+static __init int sfi_find_syst(void)
+{
+ unsigned long offset, len;
+ void *start;
+
+ len = SFI_SYST_SEARCH_END - SFI_SYST_SEARCH_BEGIN;
+ start = sfi_map_memory(SFI_SYST_SEARCH_BEGIN, len);
+ if (!start)
+ return -1;
+
+ for (offset = 0; offset < len; offset += 16) {
+ struct sfi_table_header *syst_hdr;
+
+ syst_hdr = start + offset;
+ if (strncmp(syst_hdr->sig, SFI_SIG_SYST,
+ SFI_SIGNATURE_SIZE))
+ continue;
+
+ if (syst_hdr->len > PAGE_SIZE)
+ continue;
+
+ sfi_print_table_header(SFI_SYST_SEARCH_BEGIN + offset,
+ syst_hdr);
+
+ if (sfi_verify_table(syst_hdr))
+ continue;
+
+ /*
+ * Enforce SFI spec mandate that SYST reside within a page.
+ */
+ if (!ON_SAME_PAGE(syst_pa, syst_pa + syst_hdr->len)) {
+ pr_info("SYST 0x%llx + 0x%x crosses page\n",
+ syst_pa, syst_hdr->len);
+ continue;
+ }
+
+ /* Success */
+ syst_pa = SFI_SYST_SEARCH_BEGIN + offset;
+ sfi_unmap_memory(start, len);
+ return 0;
+ }
+
+ sfi_unmap_memory(start, len);
+ return -1;
+}
+
+void __init sfi_init(void)
+{
+ if (!acpi_disabled)
+ disable_sfi();
+
+ if (sfi_disabled)
+ return;
+
+ pr_info("Simple Firmware Interface v0.7 http://simplefirmware.org\n");
+
+ if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init())
+ disable_sfi();
+
+ return;
+}
+
+void __init sfi_init_late(void)
+{
+ int length;
+
+ if (sfi_disabled)
+ return;
+
+ length = syst_va->header.len;
+ sfi_unmap_memory(syst_va, sizeof(struct sfi_table_simple));
+
+ /* Use ioremap now after it is ready */
+ sfi_use_ioremap = 1;
+ syst_va = sfi_map_memory(syst_pa, length);
+
+ sfi_acpi_init();
+}
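
For illustration, a minimal standalone sketch of the checksum rule that sfi_verify_table() enforces (the helper name is hypothetical): the byte-wise sum of the entire table, including the csum field itself, must be zero modulo 256.

static int example_sfi_checksum_ok(const void *table, u32 len)
{
	const u8 *p = table;
	u8 sum = 0;

	while (len--)
		sum += *p++;

	return sum == 0;	/* a zero sum means the table is intact */
}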
diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h
new file mode 100644
index 00000000000..da82d39e104
--- /dev/null
+++ b/drivers/sfi/sfi_core.h
@@ -0,0 +1,70 @@
+/* sfi_core.h Simple Firmware Interface, internal header */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+struct sfi_table_key {
+ char *sig;
+ char *oem_id;
+ char *oem_table_id;
+};
+
+#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL }
+
+extern int __init sfi_acpi_init(void);
+extern struct sfi_table_header *sfi_check_table(u64 paddr,
+ struct sfi_table_key *key);
+struct sfi_table_header *sfi_get_table(struct sfi_table_key *key);
+extern void sfi_put_table(struct sfi_table_header *table);
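
A hypothetical user of the interface declared above (the handler, the init function and the "CPUS" signature are made-up examples, written as if placed in a file that already includes <linux/sfi.h>): sfi_table_parse() maps the matching table, runs the handler on it, and releases the mapping again via sfi_put_table().

static int __init example_cpus_handler(struct sfi_table_header *th)
{
	/* sig is not NUL-terminated, hence the %4.4s format */
	pr_info("%4.4s: %u bytes, revision %d\n", th->sig, th->len, th->rev);
	return 0;
}

static int __init example_walk_cpus(void)
{
	/* "CPUS" is only an example signature */
	return sfi_table_parse("CPUS", NULL, NULL, example_cpus_handler);
}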
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2c733c27db2..4b6f7cba3b3 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -117,10 +117,11 @@ config SPI_GPIO
speed with a custom version of this driver; see the source code.
config SPI_IMX
- tristate "Freescale iMX SPI controller"
- depends on ARCH_MX1 && EXPERIMENTAL
+ tristate "Freescale i.MX SPI controllers"
+ depends on ARCH_MXC
+ select SPI_BITBANG
help
- This enables using the Freescale iMX SPI controller in master
+ This enables using the Freescale i.MX SPI controllers in master
mode.
config SPI_LM70_LLP
@@ -173,11 +174,21 @@ config SPI_PL022
tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)"
depends on ARM_AMBA && EXPERIMENTAL
default y if MACH_U300
+ default y if ARCH_REALVIEW
+ default y if INTEGRATOR_IMPD1
+ default y if ARCH_VERSATILE
help
This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP
controller. If you have an embedded system with an AMBA(R)
bus and a PL022 controller, say Y or M here.
+config SPI_PPC4xx
+ tristate "PPC4xx SPI Controller"
+ depends on PPC32 && 4xx && SPI_MASTER
+ select SPI_BITBANG
+ help
+ This selects a driver for the PPC4xx SPI Controller.
+
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
depends on ARCH_PXA && EXPERIMENTAL
@@ -211,6 +222,12 @@ config SPI_SH_SCI
help
SPI driver for SuperH SCI blocks.
+config SPI_STMP3XXX
+ tristate "Freescale STMP37xx/378x SPI/SSP controller"
+ depends on ARCH_STMP3XXX && SPI_MASTER
+ help
+ SPI driver for Freescale STMP37xx/378x SoC SSP interface
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3de408d294b..6d7a3f82c54 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
-obj-$(CONFIG_SPI_IMX) += spi_imx.o
+obj-$(CONFIG_SPI_IMX) += mxc_spi.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
@@ -26,11 +26,13 @@ obj-$(CONFIG_SPI_ORION) += orion_spi.o
obj-$(CONFIG_SPI_PL022) += amba-pl022.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
+obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
+obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index c0f950a7cbe..958a3ffc898 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -532,7 +532,7 @@ static void restore_state(struct pl022 *pl022)
GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
- GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
@@ -1247,8 +1247,8 @@ static int verify_controller_parameters(struct pl022 *pl022,
return -EINVAL;
}
if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
- if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE)
- && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) {
+ if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
+ && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
dev_err(chip_info->dev,
"Clock Phase is configured incorrectly\n");
return -EINVAL;
@@ -1485,7 +1485,7 @@ static int pl022_setup(struct spi_device *spi)
chip_info->data_size = SSP_DATA_BITS_12;
chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
- chip_info->clk_phase = SSP_CLK_FALLING_EDGE;
+ chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
chip_info->ctrl_len = SSP_BITS_8;
chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
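
The renames above replace edge-based names with CPHA-style ones; a hypothetical helper (not part of this patch) makes the mapping explicit, since which physical edge samples data also depends on the clock polarity:

static int example_pl022_clk_phase(unsigned int spi_mode)
{
	/* CPHA=0 samples on the first clock transition, CPHA=1 on the second */
	return (spi_mode & SPI_CPHA) ? SSP_CLK_SECOND_EDGE : SSP_CLK_FIRST_EDGE;
}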
diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/mxc_spi.c
new file mode 100644
index 00000000000..b1447236ae8
--- /dev/null
+++ b/drivers/spi/mxc_spi.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Juergen Beisert
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#include <mach/spi.h>
+
+#define DRIVER_NAME "spi_imx"
+
+#define MXC_CSPIRXDATA 0x00
+#define MXC_CSPITXDATA 0x04
+#define MXC_CSPICTRL 0x08
+#define MXC_CSPIINT 0x0c
+#define MXC_RESET 0x1c
+
+/* generic defines to abstract from the different register layouts */
+#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
+#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+
+struct mxc_spi_config {
+ unsigned int speed_hz;
+ unsigned int bpw;
+ unsigned int mode;
+ int cs;
+};
+
+struct mxc_spi_data {
+ struct spi_bitbang bitbang;
+
+ struct completion xfer_done;
+ void *base;
+ int irq;
+ struct clk *clk;
+ unsigned long spi_clk;
+ int *chipselect;
+
+ unsigned int count;
+ void (*tx)(struct mxc_spi_data *);
+ void (*rx)(struct mxc_spi_data *);
+ void *rx_buf;
+ const void *tx_buf;
+ unsigned int txfifo; /* number of words pushed in tx FIFO */
+
+ /* SoC specific functions */
+ void (*intctrl)(struct mxc_spi_data *, int);
+ int (*config)(struct mxc_spi_data *, struct mxc_spi_config *);
+ void (*trigger)(struct mxc_spi_data *);
+ int (*rx_available)(struct mxc_spi_data *);
+};
+
+#define MXC_SPI_BUF_RX(type) \
+static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi) \
+{ \
+ unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA); \
+ \
+ if (mxc_spi->rx_buf) { \
+ *(type *)mxc_spi->rx_buf = val; \
+ mxc_spi->rx_buf += sizeof(type); \
+ } \
+}
+
+#define MXC_SPI_BUF_TX(type) \
+static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi) \
+{ \
+ type val = 0; \
+ \
+ if (mxc_spi->tx_buf) { \
+ val = *(type *)mxc_spi->tx_buf; \
+ mxc_spi->tx_buf += sizeof(type); \
+ } \
+ \
+ mxc_spi->count -= sizeof(type); \
+ \
+ writel(val, mxc_spi->base + MXC_CSPITXDATA); \
+}
+
+MXC_SPI_BUF_RX(u8)
+MXC_SPI_BUF_TX(u8)
+MXC_SPI_BUF_RX(u16)
+MXC_SPI_BUF_TX(u16)
+MXC_SPI_BUF_RX(u32)
+MXC_SPI_BUF_TX(u32)
+
+/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
+ * (which is currently not the case in this driver)
+ */
+static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
+ 256, 384, 512, 768, 1024};
+
+/* MX21, MX27 */
+static unsigned int mxc_spi_clkdiv_1(unsigned int fin,
+ unsigned int fspi)
+{
+ int i, max;
+
+ if (cpu_is_mx21())
+ max = 18;
+ else
+ max = 16;
+
+ for (i = 2; i < max; i++)
+ if (fspi * mxc_clkdivs[i] >= fin)
+ return i;
+
+ return max;
+}
+
+/* MX1, MX31, MX35 */
+static unsigned int mxc_spi_clkdiv_2(unsigned int fin,
+ unsigned int fspi)
+{
+ int i, div = 4;
+
+ for (i = 0; i < 7; i++) {
+ if (fspi * div >= fin)
+ return i;
+ div <<= 1;
+ }
+
+ return 7;
+}
+
+#define MX31_INTREG_TEEN (1 << 0)
+#define MX31_INTREG_RREN (1 << 3)
+
+#define MX31_CSPICTRL_ENABLE (1 << 0)
+#define MX31_CSPICTRL_MASTER (1 << 1)
+#define MX31_CSPICTRL_XCH (1 << 2)
+#define MX31_CSPICTRL_POL (1 << 4)
+#define MX31_CSPICTRL_PHA (1 << 5)
+#define MX31_CSPICTRL_SSCTL (1 << 6)
+#define MX31_CSPICTRL_SSPOL (1 << 7)
+#define MX31_CSPICTRL_BC_SHIFT 8
+#define MX35_CSPICTRL_BL_SHIFT 20
+#define MX31_CSPICTRL_CS_SHIFT 24
+#define MX35_CSPICTRL_CS_SHIFT 12
+#define MX31_CSPICTRL_DR_SHIFT 16
+
+#define MX31_CSPISTATUS 0x14
+#define MX31_STATUS_RR (1 << 3)
+
+/* These functions also work for the i.MX35, but be aware that
+ * the i.MX35 has a slightly different register layout for bits
+ * we do not use here.
+ */
+static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX31_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX31_INTREG_RREN;
+
+ writel(val, mxc_spi->base + MXC_CSPIINT);
+}
+
+static void mx31_trigger(struct mxc_spi_data *mxc_spi)
+{
+ unsigned int reg;
+
+ reg = readl(mxc_spi->base + MXC_CSPICTRL);
+ reg |= MX31_CSPICTRL_XCH;
+ writel(reg, mxc_spi->base + MXC_CSPICTRL);
+}
+
+static int mx31_config(struct mxc_spi_data *mxc_spi,
+ struct mxc_spi_config *config)
+{
+ unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
+
+ reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) <<
+ MX31_CSPICTRL_DR_SHIFT;
+
+ if (cpu_is_mx31())
+ reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+ else if (cpu_is_mx35()) {
+ reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+ reg |= MX31_CSPICTRL_SSCTL;
+ }
+
+ if (config->mode & SPI_CPHA)
+ reg |= MX31_CSPICTRL_PHA;
+ if (config->mode & SPI_CPOL)
+ reg |= MX31_CSPICTRL_POL;
+ if (config->mode & SPI_CS_HIGH)
+ reg |= MX31_CSPICTRL_SSPOL;
+ if (config->cs < 0) {
+ if (cpu_is_mx31())
+ reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT;
+ else if (cpu_is_mx35())
+ reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
+ }
+
+ writel(reg, mxc_spi->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int mx31_rx_available(struct mxc_spi_data *mxc_spi)
+{
+ return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
+}
+
+#define MX27_INTREG_RR (1 << 4)
+#define MX27_INTREG_TEEN (1 << 9)
+#define MX27_INTREG_RREN (1 << 13)
+
+#define MX27_CSPICTRL_POL (1 << 5)
+#define MX27_CSPICTRL_PHA (1 << 6)
+#define MX27_CSPICTRL_SSPOL (1 << 8)
+#define MX27_CSPICTRL_XCH (1 << 9)
+#define MX27_CSPICTRL_ENABLE (1 << 10)
+#define MX27_CSPICTRL_MASTER (1 << 11)
+#define MX27_CSPICTRL_DR_SHIFT 14
+#define MX27_CSPICTRL_CS_SHIFT 19
+
+static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX27_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX27_INTREG_RREN;
+
+ writel(val, mxc_spi->base + MXC_CSPIINT);
+}
+
+static void mx27_trigger(struct mxc_spi_data *mxc_spi)
+{
+ unsigned int reg;
+
+ reg = readl(mxc_spi->base + MXC_CSPICTRL);
+ reg |= MX27_CSPICTRL_XCH;
+ writel(reg, mxc_spi->base + MXC_CSPICTRL);
+}
+
+static int mx27_config(struct mxc_spi_data *mxc_spi,
+ struct mxc_spi_config *config)
+{
+ unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
+
+ reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) <<
+ MX27_CSPICTRL_DR_SHIFT;
+ reg |= config->bpw - 1;
+
+ if (config->mode & SPI_CPHA)
+ reg |= MX27_CSPICTRL_PHA;
+ if (config->mode & SPI_CPOL)
+ reg |= MX27_CSPICTRL_POL;
+ if (config->mode & SPI_CS_HIGH)
+ reg |= MX27_CSPICTRL_SSPOL;
+ if (config->cs < 0)
+ reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
+
+ writel(reg, mxc_spi->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int mx27_rx_available(struct mxc_spi_data *mxc_spi)
+{
+ return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR;
+}
+
+#define MX1_INTREG_RR (1 << 3)
+#define MX1_INTREG_TEEN (1 << 8)
+#define MX1_INTREG_RREN (1 << 11)
+
+#define MX1_CSPICTRL_POL (1 << 4)
+#define MX1_CSPICTRL_PHA (1 << 5)
+#define MX1_CSPICTRL_XCH (1 << 8)
+#define MX1_CSPICTRL_ENABLE (1 << 9)
+#define MX1_CSPICTRL_MASTER (1 << 10)
+#define MX1_CSPICTRL_DR_SHIFT 13
+
+static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX1_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX1_INTREG_RREN;
+
+ writel(val, mxc_spi->base + MXC_CSPIINT);
+}
+
+static void mx1_trigger(struct mxc_spi_data *mxc_spi)
+{
+ unsigned int reg;
+
+ reg = readl(mxc_spi->base + MXC_CSPICTRL);
+ reg |= MX1_CSPICTRL_XCH;
+ writel(reg, mxc_spi->base + MXC_CSPICTRL);
+}
+
+static int mx1_config(struct mxc_spi_data *mxc_spi,
+ struct mxc_spi_config *config)
+{
+ unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
+
+ reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) <<
+ MX1_CSPICTRL_DR_SHIFT;
+ reg |= config->bpw - 1;
+
+ if (config->mode & SPI_CPHA)
+ reg |= MX1_CSPICTRL_PHA;
+ if (config->mode & SPI_CPOL)
+ reg |= MX1_CSPICTRL_POL;
+
+ writel(reg, mxc_spi->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int mx1_rx_available(struct mxc_spi_data *mxc_spi)
+{
+ return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR;
+}
+
+static void mxc_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master);
+ unsigned int cs = 0;
+ int gpio = mxc_spi->chipselect[spi->chip_select];
+ struct mxc_spi_config config;
+
+ if (spi->mode & SPI_CS_HIGH)
+ cs = 1;
+
+ if (is_active == BITBANG_CS_INACTIVE) {
+ if (gpio >= 0)
+ gpio_set_value(gpio, !cs);
+ return;
+ }
+
+ config.bpw = spi->bits_per_word;
+ config.speed_hz = spi->max_speed_hz;
+ config.mode = spi->mode;
+ config.cs = mxc_spi->chipselect[spi->chip_select];
+
+ mxc_spi->config(mxc_spi, &config);
+
+ /* Initialize the functions for transfer */
+ if (config.bpw <= 8) {
+ mxc_spi->rx = mxc_spi_buf_rx_u8;
+ mxc_spi->tx = mxc_spi_buf_tx_u8;
+ } else if (config.bpw <= 16) {
+ mxc_spi->rx = mxc_spi_buf_rx_u16;
+ mxc_spi->tx = mxc_spi_buf_tx_u16;
+ } else if (config.bpw <= 32) {
+ mxc_spi->rx = mxc_spi_buf_rx_u32;
+ mxc_spi->tx = mxc_spi_buf_tx_u32;
+ } else
+ BUG();
+
+ if (gpio >= 0)
+ gpio_set_value(gpio, cs);
+
+ return;
+}
+
+static void mxc_spi_push(struct mxc_spi_data *mxc_spi)
+{
+ while (mxc_spi->txfifo < 8) {
+ if (!mxc_spi->count)
+ break;
+ mxc_spi->tx(mxc_spi);
+ mxc_spi->txfifo++;
+ }
+
+ mxc_spi->trigger(mxc_spi);
+}
+
+static irqreturn_t mxc_spi_isr(int irq, void *dev_id)
+{
+ struct mxc_spi_data *mxc_spi = dev_id;
+
+ while (mxc_spi->rx_available(mxc_spi)) {
+ mxc_spi->rx(mxc_spi);
+ mxc_spi->txfifo--;
+ }
+
+ if (mxc_spi->count) {
+ mxc_spi_push(mxc_spi);
+ return IRQ_HANDLED;
+ }
+
+ if (mxc_spi->txfifo) {
+ /* No data left to push, but still waiting for rx data,
+ * enable receive data available interrupt.
+ */
+ mxc_spi->intctrl(mxc_spi, MXC_INT_RR);
+ return IRQ_HANDLED;
+ }
+
+ mxc_spi->intctrl(mxc_spi, 0);
+ complete(&mxc_spi->xfer_done);
+
+ return IRQ_HANDLED;
+}
+
+static int mxc_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master);
+ struct mxc_spi_config config;
+
+ config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+ config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ config.mode = spi->mode;
+
+ mxc_spi->config(mxc_spi, &config);
+
+ return 0;
+}
+
+static int mxc_spi_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master);
+
+ mxc_spi->tx_buf = transfer->tx_buf;
+ mxc_spi->rx_buf = transfer->rx_buf;
+ mxc_spi->count = transfer->len;
+ mxc_spi->txfifo = 0;
+
+ init_completion(&mxc_spi->xfer_done);
+
+ mxc_spi_push(mxc_spi);
+
+ mxc_spi->intctrl(mxc_spi, MXC_INT_TE);
+
+ wait_for_completion(&mxc_spi->xfer_done);
+
+ return transfer->len;
+}
+
+static int mxc_spi_setup(struct spi_device *spi)
+{
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
+ spi->mode, spi->bits_per_word, spi->max_speed_hz);
+
+ mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+
+ return 0;
+}
+
+static void mxc_spi_cleanup(struct spi_device *spi)
+{
+}
+
+static int __init mxc_spi_probe(struct platform_device *pdev)
+{
+ struct spi_imx_master *mxc_platform_info;
+ struct spi_master *master;
+ struct mxc_spi_data *mxc_spi;
+ struct resource *res;
+ int i, ret;
+
+ mxc_platform_info = (struct spi_imx_master *)pdev->dev.platform_data;
+ if (!mxc_platform_info) {
+ dev_err(&pdev->dev, "can't get the platform data\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = mxc_platform_info->num_chipselect;
+
+ mxc_spi = spi_master_get_devdata(master);
+ mxc_spi->bitbang.master = spi_master_get(master);
+ mxc_spi->chipselect = mxc_platform_info->chipselect;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (mxc_spi->chipselect[i] < 0)
+ continue;
+ ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME);
+ if (ret) {
+ i--;
+ while (i > 0)
+ if (mxc_spi->chipselect[i] >= 0)
+ gpio_free(mxc_spi->chipselect[i--]);
+ dev_err(&pdev->dev, "can't get cs gpios");
+ goto out_master_put;
+ }
+ gpio_direction_output(mxc_spi->chipselect[i], 1);
+ }
+
+ mxc_spi->bitbang.chipselect = mxc_spi_chipselect;
+ mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer;
+ mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer;
+ mxc_spi->bitbang.master->setup = mxc_spi_setup;
+ mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup;
+
+ init_completion(&mxc_spi->xfer_done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't get platform resource\n");
+ ret = -ENOMEM;
+ goto out_gpio_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ ret = -EBUSY;
+ goto out_gpio_free;
+ }
+
+ mxc_spi->base = ioremap(res->start, resource_size(res));
+ if (!mxc_spi->base) {
+ ret = -EINVAL;
+ goto out_release_mem;
+ }
+
+ mxc_spi->irq = platform_get_irq(pdev, 0);
+ if (!mxc_spi->irq) {
+ ret = -EINVAL;
+ goto out_iounmap;
+ }
+
+ ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret);
+ goto out_iounmap;
+ }
+
+ if (cpu_is_mx31() || cpu_is_mx35()) {
+ mxc_spi->intctrl = mx31_intctrl;
+ mxc_spi->config = mx31_config;
+ mxc_spi->trigger = mx31_trigger;
+ mxc_spi->rx_available = mx31_rx_available;
+ } else if (cpu_is_mx27() || cpu_is_mx21()) {
+ mxc_spi->intctrl = mx27_intctrl;
+ mxc_spi->config = mx27_config;
+ mxc_spi->trigger = mx27_trigger;
+ mxc_spi->rx_available = mx27_rx_available;
+ } else if (cpu_is_mx1()) {
+ mxc_spi->intctrl = mx1_intctrl;
+ mxc_spi->config = mx1_config;
+ mxc_spi->trigger = mx1_trigger;
+ mxc_spi->rx_available = mx1_rx_available;
+ } else
+ BUG();
+
+ mxc_spi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mxc_spi->clk)) {
+ dev_err(&pdev->dev, "unable to get clock\n");
+ ret = PTR_ERR(mxc_spi->clk);
+ goto out_free_irq;
+ }
+
+ clk_enable(mxc_spi->clk);
+ mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk);
+
+ if (!cpu_is_mx31() && !cpu_is_mx35())
+ writel(1, mxc_spi->base + MXC_RESET);
+
+ mxc_spi->intctrl(mxc_spi, 0);
+
+ ret = spi_bitbang_start(&mxc_spi->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
+
+ dev_info(&pdev->dev, "probed\n");
+
+ return ret;
+
+out_clk_put:
+ clk_disable(mxc_spi->clk);
+ clk_put(mxc_spi->clk);
+out_free_irq:
+ free_irq(mxc_spi->irq, mxc_spi);
+out_iounmap:
+ iounmap(mxc_spi->base);
+out_release_mem:
+ release_mem_region(res->start, resource_size(res));
+out_gpio_free:
+ for (i = 0; i < master->num_chipselect; i++)
+ if (mxc_spi->chipselect[i] >= 0)
+ gpio_free(mxc_spi->chipselect[i]);
+out_master_put:
+ spi_master_put(master);
+ kfree(master);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int __exit mxc_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master);
+ int i;
+
+ spi_bitbang_stop(&mxc_spi->bitbang);
+
+ writel(0, mxc_spi->base + MXC_CSPICTRL);
+ clk_disable(mxc_spi->clk);
+ clk_put(mxc_spi->clk);
+ free_irq(mxc_spi->irq, mxc_spi);
+ iounmap(mxc_spi->base);
+
+ for (i = 0; i < master->num_chipselect; i++)
+ if (mxc_spi->chipselect[i] >= 0)
+ gpio_free(mxc_spi->chipselect[i]);
+
+ spi_master_put(master);
+
+ release_mem_region(res->start, resource_size(res));
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mxc_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = mxc_spi_probe,
+ .remove = __exit_p(mxc_spi_remove),
+};
+
+static int __init mxc_spi_init(void)
+{
+ return platform_driver_register(&mxc_spi_driver);
+}
+
+static void __exit mxc_spi_exit(void)
+{
+ platform_driver_unregister(&mxc_spi_driver);
+}
+
+module_init(mxc_spi_init);
+module_exit(mxc_spi_exit);
+
+MODULE_DESCRIPTION("SPI Master Controller driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
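
For illustration only (the helper below is hypothetical and would live alongside the static functions in this file): mxc_spi_clkdiv_2() picks the smallest divider from 4, 8, ..., 512 whose resulting rate does not exceed the requested speed, falling back to the largest divider if none qualifies, so the SPI clock that actually results can be computed as:

static unsigned int example_mx31_effective_speed(unsigned int fin,
						 unsigned int fspi)
{
	unsigned int i = mxc_spi_clkdiv_2(fin, fspi);

	return fin / (4 << i);	/* divider is 4 << i, i.e. 4..512 */
}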
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 9b80ad36dbb..ba1a872b221 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -41,6 +41,9 @@
#define OMAP2_MCSPI_MAX_FREQ 48000000
+/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
+#define OMAP2_MCSPI_MAX_CTRL 4
+
#define OMAP2_MCSPI_REVISION 0x00
#define OMAP2_MCSPI_SYSCONFIG 0x10
#define OMAP2_MCSPI_SYSSTATUS 0x14
@@ -59,40 +62,40 @@
/* per-register bitmasks: */
-#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE (2 << 3)
-#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP (1 << 2)
-#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
-#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4)
+#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2)
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1)
-#define OMAP2_MCSPI_SYSSTATUS_RESETDONE (1 << 0)
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
-#define OMAP2_MCSPI_MODULCTRL_SINGLE (1 << 0)
-#define OMAP2_MCSPI_MODULCTRL_MS (1 << 2)
-#define OMAP2_MCSPI_MODULCTRL_STEST (1 << 3)
+#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
+#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
+#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
-#define OMAP2_MCSPI_CHCONF_PHA (1 << 0)
-#define OMAP2_MCSPI_CHCONF_POL (1 << 1)
+#define OMAP2_MCSPI_CHCONF_PHA BIT(0)
+#define OMAP2_MCSPI_CHCONF_POL BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
-#define OMAP2_MCSPI_CHCONF_EPOL (1 << 6)
+#define OMAP2_MCSPI_CHCONF_EPOL BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
-#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY (0x01 << 12)
-#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY (0x02 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
-#define OMAP2_MCSPI_CHCONF_DMAW (1 << 14)
-#define OMAP2_MCSPI_CHCONF_DMAR (1 << 15)
-#define OMAP2_MCSPI_CHCONF_DPE0 (1 << 16)
-#define OMAP2_MCSPI_CHCONF_DPE1 (1 << 17)
-#define OMAP2_MCSPI_CHCONF_IS (1 << 18)
-#define OMAP2_MCSPI_CHCONF_TURBO (1 << 19)
-#define OMAP2_MCSPI_CHCONF_FORCE (1 << 20)
+#define OMAP2_MCSPI_CHCONF_DMAW BIT(14)
+#define OMAP2_MCSPI_CHCONF_DMAR BIT(15)
+#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16)
+#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17)
+#define OMAP2_MCSPI_CHCONF_IS BIT(18)
+#define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
+#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
-#define OMAP2_MCSPI_CHSTAT_RXS (1 << 0)
-#define OMAP2_MCSPI_CHSTAT_TXS (1 << 1)
-#define OMAP2_MCSPI_CHSTAT_EOT (1 << 2)
+#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
+#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
+#define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
-#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
+#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
-#define OMAP2_MCSPI_WAKEUPENABLE_WKEN (1 << 0)
+#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
@@ -131,8 +134,23 @@ struct omap2_mcspi_cs {
void __iomem *base;
unsigned long phys;
int word_len;
+ struct list_head node;
+ /* Context save and restore shadow register */
+ u32 chconf0;
+};
+
+/* Used for context save and restore; structure members must be updated
+ * whenever the corresponding registers are modified.
+ */
+struct omap2_mcspi_regs {
+ u32 sysconfig;
+ u32 modulctrl;
+ u32 wakeupenable;
+ struct list_head cs;
};
+static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
+
static struct workqueue_struct *omap2_mcspi_wq;
#define MOD_REG_BIT(val, mask, set) do { \
@@ -172,12 +190,27 @@ static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
return __raw_readl(cs->base + idx);
}
+static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return cs->chconf0;
+}
+
+static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ cs->chconf0 = val;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
+}
+
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
int is_read, int enable)
{
u32 l, rw;
- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ l = mcspi_cached_chconf0(spi);
if (is_read) /* 1 is read, 0 write */
rw = OMAP2_MCSPI_CHCONF_DMAR;
@@ -185,7 +218,7 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
rw = OMAP2_MCSPI_CHCONF_DMAW;
MOD_REG_BIT(l, rw, enable);
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+ mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
@@ -200,9 +233,9 @@ static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
u32 l;
- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ l = mcspi_cached_chconf0(spi);
MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+ mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_master_mode(struct spi_master *master)
@@ -217,6 +250,46 @@ static void omap2_mcspi_set_master_mode(struct spi_master *master)
MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
+
+ omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
+}
+
+static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
+{
+ struct spi_master *spi_cntrl;
+ struct omap2_mcspi_cs *cs;
+ spi_cntrl = mcspi->master;
+
+ /* McSPI: context restore */
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
+ omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
+
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
+ omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);
+
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
+ omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
+
+ list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
+ node)
+ __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+}
+static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
+{
+ clk_disable(mcspi->ick);
+ clk_disable(mcspi->fck);
+}
+
+static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
+{
+ if (clk_enable(mcspi->ick))
+ return -ENODEV;
+ if (clk_enable(mcspi->fck))
+ return -ENODEV;
+
+ omap2_mcspi_restore_ctx(mcspi);
+
+ return 0;
}
static unsigned
@@ -357,7 +430,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
c = count;
word_len = cs->word_len;
- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ l = mcspi_cached_chconf0(spi);
l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
/* We store the pre-calculated register addresses on stack to speed
@@ -397,8 +470,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
* more word i/o: switch to rx+tx
*/
if (c == 0 && tx == NULL)
- mcspi_write_cs_reg(spi,
- OMAP2_MCSPI_CHCONF0, l);
+ mcspi_write_chconf0(spi, l);
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %02x\n",
@@ -436,8 +508,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
* more word i/o: switch to rx+tx
*/
if (c == 0 && tx == NULL)
- mcspi_write_cs_reg(spi,
- OMAP2_MCSPI_CHCONF0, l);
+ mcspi_write_chconf0(spi, l);
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %04x\n",
@@ -475,8 +546,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
* more word i/o: switch to rx+tx
*/
if (c == 0 && tx == NULL)
- mcspi_write_cs_reg(spi,
- OMAP2_MCSPI_CHCONF0, l);
+ mcspi_write_chconf0(spi, l);
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %04x\n",
@@ -505,10 +575,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
{
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
+ struct spi_master *spi_cntrl;
u32 l = 0, div = 0;
u8 word_len = spi->bits_per_word;
mcspi = spi_master_get_devdata(spi->master);
+ spi_cntrl = mcspi->master;
if (t != NULL && t->bits_per_word)
word_len = t->bits_per_word;
@@ -522,7 +594,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
} else
div = 15;
- l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ l = mcspi_cached_chconf0(spi);
/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
* REVISIT: this controller could support SPI_3WIRE mode.
@@ -554,7 +626,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
else
l &= ~OMAP2_MCSPI_CHCONF_PHA;
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+ mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
OMAP2_MCSPI_MAX_FREQ / (1 << div),
@@ -647,7 +719,11 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;
cs->phys = mcspi->phys + spi->chip_select * 0x14;
+ cs->chconf0 = 0;
spi->controller_state = cs;
+ /* Link this to context save list */
+ list_add_tail(&cs->node,
+ &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
}
if (mcspi_dma->dma_rx_channel == -1
@@ -657,11 +733,11 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return ret;
}
- clk_enable(mcspi->ick);
- clk_enable(mcspi->fck);
+ if (omap2_mcspi_enable_clocks(mcspi))
+ return -ENODEV;
+
ret = omap2_mcspi_setup_transfer(spi, NULL);
- clk_disable(mcspi->fck);
- clk_disable(mcspi->ick);
+ omap2_mcspi_disable_clocks(mcspi);
return ret;
}
@@ -670,10 +746,15 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
+ struct omap2_mcspi_cs *cs;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ /* Unlink controller state from context save list */
+ cs = spi->controller_state;
+ list_del(&cs->node);
+
kfree(spi->controller_state);
if (mcspi_dma->dma_rx_channel != -1) {
@@ -693,8 +774,8 @@ static void omap2_mcspi_work(struct work_struct *work)
mcspi = container_of(work, struct omap2_mcspi, work);
spin_lock_irq(&mcspi->lock);
- clk_enable(mcspi->ick);
- clk_enable(mcspi->fck);
+ if (omap2_mcspi_enable_clocks(mcspi))
+ goto out;
/* We only enable one channel at a time -- the one whose message is
* at the head of the queue -- although this controller would gladly
@@ -741,13 +822,13 @@ static void omap2_mcspi_work(struct work_struct *work)
cs_active = 1;
}
- chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ chconf = mcspi_cached_chconf0(spi);
chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
if (t->tx_buf == NULL)
chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
else if (t->rx_buf == NULL)
chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf);
+ mcspi_write_chconf0(spi, chconf);
if (t->len) {
unsigned count;
@@ -796,9 +877,9 @@ static void omap2_mcspi_work(struct work_struct *work)
spin_lock_irq(&mcspi->lock);
}
- clk_disable(mcspi->fck);
- clk_disable(mcspi->ick);
+ omap2_mcspi_disable_clocks(mcspi);
+out:
spin_unlock_irq(&mcspi->lock);
}
@@ -885,8 +966,8 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
struct spi_master *master = mcspi->master;
u32 tmp;
- clk_enable(mcspi->ick);
- clk_enable(mcspi->fck);
+ if (omap2_mcspi_enable_clocks(mcspi))
+ return -1;
mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
@@ -894,18 +975,18 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
} while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
- mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
- OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
- OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
- OMAP2_MCSPI_SYSCONFIG_SMARTIDLE);
+ tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
+ OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
+ OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
+ omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;
- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
- OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
+ omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;
omap2_mcspi_set_master_mode(master);
-
- clk_disable(mcspi->fck);
- clk_disable(mcspi->ick);
+ omap2_mcspi_disable_clocks(mcspi);
return 0;
}
@@ -933,7 +1014,8 @@ static u8 __initdata spi2_txdma_id[] = {
OMAP24XX_DMA_SPI2_TX1,
};
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) \
+ || defined(CONFIG_ARCH_OMAP4)
static u8 __initdata spi3_rxdma_id[] = {
OMAP24XX_DMA_SPI3_RX0,
OMAP24XX_DMA_SPI3_RX1,
@@ -945,7 +1027,7 @@ static u8 __initdata spi3_txdma_id[] = {
};
#endif
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static u8 __initdata spi4_rxdma_id[] = {
OMAP34XX_DMA_SPI4_RX0,
};
@@ -975,14 +1057,15 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
txdma_id = spi2_txdma_id;
num_chipselect = 2;
break;
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
+ || defined(CONFIG_ARCH_OMAP4)
case 3:
rxdma_id = spi3_rxdma_id;
txdma_id = spi3_txdma_id;
num_chipselect = 2;
break;
#endif
-#ifdef CONFIG_ARCH_OMAP3
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
case 4:
rxdma_id = spi4_rxdma_id;
txdma_id = spi4_txdma_id;
@@ -1038,6 +1121,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
spin_lock_init(&mcspi->lock);
INIT_LIST_HEAD(&mcspi->msg_queue);
+ INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
mcspi->ick = clk_get(&pdev->dev, "ick");
if (IS_ERR(mcspi->ick)) {
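
A minimal sketch of the shadow-register idea the CHCONF0 changes above introduce (the struct and function names are made up, not the driver's API): every write goes through a cached copy, so the value can be replayed after the module loses context, e.g. across clock gating, without reading the hardware back.

struct example_shadowed_reg {
	void __iomem *addr;
	u32 cached;
};

static void example_reg_write(struct example_shadowed_reg *r, u32 val)
{
	r->cached = val;			/* keep the software copy in sync */
	__raw_writel(val, r->addr);		/* then update the hardware */
}

static void example_reg_restore(struct example_shadowed_reg *r)
{
	__raw_writel(r->cached, r->addr);	/* replay after context loss */
}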
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 8980a5640bd..e75ba9b2889 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -213,7 +213,7 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
unsigned bits = ust->bits_per_word;
unsigned bytes;
u16 val, w;
- int status = 0;;
+ int status = 0;
if (!t->tx_buf && !t->rx_buf)
return 0;
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index d949dbf1141..c8c2b693ffa 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1668,10 +1668,9 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-
-static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
+static int pxa2xx_spi_suspend(struct device *dev)
{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
+ struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
@@ -1684,9 +1683,9 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int pxa2xx_spi_resume(struct platform_device *pdev)
+static int pxa2xx_spi_resume(struct device *dev)
{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
+ struct driver_data *drv_data = dev_get_drvdata(dev);
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
@@ -1703,33 +1702,36 @@ static int pxa2xx_spi_resume(struct platform_device *pdev)
/* Start the queue running */
status = start_queue(drv_data);
if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
+ dev_err(dev, "problem starting queue (%d)\n", status);
return status;
}
return 0;
}
-#else
-#define pxa2xx_spi_suspend NULL
-#define pxa2xx_spi_resume NULL
-#endif /* CONFIG_PM */
+
+static struct dev_pm_ops pxa2xx_spi_pm_ops = {
+ .suspend = pxa2xx_spi_suspend,
+ .resume = pxa2xx_spi_resume,
+};
+#endif
static struct platform_driver driver = {
.driver = {
- .name = "pxa2xx-spi",
- .owner = THIS_MODULE,
+ .name = "pxa2xx-spi",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pxa2xx_spi_pm_ops,
+#endif
},
.remove = pxa2xx_spi_remove,
.shutdown = pxa2xx_spi_shutdown,
- .suspend = pxa2xx_spi_suspend,
- .resume = pxa2xx_spi_resume,
};
static int __init pxa2xx_spi_init(void)
{
return platform_driver_probe(&driver, pxa2xx_spi_probe);
}
-module_init(pxa2xx_spi_init);
+subsys_initcall(pxa2xx_spi_init);
static void __exit pxa2xx_spi_exit(void)
{
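
The pxa2xx change above is the standard conversion from platform-bus suspend/resume callbacks to dev_pm_ops; a generic sketch of the pattern, with a hypothetical driver name and callbacks:

static int example_suspend(struct device *dev)
{
	/* quiesce the hardware; state is reached via dev_get_drvdata(dev) */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static struct platform_driver example_driver = {
	.driver = {
		.name  = "example",
		.owner = THIS_MODULE,
		.pm    = &example_pm_ops,
	},
};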
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 70845ccd85c..b76f2468a84 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
@@ -59,9 +60,32 @@ static struct device_attribute spi_dev_attrs[] = {
* and the sysfs version makes coldplug work too.
*/
+static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
+ const struct spi_device *sdev)
+{
+ while (id->name[0]) {
+ if (!strcmp(sdev->modalias, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
+
+ return spi_match_id(sdrv->id_table, sdev);
+}
+EXPORT_SYMBOL_GPL(spi_get_device_id);
+
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
const struct spi_device *spi = to_spi_device(dev);
+ const struct spi_driver *sdrv = to_spi_driver(drv);
+
+ if (sdrv->id_table)
+ return !!spi_match_id(sdrv->id_table, spi);
return strcmp(spi->modalias, drv->name) == 0;
}
@@ -70,7 +94,7 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
- add_uevent_var(env, "MODALIAS=%s", spi->modalias);
+ add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
return 0;
}
@@ -639,6 +663,65 @@ int spi_setup(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_setup);
+/**
+ * spi_async - asynchronous SPI transfer
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some device might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+
+ /* Half-duplex links include original MicroWire, and ones with
+ * only one data pin like SPI_3WIRE (switches direction) or where
+ * either MOSI or MISO is missing. They can also be caused by
+ * software limitations.
+ */
+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
+ || (spi->mode & SPI_3WIRE)) {
+ struct spi_transfer *xfer;
+ unsigned flags = master->flags;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ if (xfer->rx_buf && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+ return -EINVAL;
+ }
+ }
+
+ message->spi = spi;
+ message->status = -EINPROGRESS;
+ return master->transfer(spi, message);
+}
+EXPORT_SYMBOL_GPL(spi_async);
+
/*-------------------------------------------------------------------------*/
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
deleted file mode 100644
index c195e45f7f3..00000000000
--- a/drivers/spi/spi_imx.c
+++ /dev/null
@@ -1,1770 +0,0 @@
-/*
- * drivers/spi/spi_imx.c
- *
- * Copyright (C) 2006 SWAPP
- * Andrea Paterniani <a.paterniani@swapp-eng.it>
- *
- * Initial version inspired by:
- * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/spi/spi.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/delay.h>
-
-#include <mach/hardware.h>
-#include <mach/imx-dma.h>
-#include <mach/spi_imx.h>
-
-/*-------------------------------------------------------------------------*/
-/* SPI Registers offsets from peripheral base address */
-#define SPI_RXDATA (0x00)
-#define SPI_TXDATA (0x04)
-#define SPI_CONTROL (0x08)
-#define SPI_INT_STATUS (0x0C)
-#define SPI_TEST (0x10)
-#define SPI_PERIOD (0x14)
-#define SPI_DMA (0x18)
-#define SPI_RESET (0x1C)
-
-/* SPI Control Register Bit Fields & Masks */
-#define SPI_CONTROL_BITCOUNT_MASK (0xF) /* Bit Count Mask */
-#define SPI_CONTROL_BITCOUNT(n) (((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
-#define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */
-#define SPI_CONTROL_POL_ACT_HIGH (0x0 << 4) /* Active high pol. (0=idle) */
-#define SPI_CONTROL_POL_ACT_LOW (0x1 << 4) /* Active low pol. (1=idle) */
-#define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */
-#define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */
-#define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */
-#define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */
-#define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low between SPI burst
- Slave: RXFIFO advanced by BIT_COUNT */
-#define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS insert pulse between SPI burst
- Slave: RXFIFO advanced by /SS rising edge */
-#define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */
-#define SPI_CONTROL_SSPOL_ACT_LOW (0x0 << 7) /* /SS Active low */
-#define SPI_CONTROL_SSPOL_ACT_HIGH (0x1 << 7) /* /SS Active high */
-#define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */
-#define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */
-#define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */
-#define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */
-#define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */
-#define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */
-#define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */
-#define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge triggers input */
-#define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level triggers input */
-#define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */
-#define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */
-#define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */
-#define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13)
-#define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13)
-#define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1)
-
-/* SPI Interrupt/Status Register Bit Fields & Masks */
-#define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */
-#define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */
-#define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */
-#define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */
-#define SPI_STATUS_RH (0x1 << 4) /* RXFIFO Half Status */
-#define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */
-#define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */
-#define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */
-#define SPI_STATUS (0xFF) /* SPI Status Mask */
-#define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt Enable */
-#define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt Enable */
-#define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt Enable */
-#define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt Enable */
-#define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt Enable */
-#define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt Enable */
-#define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt Enable */
-#define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow Interrupt Enable */
-#define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */
-
-/* SPI Test Register Bit Fields & Masks */
-#define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */
-#define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */
-#define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */
-#define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */
-#define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */
-
-/* SPI Period Register Bit Fields & Masks */
-#define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */
-#define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between
- Transactions */
-#define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */
-#define SPI_PERIOD_CSRC_BCLK (0x0 << 15) /* Period Clock Source is
- Bit Clock */
-#define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is
- 32.768 KHz Clock */
-
-/* SPI DMA Register Bit Fields & Masks */
-#define SPI_DMA_RHDMA (0x1 << 4) /* RXFIFO Half Status */
-#define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */
-#define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */
-#define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */
-#define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request Enable */
-#define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request Enable */
-#define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request Enable */
-#define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request Enable */
-
-/* SPI Soft Reset Register Bit Fields & Masks */
-#define SPI_RESET_START (0x1) /* Start */
-
-/* Default SPI configuration values */
-#define SPI_DEFAULT_CONTROL \
-( \
- SPI_CONTROL_BITCOUNT(16) | \
- SPI_CONTROL_POL_ACT_HIGH | \
- SPI_CONTROL_PHA_0 | \
- SPI_CONTROL_SPIEN | \
- SPI_CONTROL_SSCTL_1 | \
- SPI_CONTROL_MODE_MASTER | \
- SPI_CONTROL_DRCTL_0 | \
- SPI_CONTROL_DATARATE_MIN \
-)
-#define SPI_DEFAULT_ENABLE_LOOPBACK (0)
-#define SPI_DEFAULT_ENABLE_DMA (0)
-#define SPI_DEFAULT_PERIOD_WAIT (8)
-/*-------------------------------------------------------------------------*/
-
-
-/*-------------------------------------------------------------------------*/
-/* TX/RX SPI FIFO size */
-#define SPI_FIFO_DEPTH (8)
-#define SPI_FIFO_BYTE_WIDTH (2)
-#define SPI_FIFO_OVERFLOW_MARGIN (2)
-
-/* DMA burst length for half full/empty request trigger */
-#define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
-
-/* Dummy char output to achieve reads.
- Choosing something different from all zeroes may help pattern recognition
- for oscilloscope analysis, but may break some drivers. */
-#define SPI_DUMMY_u8 0
-#define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
-#define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)
-
-/**
- * Macro to change a u32 field:
- * @r : register to edit
- * @m : bit mask
- * @v : new value for the field correctly bit-aligned
-*/
-#define u32_EDIT(r, m, v) r = (r & ~(m)) | (v)
-
-/* Message state */
-#define START_STATE ((void*)0)
-#define RUNNING_STATE ((void*)1)
-#define DONE_STATE ((void*)2)
-#define ERROR_STATE ((void*)-1)
-
-/* Queue state */
-#define QUEUE_RUNNING (0)
-#define QUEUE_STOPPED (1)
-
-#define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0)
-#define DMA_ALIGNMENT 4
-/*-------------------------------------------------------------------------*/
-
-
-/*-------------------------------------------------------------------------*/
-/* Driver data structs */
-
-/* Context */
-struct driver_data {
- /* Driver model hookup */
- struct platform_device *pdev;
-
- /* SPI framework hookup */
- struct spi_master *master;
-
- /* IMX hookup */
- struct spi_imx_master *master_info;
-
- /* Memory resources and SPI regs virtual address */
- struct resource *ioarea;
- void __iomem *regs;
-
- /* SPI RX_DATA physical address */
- dma_addr_t rd_data_phys;
-
- /* Driver message queue */
- struct workqueue_struct *workqueue;
- struct work_struct work;
- spinlock_t lock;
- struct list_head queue;
- int busy;
- int run;
-
- /* Message Transfer pump */
- struct tasklet_struct pump_transfers;
-
- /* Current message, transfer and state */
- struct spi_message *cur_msg;
- struct spi_transfer *cur_transfer;
- struct chip_data *cur_chip;
-
- /* Rd / Wr buffers pointers */
- size_t len;
- void *tx;
- void *tx_end;
- void *rx;
- void *rx_end;
-
- u8 rd_only;
- u8 n_bytes;
- int cs_change;
-
- /* Function pointers */
- irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
- void (*cs_control)(u32 command);
-
- /* DMA setup */
- int rx_channel;
- int tx_channel;
- dma_addr_t rx_dma;
- dma_addr_t tx_dma;
- int rx_dma_needs_unmap;
- int tx_dma_needs_unmap;
- size_t tx_map_len;
- u32 dummy_dma_buf ____cacheline_aligned;
-
- struct clk *clk;
-};
-
-/* Runtime state */
-struct chip_data {
- u32 control;
- u32 period;
- u32 test;
-
- u8 enable_dma:1;
- u8 bits_per_word;
- u8 n_bytes;
- u32 max_speed_hz;
-
- void (*cs_control)(u32 command);
-};
-/*-------------------------------------------------------------------------*/
-
-
-static void pump_messages(struct work_struct *work);
-
-static void flush(struct driver_data *drv_data)
-{
- void __iomem *regs = drv_data->regs;
- u32 control;
-
- dev_dbg(&drv_data->pdev->dev, "flush\n");
-
- /* Wait for end of transaction */
- do {
- control = readl(regs + SPI_CONTROL);
- } while (control & SPI_CONTROL_XCH);
-
- /* Release chip select if requested, transfer delays are
- handled in pump_transfers */
- if (drv_data->cs_change)
- drv_data->cs_control(SPI_CS_DEASSERT);
-
- /* Disable SPI to flush FIFOs */
- writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
- writel(control, regs + SPI_CONTROL);
-}
-
-static void restore_state(struct driver_data *drv_data)
-{
- void __iomem *regs = drv_data->regs;
- struct chip_data *chip = drv_data->cur_chip;
-
- /* Load chip registers */
- dev_dbg(&drv_data->pdev->dev,
- "restore_state\n"
- " test = 0x%08X\n"
- " control = 0x%08X\n",
- chip->test,
- chip->control);
- writel(chip->test, regs + SPI_TEST);
- writel(chip->period, regs + SPI_PERIOD);
- writel(0, regs + SPI_INT_STATUS);
- writel(chip->control, regs + SPI_CONTROL);
-}
-
-static void null_cs_control(u32 command)
-{
-}
-
-static inline u32 data_to_write(struct driver_data *drv_data)
-{
- return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
-}
-
-static inline u32 data_to_read(struct driver_data *drv_data)
-{
- return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
-}
-
-static int write(struct driver_data *drv_data)
-{
- void __iomem *regs = drv_data->regs;
- void *tx = drv_data->tx;
- void *tx_end = drv_data->tx_end;
- u8 n_bytes = drv_data->n_bytes;
- u32 remaining_writes;
- u32 fifo_avail_space;
- u32 n;
- u16 d;
-
- /* Compute how many fifo writes to do */
- remaining_writes = (u32)(tx_end - tx) / n_bytes;
- fifo_avail_space = SPI_FIFO_DEPTH -
- (readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
- if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
- /* Fix misunderstood receive overflow */
- fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
- n = min(remaining_writes, fifo_avail_space);
-
- dev_dbg(&drv_data->pdev->dev,
- "write type %s\n"
- " remaining writes = %d\n"
- " fifo avail space = %d\n"
- " fifo writes = %d\n",
- (n_bytes == 1) ? "u8" : "u16",
- remaining_writes,
- fifo_avail_space,
- n);
-
- if (n > 0) {
- /* Fill SPI TXFIFO */
- if (drv_data->rd_only) {
- tx += n * n_bytes;
- while (n--)
- writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
- } else {
- if (n_bytes == 1) {
- while (n--) {
- d = *(u8*)tx;
- writel(d, regs + SPI_TXDATA);
- tx += 1;
- }
- } else {
- while (n--) {
- d = *(u16*)tx;
- writel(d, regs + SPI_TXDATA);
- tx += 2;
- }
- }
- }
-
- /* Trigger transfer */
- writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
- regs + SPI_CONTROL);
-
- /* Update tx pointer */
- drv_data->tx = tx;
- }
-
- return (tx >= tx_end);
-}
-
-static int read(struct driver_data *drv_data)
-{
- void __iomem *regs = drv_data->regs;
- void *rx = drv_data->rx;
- void *rx_end = drv_data->rx_end;
- u8 n_bytes = drv_data->n_bytes;
- u32 remaining_reads;
- u32 fifo_rxcnt;
- u32 n;
- u16 d;
-
- /* Compute how many fifo reads to do */
- remaining_reads = (u32)(rx_end - rx) / n_bytes;
- fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
- SPI_TEST_RXCNT_LSB;
- n = min(remaining_reads, fifo_rxcnt);
-
- dev_dbg(&drv_data->pdev->dev,
- "read type %s\n"
- " remaining reads = %d\n"
- " fifo rx count = %d\n"
- " fifo reads = %d\n",
- (n_bytes == 1) ? "u8" : "u16",
- remaining_reads,
- fifo_rxcnt,
- n);
-
- if (n > 0) {
- /* Read SPI RXFIFO */
- if (n_bytes == 1) {
- while (n--) {
- d = readl(regs + SPI_RXDATA);
- *((u8*)rx) = d;
- rx += 1;
- }
- } else {
- while (n--) {
- d = readl(regs + SPI_RXDATA);
- *((u16*)rx) = d;
- rx += 2;
- }
- }
-
- /* Update rx pointer */
- drv_data->rx = rx;
- }
-
- return (rx >= rx_end);
-}
-
-static void *next_transfer(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- struct spi_transfer *trans = drv_data->cur_transfer;
-
- /* Move to next transfer */
- if (trans->transfer_list.next != &msg->transfers) {
- drv_data->cur_transfer =
- list_entry(trans->transfer_list.next,
- struct spi_transfer,
- transfer_list);
- return RUNNING_STATE;
- }
-
- return DONE_STATE;
-}
-
-static int map_dma_buffers(struct driver_data *drv_data)
-{
- struct spi_message *msg;
- struct device *dev;
- void *buf;
-
- drv_data->rx_dma_needs_unmap = 0;
- drv_data->tx_dma_needs_unmap = 0;
-
- if (!drv_data->master_info->enable_dma ||
- !drv_data->cur_chip->enable_dma)
- return -1;
-
- msg = drv_data->cur_msg;
- dev = &msg->spi->dev;
- if (msg->is_dma_mapped) {
- if (drv_data->tx_dma)
- /* The caller provided at least dma and cpu virtual
- address for write; pump_transfers() will consider the
- transfer as write only if cpu rx virtual address is
- NULL */
- return 0;
-
- if (drv_data->rx_dma) {
- /* The caller provided dma and cpu virtual address to
- perform read only transfer -->
- use drv_data->dummy_dma_buf for dummy writes to
- achieve reads */
- buf = &drv_data->dummy_dma_buf;
- drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
- drv_data->tx_dma = dma_map_single(dev,
- buf,
- drv_data->tx_map_len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, drv_data->tx_dma))
- return -1;
-
- drv_data->tx_dma_needs_unmap = 1;
-
- /* Flags transfer as rd_only for pump_transfers() DMA
- regs programming (should be redundant) */
- drv_data->tx = NULL;
-
- return 0;
- }
- }
-
- if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
- return -1;
-
- if (drv_data->tx == NULL) {
- /* Read only message --> use drv_data->dummy_dma_buf for dummy
- writes to achieve reads */
- buf = &drv_data->dummy_dma_buf;
- drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
- } else {
- buf = drv_data->tx;
- drv_data->tx_map_len = drv_data->len;
- }
- drv_data->tx_dma = dma_map_single(dev,
- buf,
- drv_data->tx_map_len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, drv_data->tx_dma))
- return -1;
- drv_data->tx_dma_needs_unmap = 1;
-
- /* NULL rx means write-only transfer and no map needed
- * since rx DMA will not be used */
- if (drv_data->rx) {
- buf = drv_data->rx;
- drv_data->rx_dma = dma_map_single(dev,
- buf,
- drv_data->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, drv_data->rx_dma)) {
- if (drv_data->tx_dma) {
- dma_unmap_single(dev,
- drv_data->tx_dma,
- drv_data->tx_map_len,
- DMA_TO_DEVICE);
- drv_data->tx_dma_needs_unmap = 0;
- }
- return -1;
- }
- drv_data->rx_dma_needs_unmap = 1;
- }
-
- return 0;
-}
-
-static void unmap_dma_buffers(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- struct device *dev = &msg->spi->dev;
-
- if (drv_data->rx_dma_needs_unmap) {
- dma_unmap_single(dev,
- drv_data->rx_dma,
- drv_data->len,
- DMA_FROM_DEVICE);
- drv_data->rx_dma_needs_unmap = 0;
- }
- if (drv_data->tx_dma_needs_unmap) {
- dma_unmap_single(dev,
- drv_data->tx_dma,
- drv_data->tx_map_len,
- DMA_TO_DEVICE);
- drv_data->tx_dma_needs_unmap = 0;
- }
-}
-
-/* Caller already set message->status (dma is already blocked) */
-static void giveback(struct spi_message *message, struct driver_data *drv_data)
-{
- void __iomem *regs = drv_data->regs;
-
- /* Bring SPI to sleep; restore_state() and pump_transfer()
- will do new setup */
- writel(0, regs + SPI_INT_STATUS);
- writel(0, regs + SPI_DMA);
-
- /* Unconditional deselect */
- drv_data->cs_control(SPI_CS_DEASSERT);
-
- message->state = NULL;
- if (message->complete)
- message->complete(message->context);
-
- drv_data->cur_msg = NULL;
- drv_data->cur_transfer = NULL;
- drv_data->cur_chip = NULL;
- queue_work(drv_data->workqueue, &drv_data->work);
-}
-
-static void dma_err_handler(int channel, void *data, int errcode)
-{
- struct driver_data *drv_data = data;
- struct spi_message *msg = drv_data->cur_msg;
-
- dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");
-
- /* Disable both rx and tx dma channels */
- imx_dma_disable(drv_data->rx_channel);
- imx_dma_disable(drv_data->tx_channel);
- unmap_dma_buffers(drv_data);
-
- flush(drv_data);
-
- msg->state = ERROR_STATE;
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static void dma_tx_handler(int channel, void *data)
-{
- struct driver_data *drv_data = data;
-
- dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");
-
- imx_dma_disable(channel);
-
- /* Now wait for TX FIFO empty */
- writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
-}
-
-static irqreturn_t dma_transfer(struct driver_data *drv_data)
-{
- u32 status;
- struct spi_message *msg = drv_data->cur_msg;
- void __iomem *regs = drv_data->regs;
-
- status = readl(regs + SPI_INT_STATUS);
-
- if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
- == (SPI_INTEN_RO | SPI_STATUS_RO)) {
- writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
-
- imx_dma_disable(drv_data->tx_channel);
- imx_dma_disable(drv_data->rx_channel);
- unmap_dma_buffers(drv_data);
-
- flush(drv_data);
-
- dev_warn(&drv_data->pdev->dev,
- "dma_transfer - fifo overun\n");
-
- msg->state = ERROR_STATE;
- tasklet_schedule(&drv_data->pump_transfers);
-
- return IRQ_HANDLED;
- }
-
- if (status & SPI_STATUS_TE) {
- writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);
-
- if (drv_data->rx) {
- /* Wait for end of transfer before reading trailing data */
- while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
- cpu_relax();
-
- imx_dma_disable(drv_data->rx_channel);
- unmap_dma_buffers(drv_data);
-
- /* Release chip select if requested, transfer delays are
- handled in pump_transfers() */
- if (drv_data->cs_change)
- drv_data->cs_control(SPI_CS_DEASSERT);
-
- /* Calculate the number of trailing bytes and read them */
- dev_dbg(&drv_data->pdev->dev,
- "dma_transfer - test = 0x%08X\n",
- readl(regs + SPI_TEST));
- drv_data->rx = drv_data->rx_end -
- ((readl(regs + SPI_TEST) &
- SPI_TEST_RXCNT) >>
- SPI_TEST_RXCNT_LSB)*drv_data->n_bytes;
- read(drv_data);
- } else {
- /* Write only transfer */
- unmap_dma_buffers(drv_data);
-
- flush(drv_data);
- }
-
- /* End of transfer, update total bytes transferred */
- msg->actual_length += drv_data->len;
-
- /* Move to next transfer */
- msg->state = next_transfer(drv_data);
-
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
-
- return IRQ_HANDLED;
- }
-
- /* Oops, problem detected */
- return IRQ_NONE;
-}
-
-static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- void __iomem *regs = drv_data->regs;
- u32 status;
- irqreturn_t handled = IRQ_NONE;
-
- status = readl(regs + SPI_INT_STATUS);
-
- if (status & SPI_INTEN_TE) {
- /* TXFIFO Empty Interrupt on the last transferred word */
- writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_wronly_transfer - end of tx\n");
-
- flush(drv_data);
-
- /* Update total bytes transferred */
- msg->actual_length += drv_data->len;
-
- /* Move to next transfer */
- msg->state = next_transfer(drv_data);
-
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
-
- return IRQ_HANDLED;
- } else {
- while (status & SPI_STATUS_TH) {
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_wronly_transfer - status = 0x%08X\n",
- status);
-
- /* Pump data */
- if (write(drv_data)) {
- /* End of TXFIFO writes,
- now wait until TXFIFO is empty */
- writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
- return IRQ_HANDLED;
- }
-
- status = readl(regs + SPI_INT_STATUS);
-
- /* We did something */
- handled = IRQ_HANDLED;
- }
- }
-
- return handled;
-}
-
-static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
-{
- struct spi_message *msg = drv_data->cur_msg;
- void __iomem *regs = drv_data->regs;
- u32 status, control;
- irqreturn_t handled = IRQ_NONE;
- unsigned long limit;
-
- status = readl(regs + SPI_INT_STATUS);
-
- if (status & SPI_INTEN_TE) {
- /* TXFIFO Empty Interrupt on the last transferred word */
- writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_transfer - end of tx\n");
-
- if (msg->state == ERROR_STATE) {
- /* RXFIFO overrun was detected and message aborted */
- flush(drv_data);
- } else {
- /* Wait for end of transaction */
- do {
- control = readl(regs + SPI_CONTROL);
- } while (control & SPI_CONTROL_XCH);
-
- /* Release chip select if requested, transfer delays are
- handled in pump_transfers */
- if (drv_data->cs_change)
- drv_data->cs_control(SPI_CS_DEASSERT);
-
- /* Read trailing bytes */
- limit = loops_per_jiffy << 1;
- while ((read(drv_data) == 0) && --limit)
- cpu_relax();
-
- if (limit == 0)
- dev_err(&drv_data->pdev->dev,
- "interrupt_transfer - "
- "trailing byte read failed\n");
- else
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_transfer - end of rx\n");
-
- /* Update total bytes transferred */
- msg->actual_length += drv_data->len;
-
- /* Move to next transfer */
- msg->state = next_transfer(drv_data);
- }
-
- /* Schedule transfer tasklet */
- tasklet_schedule(&drv_data->pump_transfers);
-
- return IRQ_HANDLED;
- } else {
- while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
- dev_dbg(&drv_data->pdev->dev,
- "interrupt_transfer - status = 0x%08X\n",
- status);
-
- if (status & SPI_STATUS_RO) {
- /* RXFIFO overrun, abort message and wait
- until TXFIFO is empty */
- writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
-
- dev_warn(&drv_data->pdev->dev,
- "interrupt_transfer - fifo overun\n"
- " data not yet written = %d\n"
- " data not yet read = %d\n",
- data_to_write(drv_data),
- data_to_read(drv_data));
-
- msg->state = ERROR_STATE;
-
- return IRQ_HANDLED;
- }
-
- /* Pump data */
- read(drv_data);
- if (write(drv_data)) {
- /* End of TXFIFO writes,
- now wait until TXFIFO is empty */
- writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
- return IRQ_HANDLED;
- }
-
- status = readl(regs + SPI_INT_STATUS);
-
- /* We did something */
- handled = IRQ_HANDLED;
- }
- }
-
- return handled;
-}
-
-static irqreturn_t spi_int(int irq, void *dev_id)
-{
- struct driver_data *drv_data = (struct driver_data *)dev_id;
-
- if (!drv_data->cur_msg) {
- dev_err(&drv_data->pdev->dev,
- "spi_int - bad message state\n");
- /* Never fail */
- return IRQ_HANDLED;
- }
-
- return drv_data->transfer_handler(drv_data);
-}
-
-static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
-{
- return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
-}
-
-static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
-{
- u32 div;
- u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;
-
- for (div = SPI_PERCLK2_DIV_MIN;
- div <= SPI_PERCLK2_DIV_MAX;
- div++, quantized_hz >>= 1) {
- if (quantized_hz <= speed_hz)
- /* Max available speed LEQ required speed */
- return div << 13;
- }
- return SPI_CONTROL_DATARATE_BAD;
-}
-
-static void pump_transfers(unsigned long data)
-{
- struct driver_data *drv_data = (struct driver_data *)data;
- struct spi_message *message;
- struct spi_transfer *transfer, *previous;
- struct chip_data *chip;
- void __iomem *regs;
- u32 tmp, control;
-
- dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");
-
- message = drv_data->cur_msg;
-
- /* Handle for abort */
- if (message->state == ERROR_STATE) {
- message->status = -EIO;
- giveback(message, drv_data);
- return;
- }
-
- /* Handle end of message */
- if (message->state == DONE_STATE) {
- message->status = 0;
- giveback(message, drv_data);
- return;
- }
-
- chip = drv_data->cur_chip;
-
- /* Delay if requested at end of transfer */
- transfer = drv_data->cur_transfer;
- if (message->state == RUNNING_STATE) {
- previous = list_entry(transfer->transfer_list.prev,
- struct spi_transfer,
- transfer_list);
- if (previous->delay_usecs)
- udelay(previous->delay_usecs);
- } else {
- /* START_STATE */
- message->state = RUNNING_STATE;
- drv_data->cs_control = chip->cs_control;
- }
-
- transfer = drv_data->cur_transfer;
- drv_data->tx = (void *)transfer->tx_buf;
- drv_data->tx_end = drv_data->tx + transfer->len;
- drv_data->rx = transfer->rx_buf;
- drv_data->rx_end = drv_data->rx + transfer->len;
- drv_data->rx_dma = transfer->rx_dma;
- drv_data->tx_dma = transfer->tx_dma;
- drv_data->len = transfer->len;
- drv_data->cs_change = transfer->cs_change;
- drv_data->rd_only = (drv_data->tx == NULL);
-
- regs = drv_data->regs;
- control = readl(regs + SPI_CONTROL);
-
- /* Bits per word setup */
- tmp = transfer->bits_per_word;
- if (tmp == 0) {
- /* Use device setup */
- tmp = chip->bits_per_word;
- drv_data->n_bytes = chip->n_bytes;
- } else
- /* Use per-transfer setup */
- drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
- u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
-
- /* Speed setup (surely valid because already checked) */
- tmp = transfer->speed_hz;
- if (tmp == 0)
- tmp = chip->max_speed_hz;
- tmp = spi_data_rate(drv_data, tmp);
- u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);
-
- writel(control, regs + SPI_CONTROL);
-
- /* Assert device chip-select */
- drv_data->cs_control(SPI_CS_ASSERT);
-
- /* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence
- if bits_per_word is less than or equal to 8, PIO transfers are performed.
- Moreover, DMA is convenient for transfer lengths larger than the
- FIFO byte size. */
- if ((drv_data->n_bytes == 2) &&
- (drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) &&
- (map_dma_buffers(drv_data) == 0)) {
- dev_dbg(&drv_data->pdev->dev,
- "pump dma transfer\n"
- " tx = %p\n"
- " tx_dma = %08X\n"
- " rx = %p\n"
- " rx_dma = %08X\n"
- " len = %d\n",
- drv_data->tx,
- (unsigned int)drv_data->tx_dma,
- drv_data->rx,
- (unsigned int)drv_data->rx_dma,
- drv_data->len);
-
- /* Ensure we have the correct interrupt handler */
- drv_data->transfer_handler = dma_transfer;
-
- /* Trigger transfer */
- writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
- regs + SPI_CONTROL);
-
- /* Setup tx DMA */
- if (drv_data->tx)
- /* Linear source address */
- CCR(drv_data->tx_channel) =
- CCR_DMOD_FIFO |
- CCR_SMOD_LINEAR |
- CCR_SSIZ_32 | CCR_DSIZ_16 |
- CCR_REN;
- else
- /* Read only transfer -> fixed source address for
- dummy write to achieve read */
- CCR(drv_data->tx_channel) =
- CCR_DMOD_FIFO |
- CCR_SMOD_FIFO |
- CCR_SSIZ_32 | CCR_DSIZ_16 |
- CCR_REN;
-
- imx_dma_setup_single(
- drv_data->tx_channel,
- drv_data->tx_dma,
- drv_data->len,
- drv_data->rd_data_phys + 4,
- DMA_MODE_WRITE);
-
- if (drv_data->rx) {
- /* Setup rx DMA for linear destination address */
- CCR(drv_data->rx_channel) =
- CCR_DMOD_LINEAR |
- CCR_SMOD_FIFO |
- CCR_DSIZ_32 | CCR_SSIZ_16 |
- CCR_REN;
- imx_dma_setup_single(
- drv_data->rx_channel,
- drv_data->rx_dma,
- drv_data->len,
- drv_data->rd_data_phys,
- DMA_MODE_READ);
- imx_dma_enable(drv_data->rx_channel);
-
- /* Enable SPI interrupt */
- writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);
-
- /* Set SPI to request DMA service on both
- Rx and Tx half fifo watermark */
- writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
- } else
- /* Write only access -> set SPI to request DMA
- service on Tx half fifo watermark */
- writel(SPI_DMA_THDEN, regs + SPI_DMA);
-
- imx_dma_enable(drv_data->tx_channel);
- } else {
- dev_dbg(&drv_data->pdev->dev,
- "pump pio transfer\n"
- " tx = %p\n"
- " rx = %p\n"
- " len = %d\n",
- drv_data->tx,
- drv_data->rx,
- drv_data->len);
-
- /* Ensure we have the correct interrupt handler */
- if (drv_data->rx)
- drv_data->transfer_handler = interrupt_transfer;
- else
- drv_data->transfer_handler = interrupt_wronly_transfer;
-
- /* Enable SPI interrupt */
- if (drv_data->rx)
- writel(SPI_INTEN_TH | SPI_INTEN_RO,
- regs + SPI_INT_STATUS);
- else
- writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
- }
-}
-
-static void pump_messages(struct work_struct *work)
-{
- struct driver_data *drv_data =
- container_of(work, struct driver_data, work);
- unsigned long flags;
-
- /* Lock queue and check for queue work */
- spin_lock_irqsave(&drv_data->lock, flags);
- if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
- drv_data->busy = 0;
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- }
-
- /* Make sure we are not already running a message */
- if (drv_data->cur_msg) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- }
-
- /* Extract head of queue */
- drv_data->cur_msg = list_entry(drv_data->queue.next,
- struct spi_message, queue);
- list_del_init(&drv_data->cur_msg->queue);
- drv_data->busy = 1;
- spin_unlock_irqrestore(&drv_data->lock, flags);
-
- /* Initial message state */
- drv_data->cur_msg->state = START_STATE;
- drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
- struct spi_transfer,
- transfer_list);
-
- /* Setup the SPI using the per chip configuration */
- drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
- restore_state(drv_data);
-
- /* Mark as busy and launch transfers */
- tasklet_schedule(&drv_data->pump_transfers);
-}
-
-static int transfer(struct spi_device *spi, struct spi_message *msg)
-{
- struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- u32 min_speed_hz, max_speed_hz, tmp;
- struct spi_transfer *trans;
- unsigned long flags;
-
- msg->actual_length = 0;
-
- /* Per transfer setup check */
- min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
- max_speed_hz = spi->max_speed_hz;
- list_for_each_entry(trans, &msg->transfers, transfer_list) {
- tmp = trans->bits_per_word;
- if (tmp > 16) {
- dev_err(&drv_data->pdev->dev,
- "message rejected : "
- "invalid transfer bits_per_word (%d bits)\n",
- tmp);
- goto msg_rejected;
- }
- tmp = trans->speed_hz;
- if (tmp) {
- if (tmp < min_speed_hz) {
- dev_err(&drv_data->pdev->dev,
- "message rejected : "
- "device min speed (%d Hz) exceeds "
- "required transfer speed (%d Hz)\n",
- min_speed_hz,
- tmp);
- goto msg_rejected;
- } else if (tmp > max_speed_hz) {
- dev_err(&drv_data->pdev->dev,
- "message rejected : "
- "transfer speed (%d Hz) exceeds "
- "device max speed (%d Hz)\n",
- tmp,
- max_speed_hz);
- goto msg_rejected;
- }
- }
- }
-
- /* Message accepted */
- msg->status = -EINPROGRESS;
- msg->state = START_STATE;
-
- spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->run == QUEUE_STOPPED) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return -ESHUTDOWN;
- }
-
- list_add_tail(&msg->queue, &drv_data->queue);
- if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
- queue_work(drv_data->workqueue, &drv_data->work);
-
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return 0;
-
-msg_rejected:
- /* Message rejected and not queued */
- msg->status = -EINVAL;
- msg->state = ERROR_STATE;
- if (msg->complete)
- msg->complete(msg->context);
- return -EINVAL;
-}
-
-/* On first setup, bad values must free the chip_data memory since they will
- cause spi_new_device to fail. Bad values set later by the protocol driver
- are simply not applied, and the calling driver is notified. */
-static int setup(struct spi_device *spi)
-{
- struct driver_data *drv_data = spi_master_get_devdata(spi->master);
- struct spi_imx_chip *chip_info;
- struct chip_data *chip;
- int first_setup = 0;
- u32 tmp;
- int status = 0;
-
- /* Get controller data */
- chip_info = spi->controller_data;
-
- /* Get controller_state */
- chip = spi_get_ctldata(spi);
- if (chip == NULL) {
- first_setup = 1;
-
- chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
- if (!chip) {
- dev_err(&spi->dev,
- "setup - cannot allocate controller state\n");
- return -ENOMEM;
- }
- chip->control = SPI_DEFAULT_CONTROL;
-
- if (chip_info == NULL) {
- /* spi_board_info.controller_data is not supplied */
- chip_info = kzalloc(sizeof(struct spi_imx_chip),
- GFP_KERNEL);
- if (!chip_info) {
- dev_err(&spi->dev,
- "setup - "
- "cannot allocate controller data\n");
- status = -ENOMEM;
- goto err_first_setup;
- }
- /* Set controller data default value */
- chip_info->enable_loopback =
- SPI_DEFAULT_ENABLE_LOOPBACK;
- chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
- chip_info->ins_ss_pulse = 1;
- chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
- chip_info->cs_control = null_cs_control;
- }
- }
-
- /* Now set controller state based on controller data */
-
- if (first_setup) {
- /* SPI loopback */
- if (chip_info->enable_loopback)
- chip->test = SPI_TEST_LBC;
- else
- chip->test = 0;
-
- /* SPI dma driven */
- chip->enable_dma = chip_info->enable_dma;
-
- /* SPI /SS pulse between spi burst */
- if (chip_info->ins_ss_pulse)
- u32_EDIT(chip->control,
- SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
- else
- u32_EDIT(chip->control,
- SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);
-
- /* SPI bclk waits between each bits_per_word spi burst */
- if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
- dev_err(&spi->dev,
- "setup - "
- "bclk_wait exceeds max allowed (%d)\n",
- SPI_PERIOD_MAX_WAIT);
- goto err_first_setup;
- }
- chip->period = SPI_PERIOD_CSRC_BCLK |
- (chip_info->bclk_wait & SPI_PERIOD_WAIT);
- }
-
- /* SPI mode */
- tmp = spi->mode;
- if (tmp & SPI_CS_HIGH) {
- u32_EDIT(chip->control,
- SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
- }
- switch (tmp & SPI_MODE_3) {
- case SPI_MODE_0:
- tmp = 0;
- break;
- case SPI_MODE_1:
- tmp = SPI_CONTROL_PHA_1;
- break;
- case SPI_MODE_2:
- tmp = SPI_CONTROL_POL_ACT_LOW;
- break;
- default:
- /* SPI_MODE_3 */
- tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
- break;
- }
- u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);
-
- /* SPI word width */
- tmp = spi->bits_per_word;
- if (tmp > 16) {
- status = -EINVAL;
- dev_err(&spi->dev,
- "setup - "
- "invalid bits_per_word (%d)\n",
- tmp);
- if (first_setup)
- goto err_first_setup;
- else {
- /* Undo setup using chip as backup copy */
- tmp = chip->bits_per_word;
- spi->bits_per_word = tmp;
- }
- }
- chip->bits_per_word = tmp;
- u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
- chip->n_bytes = (tmp <= 8) ? 1 : 2;
-
- /* SPI datarate */
- tmp = spi_data_rate(drv_data, spi->max_speed_hz);
- if (tmp == SPI_CONTROL_DATARATE_BAD) {
- status = -EINVAL;
- dev_err(&spi->dev,
- "setup - "
- "HW min speed (%d Hz) exceeds required "
- "max speed (%d Hz)\n",
- spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
- spi->max_speed_hz);
- if (first_setup)
- goto err_first_setup;
- else
- /* Undo setup using chip as backup copy */
- spi->max_speed_hz = chip->max_speed_hz;
- } else {
- u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
- /* Actual rounded max_speed_hz */
- tmp = spi_speed_hz(drv_data, tmp);
- spi->max_speed_hz = tmp;
- chip->max_speed_hz = tmp;
- }
-
- /* SPI chip-select management */
- if (chip_info->cs_control)
- chip->cs_control = chip_info->cs_control;
- else
- chip->cs_control = null_cs_control;
-
- /* Save controller_state */
- spi_set_ctldata(spi, chip);
-
- /* Summary */
- dev_dbg(&spi->dev,
- "setup succeded\n"
- " loopback enable = %s\n"
- " dma enable = %s\n"
- " insert /ss pulse = %s\n"
- " period wait = %d\n"
- " mode = %d\n"
- " bits per word = %d\n"
- " min speed = %d Hz\n"
- " rounded max speed = %d Hz\n",
- chip->test & SPI_TEST_LBC ? "Yes" : "No",
- chip->enable_dma ? "Yes" : "No",
- chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
- chip->period & SPI_PERIOD_WAIT,
- spi->mode,
- spi->bits_per_word,
- spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
- spi->max_speed_hz);
- return status;
-
-err_first_setup:
- kfree(chip);
- return status;
-}
-
-static void cleanup(struct spi_device *spi)
-{
- kfree(spi_get_ctldata(spi));
-}
-
-static int __init init_queue(struct driver_data *drv_data)
-{
- INIT_LIST_HEAD(&drv_data->queue);
- spin_lock_init(&drv_data->lock);
-
- drv_data->run = QUEUE_STOPPED;
- drv_data->busy = 0;
-
- tasklet_init(&drv_data->pump_transfers,
- pump_transfers, (unsigned long)drv_data);
-
- INIT_WORK(&drv_data->work, pump_messages);
- drv_data->workqueue = create_singlethread_workqueue(
- dev_name(drv_data->master->dev.parent));
- if (drv_data->workqueue == NULL)
- return -EBUSY;
-
- return 0;
-}
-
-static int start_queue(struct driver_data *drv_data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&drv_data->lock, flags);
-
- if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return -EBUSY;
- }
-
- drv_data->run = QUEUE_RUNNING;
- drv_data->cur_msg = NULL;
- drv_data->cur_transfer = NULL;
- drv_data->cur_chip = NULL;
- spin_unlock_irqrestore(&drv_data->lock, flags);
-
- queue_work(drv_data->workqueue, &drv_data->work);
-
- return 0;
-}
-
-static int stop_queue(struct driver_data *drv_data)
-{
- unsigned long flags;
- unsigned limit = 500;
- int status = 0;
-
- spin_lock_irqsave(&drv_data->lock, flags);
-
- /* This is a bit lame, but is optimized for the common execution path.
- * A wait_queue on the drv_data->busy could be used, but then the common
- * execution path (pump_messages) would be required to call wake_up or
- * friends on every SPI message. Do this instead */
- drv_data->run = QUEUE_STOPPED;
- while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- msleep(10);
- spin_lock_irqsave(&drv_data->lock, flags);
- }
-
- if (!list_empty(&drv_data->queue) || drv_data->busy)
- status = -EBUSY;
-
- spin_unlock_irqrestore(&drv_data->lock, flags);
-
- return status;
-}
-
-static int destroy_queue(struct driver_data *drv_data)
-{
- int status;
-
- status = stop_queue(drv_data);
- if (status != 0)
- return status;
-
- if (drv_data->workqueue)
- destroy_workqueue(drv_data->workqueue);
-
- return 0;
-}
-
-static int __init spi_imx_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct spi_imx_master *platform_info;
- struct spi_master *master;
- struct driver_data *drv_data;
- struct resource *res;
- int irq, status = 0;
-
- platform_info = dev->platform_data;
- if (platform_info == NULL) {
- dev_err(&pdev->dev, "probe - no platform data supplied\n");
- status = -ENODEV;
- goto err_no_pdata;
- }
-
- /* Allocate master with space for drv_data */
- master = spi_alloc_master(dev, sizeof(struct driver_data));
- if (!master) {
- dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
- status = -ENOMEM;
- goto err_no_mem;
- }
- drv_data = spi_master_get_devdata(master);
- drv_data->master = master;
- drv_data->master_info = platform_info;
- drv_data->pdev = pdev;
-
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
- master->bus_num = pdev->id;
- master->num_chipselect = platform_info->num_chipselect;
- master->dma_alignment = DMA_ALIGNMENT;
- master->cleanup = cleanup;
- master->setup = setup;
- master->transfer = transfer;
-
- drv_data->dummy_dma_buf = SPI_DUMMY_u32;
-
- drv_data->clk = clk_get(&pdev->dev, "perclk2");
- if (IS_ERR(drv_data->clk)) {
- dev_err(&pdev->dev, "probe - cannot get clock\n");
- status = PTR_ERR(drv_data->clk);
- goto err_no_clk;
- }
- clk_enable(drv_data->clk);
-
- /* Find and map resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "probe - MEM resources not defined\n");
- status = -ENODEV;
- goto err_no_iores;
- }
- drv_data->ioarea = request_mem_region(res->start,
- res->end - res->start + 1,
- pdev->name);
- if (drv_data->ioarea == NULL) {
- dev_err(&pdev->dev, "probe - cannot reserve region\n");
- status = -ENXIO;
- goto err_no_iores;
- }
- drv_data->regs = ioremap(res->start, res->end - res->start + 1);
- if (drv_data->regs == NULL) {
- dev_err(&pdev->dev, "probe - cannot map IO\n");
- status = -ENXIO;
- goto err_no_iomap;
- }
- drv_data->rd_data_phys = (dma_addr_t)res->start;
-
- /* Attach to IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
- status = -ENODEV;
- goto err_no_irqres;
- }
- status = request_irq(irq, spi_int, IRQF_DISABLED,
- dev_name(dev), drv_data);
- if (status < 0) {
- dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
- goto err_no_irqres;
- }
-
- /* Setup DMA if requested */
- drv_data->tx_channel = -1;
- drv_data->rx_channel = -1;
- if (platform_info->enable_dma) {
- /* Get rx DMA channel */
- drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
- DMA_PRIO_HIGH);
- if (drv_data->rx_channel < 0) {
- dev_err(dev,
- "probe - problem (%d) requesting rx channel\n",
- drv_data->rx_channel);
- goto err_no_rxdma;
- } else
- imx_dma_setup_handlers(drv_data->rx_channel, NULL,
- dma_err_handler, drv_data);
-
- /* Get tx DMA channel */
- drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
- DMA_PRIO_MEDIUM);
- if (drv_data->tx_channel < 0) {
- dev_err(dev,
- "probe - problem (%d) requesting tx channel\n",
- drv_data->tx_channel);
- imx_dma_free(drv_data->rx_channel);
- goto err_no_txdma;
- } else
- imx_dma_setup_handlers(drv_data->tx_channel,
- dma_tx_handler, dma_err_handler,
- drv_data);
-
- /* Set request source and burst length for allocated channels */
- switch (drv_data->pdev->id) {
- case 1:
- /* Using SPI1 */
- RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
- RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
- break;
- case 2:
- /* Using SPI2 */
- RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
- RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
- break;
- default:
- dev_err(dev, "probe - bad SPI Id\n");
- imx_dma_free(drv_data->rx_channel);
- imx_dma_free(drv_data->tx_channel);
- status = -ENODEV;
- goto err_no_devid;
- }
- BLR(drv_data->rx_channel) = SPI_DMA_BLR;
- BLR(drv_data->tx_channel) = SPI_DMA_BLR;
- }
-
- /* Load default SPI configuration */
- writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
- writel(0, drv_data->regs + SPI_RESET);
- writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);
-
- /* Initialize and start queue */
- status = init_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "probe - problem initializing queue\n");
- goto err_init_queue;
- }
- status = start_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "probe - problem starting queue\n");
- goto err_start_queue;
- }
-
- /* Register with the SPI framework */
- platform_set_drvdata(pdev, drv_data);
- status = spi_register_master(master);
- if (status != 0) {
- dev_err(&pdev->dev, "probe - problem registering spi master\n");
- goto err_spi_register;
- }
-
- dev_dbg(dev, "probe succeded\n");
- return 0;
-
-err_init_queue:
-err_start_queue:
-err_spi_register:
- destroy_queue(drv_data);
-
-err_no_rxdma:
-err_no_txdma:
-err_no_devid:
- free_irq(irq, drv_data);
-
-err_no_irqres:
- iounmap(drv_data->regs);
-
-err_no_iomap:
- release_resource(drv_data->ioarea);
- kfree(drv_data->ioarea);
-
-err_no_iores:
- clk_disable(drv_data->clk);
- clk_put(drv_data->clk);
-
-err_no_clk:
- spi_master_put(master);
-
-err_no_pdata:
-err_no_mem:
- return status;
-}
-
-static int __exit spi_imx_remove(struct platform_device *pdev)
-{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
- int irq;
- int status = 0;
-
- if (!drv_data)
- return 0;
-
- tasklet_kill(&drv_data->pump_transfers);
-
- /* Remove the queue */
- status = destroy_queue(drv_data);
- if (status != 0) {
- dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
- return status;
- }
-
- /* Reset SPI */
- writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
- writel(0, drv_data->regs + SPI_RESET);
-
- /* Release DMA */
- if (drv_data->master_info->enable_dma) {
- RSSR(drv_data->rx_channel) = 0;
- RSSR(drv_data->tx_channel) = 0;
- imx_dma_free(drv_data->tx_channel);
- imx_dma_free(drv_data->rx_channel);
- }
-
- /* Release IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0)
- free_irq(irq, drv_data);
-
- clk_disable(drv_data->clk);
- clk_put(drv_data->clk);
-
- /* Release map resources */
- iounmap(drv_data->regs);
- release_resource(drv_data->ioarea);
- kfree(drv_data->ioarea);
-
- /* Disconnect from the SPI framework */
- spi_unregister_master(drv_data->master);
- spi_master_put(drv_data->master);
-
- /* Prevent double remove */
- platform_set_drvdata(pdev, NULL);
-
- dev_dbg(&pdev->dev, "remove succeded\n");
-
- return 0;
-}
-
-static void spi_imx_shutdown(struct platform_device *pdev)
-{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
-
- /* Reset SPI */
- writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
- writel(0, drv_data->regs + SPI_RESET);
-
- dev_dbg(&pdev->dev, "shutdown succeded\n");
-}
-
-#ifdef CONFIG_PM
-
-static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
- int status = 0;
-
- status = stop_queue(drv_data);
- if (status != 0) {
- dev_warn(&pdev->dev, "suspend cannot stop queue\n");
- return status;
- }
-
- dev_dbg(&pdev->dev, "suspended\n");
-
- return 0;
-}
-
-static int spi_imx_resume(struct platform_device *pdev)
-{
- struct driver_data *drv_data = platform_get_drvdata(pdev);
- int status = 0;
-
- /* Start the queue running */
- status = start_queue(drv_data);
- if (status != 0)
- dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
- else
- dev_dbg(&pdev->dev, "resumed\n");
-
- return status;
-}
-#else
-#define spi_imx_suspend NULL
-#define spi_imx_resume NULL
-#endif /* CONFIG_PM */
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:spi_imx");
-
-static struct platform_driver driver = {
- .driver = {
- .name = "spi_imx",
- .owner = THIS_MODULE,
- },
- .remove = __exit_p(spi_imx_remove),
- .shutdown = spi_imx_shutdown,
- .suspend = spi_imx_suspend,
- .resume = spi_imx_resume,
-};
-
-static int __init spi_imx_init(void)
-{
- return platform_driver_probe(&driver, spi_imx_probe);
-}
-module_init(spi_imx_init);
-
-static void __exit spi_imx_exit(void)
-{
- platform_driver_unregister(&driver);
-}
-module_exit(spi_imx_exit);
-
-MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
-MODULE_DESCRIPTION("iMX SPI Controller Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
new file mode 100644
index 00000000000..140a18d6cf3
--- /dev/null
+++ b/drivers/spi/spi_ppc4xx.c
@@ -0,0 +1,612 @@
+/*
+ * SPI_PPC4XX SPI controller driver.
+ *
+ * Copyright (C) 2007 Gary Jennejohn <garyj@denx.de>
+ * Copyright 2008 Stefan Roese <sr@denx.de>, DENX Software Engineering
+ * Copyright 2009 Harris Corporation, Steven A. Falco <sfalco@harris.com>
+ *
+ * Based in part on drivers/spi/spi_s3c24xx.c
+ *
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The PPC4xx SPI controller has no FIFO so each sent/received byte will
+ * generate an interrupt to the CPU. This can cause high CPU utilization.
+ * This driver allows platforms to reduce the interrupt load on the CPU
+ * during SPI transfers by setting max_speed_hz via the device tree.
+ */
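To give a rough sense of that load (an illustrative back-of-the-envelope helper, not driver code; the function name is made up), one interrupt is raised per 8-bit word, so the interrupt rate is simply the clock rate divided by eight:

/* Illustrative only: estimate the interrupt rate for a given SPI clock. */
static inline unsigned long example_irqs_per_second(unsigned long speed_hz)
{
        return speed_hz / 8;    /* one interrupt per transferred byte */
}

At 4 MHz that is roughly 500,000 interrupts per second; capping max_speed_hz at 1 MHz in the device tree brings it down to about 125,000.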
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/of_platform.h>
+#include <linux/of_spi.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <asm/io.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+/* bits in mode register - bit 0 is MSb */
+
+/*
+ * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock"
+ * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock"
+ * Note: This is the inverse of CPHA.
+ */
+#define SPI_PPC4XX_MODE_SCP (0x80 >> 3)
+
+/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */
+#define SPI_PPC4XX_MODE_SPE (0x80 >> 4)
+
+/*
+ * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode
+ * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode
+ * Note: This is identical to SPI_LSB_FIRST.
+ */
+#define SPI_PPC4XX_MODE_RD (0x80 >> 5)
+
+/*
+ * SPI_PPC4XX_MODE_CI = 0 means "clock idles low"
+ * SPI_PPC4XX_MODE_CI = 1 means "clock idles high"
+ * Note: This is identical to CPOL.
+ */
+#define SPI_PPC4XX_MODE_CI (0x80 >> 6)
+
+/*
+ * SPI_PPC4XX_MODE_IL = 0 means "loopback disable"
+ * SPI_PPC4XX_MODE_IL = 1 means "loopback enable"
+ */
+#define SPI_PPC4XX_MODE_IL (0x80 >> 7)
+
+/* bits in control register */
+/* starts a transfer when set */
+#define SPI_PPC4XX_CR_STR (0x80 >> 7)
+
+/* bits in status register */
+/* port is busy with a transfer */
+#define SPI_PPC4XX_SR_BSY (0x80 >> 6)
+/* RxD ready */
+#define SPI_PPC4XX_SR_RBR (0x80 >> 7)
+
+/* clock settings (SCP and CI) for various SPI modes */
+#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0)
+#define SPI_CLK_MODE1 (0 | 0)
+#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI)
+#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI)
+
+#define DRIVER_NAME "spi_ppc4xx_of"
+
+struct spi_ppc4xx_regs {
+ u8 mode;
+ u8 rxd;
+ u8 txd;
+ u8 cr;
+ u8 sr;
+ u8 dummy;
+ /*
+ * Clock divisor modulus register
+ * This uses the following formula:
+ * SCPClkOut = OPBCLK/(4(CDM + 1))
+ * or
+ * CDM = OPBCLK/(4*SCPClkOut) - 1
+ * bit 0 is the MSb!
+ */
+ u8 cdm;
+};
+
+/* SPI Controller driver's private data. */
+struct ppc4xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ u64 mapbase;
+ u64 mapsize;
+ int irqnum;
+ /* need this to set the SPI clock */
+ unsigned int opb_freq;
+
+ /* for transfers */
+ int len;
+ int count;
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ int *gpios;
+
+ struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */
+ struct spi_master *master;
+ struct device *dev;
+};
+
+/* need this so we can set the clock in the chipselect routine */
+struct spi_ppc4xx_cs {
+ u8 mode;
+};
+
+static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ppc4xx_spi *hw;
+ u8 data;
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
+ hw = spi_master_get_devdata(spi->master);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ /* send the first byte */
+ data = hw->tx ? hw->tx[0] : 0;
+ out_8(&hw->regs->txd, data);
+ out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
+
+static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
+ struct spi_ppc4xx_cs *cs = spi->controller_state;
+ int scr;
+ u8 cdm = 0;
+ u32 speed;
+ u8 bits_per_word;
+
+ /* Start with the generic configuration for this device. */
+ bits_per_word = spi->bits_per_word;
+ speed = spi->max_speed_hz;
+
+ /*
+ * Modify the configuration if the transfer overrides it. Do not allow
+ * the transfer to overwrite the generic configuration with zeros.
+ */
+ if (t) {
+ if (t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ if (t->speed_hz)
+ speed = min(t->speed_hz, spi->max_speed_hz);
+ }
+
+ if (bits_per_word != 8) {
+ dev_err(&spi->dev, "invalid bits-per-word (%d)\n",
+ bits_per_word);
+ return -EINVAL;
+ }
+
+ if (!speed || (speed > spi->max_speed_hz)) {
+ dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed);
+ return -EINVAL;
+ }
+
+ /* Write new configuration */
+ out_8(&hw->regs->mode, cs->mode);
+
+ /* Set the clock */
+ /* opb_freq was already divided by 4 */
+ scr = (hw->opb_freq / speed) - 1;
+ if (scr > 0)
+ cdm = min(scr, 0xff);
+
+ dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed);
+
+ if (in_8(&hw->regs->cdm) != cdm)
+ out_8(&hw->regs->cdm, cdm);
+
+ spin_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* Need to ndelay here? */
+ }
+ spin_unlock(&hw->bitbang.lock);
+
+ return 0;
+}
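To make the divisor arithmetic concrete, here is an illustrative helper (not part of the driver; the name and the 64 MHz OPB figure are assumptions) that mirrors the scr/cdm computation above and the CDM formula documented with the register layout:

/*
 * Sketch of the clock-divisor calculation: opb_freq_div4 is the OPB
 * clock already divided by 4, exactly as hw->opb_freq is stored by the
 * probe routine further down.
 */
static inline u8 example_cdm(unsigned int opb_freq_div4, unsigned int speed_hz)
{
        int scr = (opb_freq_div4 / speed_hz) - 1;   /* CDM = OPBCLK/(4*SCPClkOut) - 1 */

        return (scr > 0) ? min(scr, 0xff) : 0;
}

For example, a 64 MHz OPB gives opb_freq_div4 = 16 MHz; requesting 1 MHz yields CDM = 15, and the controller then runs at SCPClkOut = 64 MHz / (4 * (15 + 1)) = 1 MHz.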
+
+static int spi_ppc4xx_setup(struct spi_device *spi)
+{
+ struct spi_ppc4xx_cs *cs = spi->controller_state;
+
+ if (spi->bits_per_word != 8) {
+ dev_err(&spi->dev, "invalid bits-per-word (%d)\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n");
+ return -EINVAL;
+ }
+
+ if (cs == NULL) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ /*
+ * We set all bits of the SPI0_MODE register, so there is
+ * no need to read-modify-write.
+ */
+ cs->mode = SPI_PPC4XX_MODE_SPE;
+
+ switch (spi->mode & (SPI_CPHA | SPI_CPOL)) {
+ case SPI_MODE_0:
+ cs->mode |= SPI_CLK_MODE0;
+ break;
+ case SPI_MODE_1:
+ cs->mode |= SPI_CLK_MODE1;
+ break;
+ case SPI_MODE_2:
+ cs->mode |= SPI_CLK_MODE2;
+ break;
+ case SPI_MODE_3:
+ cs->mode |= SPI_CLK_MODE3;
+ break;
+ }
+
+ if (spi->mode & SPI_LSB_FIRST)
+ cs->mode |= SPI_PPC4XX_MODE_RD;
+
+ return 0;
+}
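For reference, a hypothetical client (protocol) driver reaches the setup path above through the generic spi_setup() call; apart from spi_setup() and the standard mode flags, everything in this sketch is assumed for illustration:

/* Illustrative only: configure a mode-3, LSB-first, active-high-CS device. */
static int example_client_configure(struct spi_device *spi)
{
        spi->mode = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST;
        spi->bits_per_word = 8;         /* the only width this controller accepts */

        return spi_setup(spi);          /* ends up in spi_ppc4xx_setup() above */
}

All the flags used here are within the mode_bits this driver advertises in its probe routine below.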
+
+static void spi_ppc4xx_chipsel(struct spi_device *spi, int value)
+{
+ struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
+ unsigned int cs = spi->chip_select;
+ unsigned int cspol;
+
+ /*
+ * If there are no chip selects at all, or if this is the special
+ * case of a non-existent (dummy) chip select, do nothing.
+ */
+
+ if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST)
+ return;
+
+ cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ if (value == BITBANG_CS_INACTIVE)
+ cspol = !cspol;
+
+ gpio_set_value(hw->gpios[cs], cspol);
+}
+
+static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id)
+{
+ struct ppc4xx_spi *hw;
+ u8 status;
+ u8 data;
+ unsigned int count;
+
+ hw = (struct ppc4xx_spi *)dev_id;
+
+ status = in_8(&hw->regs->sr);
+ if (!status)
+ return IRQ_NONE;
+
+ /*
+ * BSY de-asserts one cycle after the transfer is complete. The
+ * interrupt is asserted after the transfer is complete. The exact
+ * relationship is not documented, hence this code.
+ */
+
+ if (unlikely(status & SPI_PPC4XX_SR_BSY)) {
+ u8 lstatus;
+ int cnt = 0;
+
+ dev_dbg(hw->dev, "got interrupt but spi still busy?\n");
+ do {
+ ndelay(10);
+ lstatus = in_8(&hw->regs->sr);
+ } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY);
+
+ if (cnt >= 100) {
+ dev_err(hw->dev, "busywait: too many loops!\n");
+ complete(&hw->done);
+ return IRQ_HANDLED;
+ } else {
+ /* status is always 1 (RBR) here */
+ status = in_8(&hw->regs->sr);
+ dev_dbg(hw->dev, "loops %d status %x\n", cnt, status);
+ }
+ }
+
+ count = hw->count;
+ hw->count++;
+
+ /* RBR triggered this interrupt. Therefore, data must be ready. */
+ data = in_8(&hw->regs->rxd);
+ if (hw->rx)
+ hw->rx[count] = data;
+
+ count++;
+
+ if (count < hw->len) {
+ data = hw->tx ? hw->tx[count] : 0;
+ out_8(&hw->regs->txd, data);
+ out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
+ } else {
+ complete(&hw->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void spi_ppc4xx_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
+{
+ /*
+ * On all 4xx PPC's the SPI bus is shared/multiplexed with
+ * the 2nd I2C bus. We need to enable the SPI bus before
+ * using it.
+ */
+
+ /* need to clear bit 14 to enable SPC */
+ dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0);
+}
+
+static void free_gpios(struct ppc4xx_spi *hw)
+{
+ if (hw->master->num_chipselect) {
+ int i;
+ for (i = 0; i < hw->master->num_chipselect; i++)
+ if (gpio_is_valid(hw->gpios[i]))
+ gpio_free(hw->gpios[i]);
+
+ kfree(hw->gpios);
+ hw->gpios = NULL;
+ }
+}
+
+/*
+ * of_device layer stuff...
+ */
+static int __init spi_ppc4xx_of_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct ppc4xx_spi *hw;
+ struct spi_master *master;
+ struct spi_bitbang *bbp;
+ struct resource resource;
+ struct device_node *np = op->node;
+ struct device *dev = &op->dev;
+ struct device_node *opbnp;
+ int ret;
+ int num_gpios;
+ const unsigned int *clk;
+
+ master = spi_alloc_master(dev, sizeof *hw);
+ if (master == NULL)
+ return -ENOMEM;
+ dev_set_drvdata(dev, master);
+ hw = spi_master_get_devdata(master);
+ hw->master = spi_master_get(master);
+ hw->dev = dev;
+
+ init_completion(&hw->done);
+
+ /*
+ * A count of zero implies a single SPI device without any chip-select.
+ * Note that of_gpio_count counts all gpios assigned to this spi master.
+ * This includes both "null" GPIOs and real ones.
+ */
+ num_gpios = of_gpio_count(np);
+ if (num_gpios) {
+ int i;
+
+ hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
+ if (!hw->gpios) {
+ ret = -ENOMEM;
+ goto free_master;
+ }
+
+ for (i = 0; i < num_gpios; i++) {
+ int gpio;
+ enum of_gpio_flags flags;
+
+ gpio = of_get_gpio_flags(np, i, &flags);
+ hw->gpios[i] = gpio;
+
+ if (gpio_is_valid(gpio)) {
+ /* Real CS - set the initial state. */
+ ret = gpio_request(gpio, np->name);
+ if (ret < 0) {
+ dev_err(dev, "can't request gpio "
+ "#%d: %d\n", i, ret);
+ goto free_gpios;
+ }
+
+ gpio_direction_output(gpio,
+ !!(flags & OF_GPIO_ACTIVE_LOW));
+ } else if (gpio == -EEXIST) {
+ ; /* No CS, but that's OK. */
+ } else {
+ dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
+ ret = -EINVAL;
+ goto free_gpios;
+ }
+ }
+ }
+
+ /* Setup the state for the bitbang driver */
+ bbp = &hw->bitbang;
+ bbp->master = hw->master;
+ bbp->setup_transfer = spi_ppc4xx_setupxfer;
+ bbp->chipselect = spi_ppc4xx_chipsel;
+ bbp->txrx_bufs = spi_ppc4xx_txrx;
+ bbp->use_dma = 0;
+ bbp->master->setup = spi_ppc4xx_setup;
+ bbp->master->cleanup = spi_ppc4xx_cleanup;
+
+ /* Allocate bus num dynamically. */
+ bbp->master->bus_num = -1;
+
+ /* the spi->mode bits understood by this driver: */
+ bbp->master->mode_bits =
+ SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
+
+ /* this many pins in all GPIO controllers */
+ bbp->master->num_chipselect = num_gpios;
+
+ /* Get the clock for the OPB */
+ opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
+ if (opbnp == NULL) {
+ dev_err(dev, "OPB: cannot find node\n");
+ ret = -ENODEV;
+ goto free_gpios;
+ }
+ /* Get the clock (Hz) for the OPB */
+ clk = of_get_property(opbnp, "clock-frequency", NULL);
+ if (clk == NULL) {
+ dev_err(dev, "OPB: no clock-frequency property set\n");
+ of_node_put(opbnp);
+ ret = -ENODEV;
+ goto free_gpios;
+ }
+ hw->opb_freq = *clk;
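+ /* The SPI reference clock presumably runs at OPB/4, hence the shift. */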
+ hw->opb_freq >>= 2;
+ of_node_put(opbnp);
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_err(dev, "error while parsing device node resource\n");
+ goto free_gpios;
+ }
+ hw->mapbase = resource.start;
+ hw->mapsize = resource.end - resource.start + 1;
+
+ /* Sanity check */
+ if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
+ dev_err(dev, "too small to map registers\n");
+ ret = -EINVAL;
+ goto free_gpios;
+ }
+
+ /* Request IRQ */
+ hw->irqnum = irq_of_parse_and_map(np, 0);
+ ret = request_irq(hw->irqnum, spi_ppc4xx_int,
+ IRQF_DISABLED, "spi_ppc4xx_of", (void *)hw);
+ if (ret) {
+ dev_err(dev, "unable to allocate interrupt\n");
+ goto free_gpios;
+ }
+
+ if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) {
+ dev_err(dev, "resource unavailable\n");
+ ret = -EBUSY;
+ goto request_mem_error;
+ }
+
+ hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs));
+
+ if (!hw->regs) {
+ dev_err(dev, "unable to memory map registers\n");
+ ret = -ENXIO;
+ goto map_io_error;
+ }
+
+ spi_ppc4xx_enable(hw);
+
+ /* Finally register our spi controller */
+ dev->dma_mask = 0;
+ ret = spi_bitbang_start(bbp);
+ if (ret) {
+ dev_err(dev, "failed to register SPI master\n");
+ goto unmap_regs;
+ }
+
+ dev_info(dev, "driver initialized\n");
+ of_register_spi_devices(master, np);
+
+ return 0;
+
+unmap_regs:
+ iounmap(hw->regs);
+map_io_error:
+ release_mem_region(hw->mapbase, hw->mapsize);
+request_mem_error:
+ free_irq(hw->irqnum, hw);
+free_gpios:
+ free_gpios(hw);
+free_master:
+ dev_set_drvdata(dev, NULL);
+ spi_master_put(master);
+
+ dev_err(dev, "initialization failed\n");
+ return ret;
+}
+
+static int __exit spi_ppc4xx_of_remove(struct of_device *op)
+{
+ struct spi_master *master = dev_get_drvdata(&op->dev);
+ struct ppc4xx_spi *hw = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&hw->bitbang);
+ dev_set_drvdata(&op->dev, NULL);
+ release_mem_region(hw->mapbase, hw->mapsize);
+ free_irq(hw->irqnum, hw);
+ iounmap(hw->regs);
+ free_gpios(hw);
+ return 0;
+}
+
+static struct of_device_id spi_ppc4xx_of_match[] = {
+ { .compatible = "ibm,ppc4xx-spi", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
+
+static struct of_platform_driver spi_ppc4xx_of_driver = {
+ .match_table = spi_ppc4xx_of_match,
+ .probe = spi_ppc4xx_of_probe,
+ .remove = __exit_p(spi_ppc4xx_of_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init spi_ppc4xx_init(void)
+{
+ return of_register_platform_driver(&spi_ppc4xx_of_driver);
+}
+module_init(spi_ppc4xx_init);
+
+static void __exit spi_ppc4xx_exit(void)
+{
+ of_unregister_platform_driver(&spi_ppc4xx_of_driver);
+}
+module_exit(spi_ppc4xx_exit);
+
+MODULE_AUTHOR("Gary Jennejohn & Stefan Roese");
+MODULE_DESCRIPTION("Simple PPC4xx SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 3f3119d760d..33d94f76b9e 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -20,17 +20,28 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <mach/hardware.h>
-
#include <plat/regs-spi.h>
#include <mach/spi.h>
+/**
+ * s3c24xx_spi_devstate - per device data
+ * @hz: Last frequency calculated for @sppre field.
+ * @mode: Last mode setting for the @spcon field.
+ * @spcon: Value to write to the SPCON register.
+ * @sppre: Value to write to the SPPRE register.
+ */
+struct s3c24xx_spi_devstate {
+ unsigned int hz;
+ unsigned int mode;
+ u8 spcon;
+ u8 sppre;
+};
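+
+/*
+ * Caching spcon and sppre per device lets chipselect() and
+ * setup_transfer() write precomputed register values instead of
+ * recalculating them on every transfer.
+ */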
+
struct s3c24xx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
@@ -71,43 +82,31 @@ static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol)
static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
struct s3c24xx_spi *hw = to_hw(spi);
unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
- unsigned int spcon;
+
+ /* change the chipselect state and the state of the spi engine clock */
switch (value) {
case BITBANG_CS_INACTIVE:
hw->set_cs(hw->pdata, spi->chip_select, cspol^1);
+ writeb(cs->spcon, hw->regs + S3C2410_SPCON);
break;
case BITBANG_CS_ACTIVE:
- spcon = readb(hw->regs + S3C2410_SPCON);
-
- if (spi->mode & SPI_CPHA)
- spcon |= S3C2410_SPCON_CPHA_FMTB;
- else
- spcon &= ~S3C2410_SPCON_CPHA_FMTB;
-
- if (spi->mode & SPI_CPOL)
- spcon |= S3C2410_SPCON_CPOL_HIGH;
- else
- spcon &= ~S3C2410_SPCON_CPOL_HIGH;
-
- spcon |= S3C2410_SPCON_ENSCK;
-
- /* write new configration */
-
- writeb(spcon, hw->regs + S3C2410_SPCON);
+ writeb(cs->spcon | S3C2410_SPCON_ENSCK,
+ hw->regs + S3C2410_SPCON);
hw->set_cs(hw->pdata, spi->chip_select, cspol);
-
break;
}
}
-static int s3c24xx_spi_setupxfer(struct spi_device *spi,
- struct spi_transfer *t)
+static int s3c24xx_spi_update_state(struct spi_device *spi,
+ struct spi_transfer *t)
{
struct s3c24xx_spi *hw = to_hw(spi);
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
unsigned int bpw;
unsigned int hz;
unsigned int div;
@@ -127,41 +126,89 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
return -EINVAL;
}
- clk = clk_get_rate(hw->clk);
- div = DIV_ROUND_UP(clk, hz * 2) - 1;
+ if (spi->mode != cs->mode) {
+ u8 spcon = SPCON_DEFAULT;
- if (div > 255)
- div = 255;
+ if (spi->mode & SPI_CPHA)
+ spcon |= S3C2410_SPCON_CPHA_FMTB;
- dev_dbg(&spi->dev, "setting pre-scaler to %d (wanted %d, got %ld)\n",
- div, hz, clk / (2 * (div + 1)));
+ if (spi->mode & SPI_CPOL)
+ spcon |= S3C2410_SPCON_CPOL_HIGH;
+ cs->mode = spi->mode;
+ cs->spcon = spcon;
+ }
- writeb(div, hw->regs + S3C2410_SPPRE);
+ if (cs->hz != hz) {
+ clk = clk_get_rate(hw->clk);
+ div = DIV_ROUND_UP(clk, hz * 2) - 1;
- spin_lock(&hw->bitbang.lock);
- if (!hw->bitbang.busy) {
- hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
- /* need to ndelay for 0.5 clocktick ? */
+ if (div > 255)
+ div = 255;
+
+ dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n",
+ div, hz, clk / (2 * (div + 1)));
+
+ cs->hz = hz;
+ cs->sppre = div;
}
- spin_unlock(&hw->bitbang.lock);
return 0;
}
+static int s3c24xx_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+ int ret;
+
+ ret = s3c24xx_spi_update_state(spi, t);
+ if (!ret)
+ writeb(cs->sppre, hw->regs + S3C2410_SPPRE);
+
+ return ret;
+}
+
static int s3c24xx_spi_setup(struct spi_device *spi)
{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
int ret;
- ret = s3c24xx_spi_setupxfer(spi, NULL);
- if (ret < 0) {
- dev_err(&spi->dev, "setupxfer returned %d\n", ret);
+ /* allocate settings on the first call */
+ if (!cs) {
+ cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL);
+ if (!cs) {
+ dev_err(&spi->dev, "no memory for controller state\n");
+ return -ENOMEM;
+ }
+
+ cs->spcon = SPCON_DEFAULT;
+ cs->hz = -1;
+ spi->controller_state = cs;
+ }
+
+ /* initialise the state from the device */
+ ret = s3c24xx_spi_update_state(spi, NULL);
+ if (ret)
return ret;
+
+ spin_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* need to ndelay for 0.5 clocktick ? */
}
+ spin_unlock(&hw->bitbang.lock);
return 0;
}
+static void s3c24xx_spi_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
{
return hw->tx ? hw->tx[count] : 0;
@@ -289,7 +336,9 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
hw->bitbang.chipselect = s3c24xx_spi_chipsel;
hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
- hw->bitbang.master->setup = s3c24xx_spi_setup;
+
+ hw->master->setup = s3c24xx_spi_setup;
+ hw->master->cleanup = s3c24xx_spi_cleanup;
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
@@ -302,7 +351,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
goto err_no_iores;
}
- hw->ioarea = request_mem_region(res->start, (res->end - res->start)+1,
+ hw->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (hw->ioarea == NULL) {
@@ -311,7 +360,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
goto err_no_iores;
}
- hw->regs = ioremap(res->start, (res->end - res->start)+1);
+ hw->regs = ioremap(res->start, resource_size(res));
if (hw->regs == NULL) {
dev_err(&pdev->dev, "Cannot map IO\n");
err = -ENXIO;
@@ -388,7 +437,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
err_no_iores:
err_no_pdata:
- spi_master_put(hw->master);;
+ spi_master_put(hw->master);
err_nomem:
return err;
@@ -421,9 +470,9 @@ static int __exit s3c24xx_spi_remove(struct platform_device *dev)
#ifdef CONFIG_PM
-static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg)
+static int s3c24xx_spi_suspend(struct device *dev)
{
- struct s3c24xx_spi *hw = platform_get_drvdata(pdev);
+ struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev));
if (hw->pdata && hw->pdata->gpio_setup)
hw->pdata->gpio_setup(hw->pdata, 0);
@@ -432,27 +481,31 @@ static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg)
return 0;
}
-static int s3c24xx_spi_resume(struct platform_device *pdev)
+static int s3c24xx_spi_resume(struct device *dev)
{
- struct s3c24xx_spi *hw = platform_get_drvdata(pdev);
+ struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev));
s3c24xx_spi_initialsetup(hw);
return 0;
}
+static struct dev_pm_ops s3c24xx_spi_pmops = {
+ .suspend = s3c24xx_spi_suspend,
+ .resume = s3c24xx_spi_resume,
+};
+
+#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops
#else
-#define s3c24xx_spi_suspend NULL
-#define s3c24xx_spi_resume NULL
-#endif
+#define S3C24XX_SPI_PMOPS NULL
+#endif /* CONFIG_PM */
MODULE_ALIAS("platform:s3c2410-spi");
static struct platform_driver s3c24xx_spi_driver = {
.remove = __exit_p(s3c24xx_spi_remove),
- .suspend = s3c24xx_spi_suspend,
- .resume = s3c24xx_spi_resume,
.driver = {
.name = "s3c2410-spi",
.owner = THIS_MODULE,
+ .pm = S3C24XX_SPI_PMOPS,
},
};
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c
new file mode 100644
index 00000000000..d871dc23909
--- /dev/null
+++ b/drivers/spi/spi_stmp.c
@@ -0,0 +1,679 @@
+/*
+ * Freescale STMP378X SPI master driver
+ *
+ * Author: dmitry pervushin <dimka@embeddedalley.com>
+ *
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#include <mach/platform.h>
+#include <mach/stmp3xxx.h>
+#include <mach/dma.h>
+#include <mach/regs-ssp.h>
+#include <mach/regs-apbh.h>
+
+
+/* 0 means DMA mode (recommended, default); non-zero selects PIO mode */
+static int pio;
+static int clock;
+
+/* default timeout for busy waits is 2 seconds */
+#define STMP_SPI_TIMEOUT (2 * HZ)
+
+struct stmp_spi {
+ int id;
+
+ void __iomem *regs; /* vaddr of the control registers */
+
+ int irq, err_irq;
+ u32 dma;
+ struct stmp3xxx_dma_descriptor d;
+
+ u32 speed_khz;
+ u32 saved_timings;
+ u32 divider;
+
+ struct clk *clk;
+ struct device *master_dev;
+
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+
+ /* lock protects queue access */
+ spinlock_t lock;
+ struct list_head queue;
+
+ struct completion done;
+};
+
+#define busy_wait(cond) \
+ ({ \
+ unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \
+ bool succeeded = false; \
+ do { \
+ if (cond) { \
+ succeeded = true; \
+ break; \
+ } \
+ cpu_relax(); \
+ } while (time_before(jiffies, end_jiffies)); \
+ succeeded; \
+ })
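+
+/*
+ * busy_wait(cond) is a statement expression that polls cond and
+ * evaluates to true only if cond became true before the
+ * STMP_SPI_TIMEOUT deadline.
+ */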
+
+/**
+ * stmp_spi_init_hw - initialize the SSP port
+ * @ss: driver state of the SSP port being brought up
+ */
+static int stmp_spi_init_hw(struct stmp_spi *ss)
+{
+ int err = 0;
+ void *pins = ss->master_dev->platform_data;
+
+ err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev));
+ if (err)
+ goto out;
+
+ ss->clk = clk_get(NULL, "ssp");
+ if (IS_ERR(ss->clk)) {
+ err = PTR_ERR(ss->clk);
+ goto out_free_pins;
+ }
+ clk_enable(ss->clk);
+
+ stmp3xxx_reset_block(ss->regs, false);
+ stmp3xxx_dma_reset_channel(ss->dma);
+
+ return 0;
+
+out_free_pins:
+ stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
+out:
+ return err;
+}
+
+static void stmp_spi_release_hw(struct stmp_spi *ss)
+{
+ void *pins = ss->master_dev->platform_data;
+
+ if (ss->clk && !IS_ERR(ss->clk)) {
+ clk_disable(ss->clk);
+ clk_put(ss->clk);
+ }
+ stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
+}
+
+static int stmp_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u8 bits_per_word;
+ u32 hz;
+ struct stmp_spi *ss = spi_master_get_devdata(spi->master);
+ u16 rate;
+
+ bits_per_word = spi->bits_per_word;
+ if (t && t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ /*
+ * Calculate speed:
+ * - by default, use maximum speed from ssp clk
+ * - if device overrides it, use it
+ * - if transfer specifies other speed, use transfer's one
+ */
+ hz = 1000 * ss->speed_khz / ss->divider;
+ if (spi->max_speed_hz)
+ hz = min(hz, spi->max_speed_hz);
+ if (t && t->speed_hz)
+ hz = min(hz, t->speed_hz);
+
+ if (hz == 0) {
+ dev_err(&spi->dev, "Cannot continue with zero clock\n");
+ return -EINVAL;
+ }
+
+ if (bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __func__, bits_per_word);
+ return -EINVAL;
+ }
+
+ dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n",
+ hz, ss->speed_khz, ss->divider,
+ ss->speed_khz * 1000 / ss->divider);
+
+ if (ss->speed_khz * 1000 / ss->divider < hz) {
+ dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
+ __func__, hz);
+ return -EINVAL;
+ }
+
+ rate = 1000 * ss->speed_khz / ss->divider / hz;
+
+ writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) |
+ BF(rate - 1, SSP_TIMING_CLOCK_RATE),
+ HW_SSP_TIMING + ss->regs);
+
+ writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) |
+ BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) |
+ ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+ ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) |
+ (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE),
+ ss->regs + HW_SSP_CTRL1);
+
+ return 0;
+}
+
+static int stmp_spi_setup(struct spi_device *spi)
+{
+ /*
+ * spi_setup() does basic checks;
+ * stmp_spi_setup_transfer() does more later.
+ */
+ if (spi->bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __func__, spi->bits_per_word);
+ return -EINVAL;
+ }
+ return 0;
+}
+
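+/*
+ * The low two bits of the chip-select number are presumably encoded on
+ * the SSP_CMD and SSP_IRQ pins via the WAIT_FOR_CMD and WAIT_FOR_IRQ
+ * bits in HW_SSP_CTRL0.
+ */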
+static inline u32 stmp_spi_cs(unsigned cs)
+{
+ return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) |
+ ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0);
+}
+
+static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
+ unsigned char *buf, dma_addr_t dma_buf, int len,
+ int first, int last, bool write)
+{
+ u32 c0 = 0;
+ dma_addr_t spi_buf_dma = dma_buf;
+ int status = 0;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0);
+ c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0);
+ c0 |= (write ? 0 : BM_SSP_CTRL0_READ);
+ c0 |= BM_SSP_CTRL0_DATA_XFER;
+
+ c0 |= stmp_spi_cs(cs);
+
+ c0 |= BF(len, SSP_CTRL0_XFER_COUNT);
+
+ if (!dma_buf)
+ spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir);
+
+ ss->d.command->cmd =
+ BF(len, APBH_CHn_CMD_XFER_COUNT) |
+ BF(1, APBH_CHn_CMD_CMDWORDS) |
+ BM_APBH_CHn_CMD_WAIT4ENDCMD |
+ BM_APBH_CHn_CMD_IRQONCMPLT |
+ BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ :
+ BV_APBH_CHn_CMD_COMMAND__DMA_WRITE,
+ APBH_CHn_CMD_COMMAND);
+ ss->d.command->pio_words[0] = c0;
+ ss->d.command->buf_ptr = spi_buf_dma;
+
+ stmp3xxx_dma_reset_channel(ss->dma);
+ stmp3xxx_dma_clear_interrupt(ss->dma);
+ stmp3xxx_dma_enable_interrupt(ss->dma);
+ init_completion(&ss->done);
+ stmp3xxx_dma_go(ss->dma, &ss->d, 1);
+ wait_for_completion(&ss->done);
+
+ if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
+ status = -ETIMEDOUT;
+
+ if (!dma_buf)
+ dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
+
+ return status;
+}
+
+static inline void stmp_spi_enable(struct stmp_spi *ss)
+{
+ stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
+ stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
+}
+
+static inline void stmp_spi_disable(struct stmp_spi *ss)
+{
+ stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
+ stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
+}
+
+static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs,
+ unsigned char *buf, int len,
+ bool first, bool last, bool write)
+{
+ if (first)
+ stmp_spi_enable(ss);
+
+ stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0);
+
+ while (len--) {
+ if (last && len <= 0)
+ stmp_spi_disable(ss);
+
+ stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT,
+ ss->regs + HW_SSP_CTRL0);
+ stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0);
+
+ if (write)
+ stmp3xxx_clearl(BM_SSP_CTRL0_READ,
+ ss->regs + HW_SSP_CTRL0);
+ else
+ stmp3xxx_setl(BM_SSP_CTRL0_READ,
+ ss->regs + HW_SSP_CTRL0);
+
+ /* Run! */
+ stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0);
+
+ if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
+ BM_SSP_CTRL0_RUN))
+ break;
+
+ if (write)
+ writel(*buf, ss->regs + HW_SSP_DATA);
+
+ /* Set TRANSFER */
+ stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0);
+
+ if (!write) {
+ if (busy_wait((readl(ss->regs + HW_SSP_STATUS) &
+ BM_SSP_STATUS_FIFO_EMPTY)))
+ break;
+ *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF;
+ }
+
+ if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
+ BM_SSP_CTRL0_RUN))
+ break;
+
+ /* advance to the next byte */
+ buf++;
+ }
+
+ return len < 0 ? 0 : -ETIMEDOUT;
+}
+
+static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m)
+{
+ bool first, last;
+ struct spi_transfer *t, *tmp_t;
+ int status = 0;
+ int cs;
+
+ cs = m->spi->chip_select;
+
+ list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
+
+ first = (&t->transfer_list == m->transfers.next);
+ last = (&t->transfer_list == m->transfers.prev);
+
+ if (first || t->speed_hz || t->bits_per_word)
+ stmp_spi_setup_transfer(m->spi, t);
+
+ /* reject "not last" transfers which request to change cs */
+ if (t->cs_change && !last) {
+ dev_err(&m->spi->dev,
+ "Message with t->cs_change has been skipped\n");
+ continue;
+ }
+
+ if (t->tx_buf) {
+ status = pio ?
+ stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf,
+ t->len, first, last, true) :
+ stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf,
+ t->tx_dma, t->len, first, last, true);
+#ifdef DEBUG
+ if (t->len < 0x10)
+ print_hex_dump_bytes("Tx ",
+ DUMP_PREFIX_OFFSET,
+ t->tx_buf, t->len);
+ else
+ pr_debug("Tx: %d bytes\n", t->len);
+#endif
+ }
+ if (t->rx_buf) {
+ status = pio ?
+ stmp_spi_txrx_pio(ss, cs, t->rx_buf,
+ t->len, first, last, false) :
+ stmp_spi_txrx_dma(ss, cs, t->rx_buf,
+ t->rx_dma, t->len, first, last, false);
+#ifdef DEBUG
+ if (t->len < 0x10)
+ print_hex_dump_bytes("Rx ",
+ DUMP_PREFIX_OFFSET,
+ t->rx_buf, t->len);
+ else
+ pr_debug("Rx: %d bytes\n", t->len);
+#endif
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (status)
+ break;
+
+ }
+ return status;
+}
+
+/**
+ * stmp_spi_handle - handle messages from the queue
+ * @w: work item embedded in struct stmp_spi
+ */
+static void stmp_spi_handle(struct work_struct *w)
+{
+ struct stmp_spi *ss = container_of(w, struct stmp_spi, work);
+ unsigned long flags;
+ struct spi_message *m;
+
+ spin_lock_irqsave(&ss->lock, flags);
+ while (!list_empty(&ss->queue)) {
+ m = list_entry(ss->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ m->status = stmp_spi_handle_message(ss, m);
+ m->complete(m->context);
+
+ spin_lock_irqsave(&ss->lock, flags);
+ }
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return;
+}
+
+/**
+ * stmp_spi_transfer - queue a message for transfer.
+ * Called indirectly from spi_async(); the message is queued here and
+ * later handled by stmp_spi_handle_message() from the workqueue.
+ * @spi: spi device
+ * @m: message to be queued
+ */
+static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct stmp_spi *ss = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->status = -EINPROGRESS;
+ spin_lock_irqsave(&ss->lock, flags);
+ list_add_tail(&m->queue, &ss->queue);
+ queue_work(ss->workqueue, &ss->work);
+ spin_unlock_irqrestore(&ss->lock, flags);
+ return 0;
+}
+
+static irqreturn_t stmp_spi_irq(int irq, void *dev_id)
+{
+ struct stmp_spi *ss = dev_id;
+
+ stmp3xxx_dma_clear_interrupt(ss->dma);
+ complete(&ss->done);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id)
+{
+ struct stmp_spi *ss = dev_id;
+ u32 c1, st;
+
+ c1 = readl(ss->regs + HW_SSP_CTRL1);
+ st = readl(ss->regs + HW_SSP_STATUS);
+ dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n",
+ __func__, st, c1);
+ stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit stmp_spi_probe(struct platform_device *dev)
+{
+ int err = 0;
+ struct spi_master *master;
+ struct stmp_spi *ss;
+ struct resource *r;
+
+ master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi));
+ if (master == NULL) {
+ err = -ENOMEM;
+ goto out0;
+ }
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
+ ss = spi_master_get_devdata(master);
+ platform_set_drvdata(dev, master);
+
+ /* Get resources (memory, IRQ) associated with the device */
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ err = -ENODEV;
+ goto out_put_master;
+ }
+ ss->regs = ioremap(r->start, resource_size(r));
+ if (!ss->regs) {
+ err = -EINVAL;
+ goto out_put_master;
+ }
+
+ ss->master_dev = &dev->dev;
+ ss->id = dev->id;
+
+ INIT_WORK(&ss->work, stmp_spi_handle);
+ INIT_LIST_HEAD(&ss->queue);
+ spin_lock_init(&ss->lock);
+
+ ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
+ if (!ss->workqueue) {
+ err = -ENXIO;
+ goto out_put_master;
+ }
+ master->transfer = stmp_spi_transfer;
+ master->setup = stmp_spi_setup;
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+ ss->irq = platform_get_irq(dev, 0);
+ if (ss->irq < 0) {
+ err = ss->irq;
+ goto out_put_master;
+ }
+ ss->err_irq = platform_get_irq(dev, 1);
+ if (ss->err_irq < 0) {
+ err = ss->err_irq;
+ goto out_put_master;
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ err = -ENODEV;
+ goto out_put_master;
+ }
+
+ ss->dma = r->start;
+ err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev));
+ if (err)
+ goto out_put_master;
+
+ err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d);
+ if (err)
+ goto out_free_dma;
+
+ master->bus_num = dev->id;
+ master->num_chipselect = 1;
+
+ /* SPI controller initialization */
+ err = stmp_spi_init_hw(ss);
+ if (err) {
+ dev_dbg(&dev->dev, "cannot initialize hardware\n");
+ goto out_free_dma_desc;
+ }
+
+ if (clock) {
+ dev_info(&dev->dev, "clock rate forced to %d\n", clock);
+ clk_set_rate(ss->clk, clock);
+ }
+ ss->speed_khz = clk_get_rate(ss->clk);
+ ss->divider = 2;
+ dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n",
+ ss->speed_khz, clk_get_rate(ss->clk), ss->divider);
+
+ /* Register for SPI interrupt */
+ err = request_irq(ss->irq, stmp_spi_irq, 0,
+ dev_name(&dev->dev), ss);
+ if (err) {
+ dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
+ goto out_release_hw;
+ }
+
+ /* ...and the shared error interrupt used by all SSP controllers */
+ err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED,
+ dev_name(&dev->dev), ss);
+ if (err) {
+ dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err);
+ goto out_free_irq;
+ }
+
+ err = spi_register_master(master);
+ if (err) {
+ dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
+ goto out_free_irq_2;
+ }
+ dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n",
+ (u32)ss->regs, ss->irq, master->bus_num,
+ pio ? "PIO" : "DMA");
+ return 0;
+
+out_free_irq_2:
+ free_irq(ss->err_irq, ss);
+out_free_irq:
+ free_irq(ss->irq, ss);
+out_free_dma_desc:
+ stmp3xxx_dma_free_command(ss->dma, &ss->d);
+out_free_dma:
+ stmp3xxx_dma_release(ss->dma);
+out_release_hw:
+ stmp_spi_release_hw(ss);
+out_put_master:
+ if (ss->workqueue)
+ destroy_workqueue(ss->workqueue);
+ if (ss->regs)
+ iounmap(ss->regs);
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(master);
+out0:
+ return err;
+}
+
+static int __devexit stmp_spi_remove(struct platform_device *dev)
+{
+ struct stmp_spi *ss;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(dev);
+ if (master == NULL)
+ goto out0;
+ ss = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+
+ free_irq(ss->err_irq, ss);
+ free_irq(ss->irq, ss);
+ stmp3xxx_dma_free_command(ss->dma, &ss->d);
+ stmp3xxx_dma_release(ss->dma);
+ stmp_spi_release_hw(ss);
+ destroy_workqueue(ss->workqueue);
+ iounmap(ss->regs);
+ spi_master_put(master);
+ platform_set_drvdata(dev, NULL);
+out0:
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg)
+{
+ struct stmp_spi *ss;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(pdev);
+ ss = spi_master_get_devdata(master);
+
+ ss->saved_timings = readl(HW_SSP_TIMING + ss->regs);
+ clk_disable(ss->clk);
+
+ return 0;
+}
+
+static int stmp_spi_resume(struct platform_device *pdev)
+{
+ struct stmp_spi *ss;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(pdev);
+ ss = spi_master_get_devdata(master);
+
+ clk_enable(ss->clk);
+ stmp3xxx_reset_block(ss->regs, false);
+ writel(ss->saved_timings, ss->regs + HW_SSP_TIMING);
+
+ return 0;
+}
+
+#else
+#define stmp_spi_suspend NULL
+#define stmp_spi_resume NULL
+#endif
+
+static struct platform_driver stmp_spi_driver = {
+ .probe = stmp_spi_probe,
+ .remove = __devexit_p(stmp_spi_remove),
+ .driver = {
+ .name = "stmp3xxx_ssp",
+ .owner = THIS_MODULE,
+ },
+ .suspend = stmp_spi_suspend,
+ .resume = stmp_spi_resume,
+};
+
+static int __init stmp_spi_init(void)
+{
+ return platform_driver_register(&stmp_spi_driver);
+}
+
+static void __exit stmp_spi_exit(void)
+{
+ platform_driver_unregister(&stmp_spi_driver);
+}
+
+module_init(stmp_spi_init);
+module_exit(stmp_spi_exit);
+module_param(pio, int, S_IRUGO);
+module_param(clock, int, S_IRUGO);
+MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
+MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 606e7a40a8d..f921bd1109e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -688,3 +688,4 @@ module_exit(spidev_exit);
MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:spidev");
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index 455991fbe28..bf9540f5fb9 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -329,3 +329,4 @@ module_exit(tle62x0_exit);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("TLE62x0 SPI driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:tle62x0");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 82b34893e5b..9a4dd5992f6 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -117,8 +117,6 @@ source "drivers/staging/vt6655/Kconfig"
source "drivers/staging/vt6656/Kconfig"
-source "drivers/staging/cpc-usb/Kconfig"
-
source "drivers/staging/udlfb/Kconfig"
source "drivers/staging/hv/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b1cad0d9ba7..104f2f8897e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
-obj-$(CONFIG_USB_CPC) += cpc-usb/
obj-$(CONFIG_FB_UDL) += udlfb/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
diff --git a/drivers/staging/cpc-usb/Kconfig b/drivers/staging/cpc-usb/Kconfig
deleted file mode 100644
index 2be0bc9c39d..00000000000
--- a/drivers/staging/cpc-usb/Kconfig
+++ /dev/null
@@ -1,4 +0,0 @@
-config USB_CPC
- tristate "CPC CAN USB driver"
- depends on USB && PROC_FS
- default n
diff --git a/drivers/staging/cpc-usb/Makefile b/drivers/staging/cpc-usb/Makefile
deleted file mode 100644
index 3f83170a8fa..00000000000
--- a/drivers/staging/cpc-usb/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_USB_CPC) += cpc-usb.o
-
-cpc-usb-y := cpc-usb_drv.o sja2m16c_2.o
diff --git a/drivers/staging/cpc-usb/TODO b/drivers/staging/cpc-usb/TODO
deleted file mode 100644
index 9b1752fb9cd..00000000000
--- a/drivers/staging/cpc-usb/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-Things to do for this driver to get merged into the main portion of the
-kernel:
- - checkpatch cleanups
- - sparse clean
- - remove proc code
- - tie into CAN socket interfaces if possible
- - figure out sane userspace api
- - use linux's error codes
-
-Send patches to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/cpc-usb/cpc-usb_drv.c b/drivers/staging/cpc-usb/cpc-usb_drv.c
deleted file mode 100644
index c5eca46996f..00000000000
--- a/drivers/staging/cpc-usb/cpc-usb_drv.c
+++ /dev/null
@@ -1,1184 +0,0 @@
-/*
- * CPC-USB CAN Interface Kernel Driver
- *
- * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
-#include <asm/uaccess.h>
-#include <linux/usb.h>
-
-
-#include <linux/proc_fs.h>
-
-#include "cpc.h"
-
-#include "cpc_int.h"
-#include "cpcusb.h"
-
-#include "sja2m16c.h"
-
-/* Version Information */
-#define DRIVER_AUTHOR "Sebastian Haas <haas@ems-wuensche.com>"
-#define DRIVER_DESC "CPC-USB Driver for Linux Kernel 2.6"
-#define DRIVER_VERSION CPC_DRIVER_VERSION
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL v2");
-
-/* Define these values to match your devices */
-#define USB_CPCUSB_VENDOR_ID 0x12D6
-
-#define USB_CPCUSB_M16C_PRODUCT_ID 0x0888
-#define USB_CPCUSB_LPC2119_PRODUCT_ID 0x0444
-
-#define CPC_USB_PROC_DIR CPC_PROC_DIR "cpc-usb"
-
-static struct proc_dir_entry *procDir;
-static struct proc_dir_entry *procEntry;
-
-/* Module parameters */
-static int debug;
-module_param(debug, int, S_IRUGO);
-
-/* table of devices that work with this driver */
-static struct usb_device_id cpcusb_table[] = {
- {USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_M16C_PRODUCT_ID)},
- {USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_LPC2119_PRODUCT_ID)},
- {} /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, cpcusb_table);
-
-/* use to prevent kernel panic if driver is unloaded
- * while a programm has still open the device
- */
-DECLARE_WAIT_QUEUE_HEAD(rmmodWq);
-atomic_t useCount;
-
-static CPC_USB_T *CPCUSB_Table[CPC_USB_CARD_CNT] = { 0 };
-static unsigned int CPCUsbCnt;
-
-/* prevent races between open() and disconnect() */
-static DECLARE_MUTEX(disconnect_sem);
-
-/* local function prototypes */
-static ssize_t cpcusb_read(struct file *file, char *buffer, size_t count,
- loff_t *ppos);
-static ssize_t cpcusb_write(struct file *file, const char *buffer,
- size_t count, loff_t *ppos);
-static unsigned int cpcusb_poll(struct file *file, poll_table * wait);
-static int cpcusb_open(struct inode *inode, struct file *file);
-static int cpcusb_release(struct inode *inode, struct file *file);
-
-static int cpcusb_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void cpcusb_disconnect(struct usb_interface *interface);
-
-static void cpcusb_read_bulk_callback(struct urb *urb);
-static void cpcusb_write_bulk_callback(struct urb *urb);
-static void cpcusb_read_interrupt_callback(struct urb *urb);
-
-static int cpcusb_setup_intrep(CPC_USB_T *card);
-
-static struct file_operations cpcusb_fops = {
- /*
- * The owner field is part of the module-locking
- * mechanism. The idea is that the kernel knows
- * which module to increment the use-counter of
- * BEFORE it calls the device's open() function.
- * This also means that the kernel can decrement
- * the use-counter again before calling release()
- * or should the open() function fail.
- */
- .owner = THIS_MODULE,
-
- .read = cpcusb_read,
- .write = cpcusb_write,
- .poll = cpcusb_poll,
- .open = cpcusb_open,
- .release = cpcusb_release,
-};
-
-/*
- * usb class driver info in order to get a minor number from the usb core,
- * and to have the device registered with devfs and the driver core
- */
-static struct usb_class_driver cpcusb_class = {
- .name = "usb/cpc_usb%d",
- .fops = &cpcusb_fops,
- .minor_base = CPC_USB_BASE_MNR,
-};
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver cpcusb_driver = {
- .name = "cpc-usb",
- .probe = cpcusb_probe,
- .disconnect = cpcusb_disconnect,
- .id_table = cpcusb_table,
-};
-
-static int cpcusb_create_info_output(char *buf)
-{
- int i = 0, j;
-
- for (j = 0; j < CPC_USB_CARD_CNT; j++) {
- if (CPCUSB_Table[j]) {
- CPC_USB_T *card = CPCUSB_Table[j];
- CPC_CHAN_T *chan = card->chan;
-
- /* MINOR CHANNELNO BUSNO SLOTNO */
- i += sprintf(&buf[i], "%d %s\n", chan->minor,
- card->serialNumber);
- }
- }
-
- return i;
-}
-
-static int cpcusb_proc_read_info(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = cpcusb_create_info_output(page);
-
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
-
- return len;
-}
-
-/*
- * Remove CPC-USB and cleanup
- */
-static inline void cpcusb_delete(CPC_USB_T *card)
-{
- if (card) {
- if (card->chan) {
- if (card->chan->buf)
- vfree(card->chan->buf);
-
- if (card->chan->CPCWait_q)
- kfree(card->chan->CPCWait_q);
-
- kfree(card->chan);
- }
-
- CPCUSB_Table[card->idx] = NULL;
- kfree(card);
- }
-}
-
-/*
- * setup the interrupt IN endpoint of a specific CPC-USB device
- */
-static int cpcusb_setup_intrep(CPC_USB_T *card)
-{
- int retval = 0;
- struct usb_endpoint_descriptor *ep;
-
- ep = &card->interface->altsetting[0].endpoint[card->num_intr_in].desc;
-
- card->intr_in_buffer[0] = 0;
- card->free_slots = 15; /* initial size */
-
- /* setup the urb */
- usb_fill_int_urb(card->intr_in_urb, card->udev,
- usb_rcvintpipe(card->udev, card->num_intr_in),
- card->intr_in_buffer,
- sizeof(card->intr_in_buffer),
- cpcusb_read_interrupt_callback,
- card,
- ep->bInterval);
-
- card->intr_in_urb->status = 0; /* needed! */
-
- /* submit the urb */
- retval = usb_submit_urb(card->intr_in_urb, GFP_KERNEL);
-
- if (retval)
- err("%s - failed submitting intr urb, error %d", __func__,
- retval);
-
- return retval;
-}
-
-static int cpcusb_open(struct inode *inode, struct file *file)
-{
- CPC_USB_T *card = NULL;
- struct usb_interface *interface;
- int subminor;
- int j, retval = 0;
-
- subminor = iminor(inode);
-
- /* prevent disconnects */
- down(&disconnect_sem);
-
- interface = usb_find_interface(&cpcusb_driver, subminor);
- if (!interface) {
- err("%s - error, can't find device for minor %d",
- __func__, subminor);
- retval = CPC_ERR_NO_INTERFACE_PRESENT;
- goto exit_no_device;
- }
-
- card = usb_get_intfdata(interface);
- if (!card) {
- retval = CPC_ERR_NO_INTERFACE_PRESENT;
- goto exit_no_device;
- }
-
- /* lock this device */
- down(&card->sem);
-
- /* increment our usage count for the driver */
- if (card->open) {
- dbg("device already opened");
- retval = CPC_ERR_CHANNEL_ALREADY_OPEN;
- goto exit_on_error;
- }
-
- /* save our object in the file's private structure */
- file->private_data = card;
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- usb_fill_bulk_urb(card->urbs[j].urb, card->udev,
- usb_rcvbulkpipe(card->udev, card->num_bulk_in),
- card->urbs[j].buffer, card->urbs[j].size,
- cpcusb_read_bulk_callback, card);
-
- retval = usb_submit_urb(card->urbs[j].urb, GFP_KERNEL);
-
- if (retval) {
- err("%s - failed submitting read urb, error %d",
- __func__, retval);
- retval = CPC_ERR_TRANSMISSION_FAILED;
- goto exit_on_error;
- }
- }
-
- info("%s - %d URB's submitted", __func__, j);
-
- ResetBuffer(card->chan);
-
- cpcusb_setup_intrep(card);
- card->open = 1;
-
- atomic_inc(&useCount);
-
-exit_on_error:
- /* unlock this device */
- up(&card->sem);
-
-exit_no_device:
- up(&disconnect_sem);
-
- return retval;
-}
-
-static unsigned int cpcusb_poll(struct file *file, poll_table * wait)
-{
- CPC_USB_T *card = (CPC_USB_T *) file->private_data;
- unsigned int retval = 0;
-
- if (!card) {
- err("%s - device object lost", __func__);
- return -EIO;
- }
-
- poll_wait(file, card->chan->CPCWait_q, wait);
-
- if (IsBufferNotEmpty(card->chan) || !(card->present))
- retval |= (POLLIN | POLLRDNORM);
-
- if (card->free_slots)
- retval |= (POLLOUT | POLLWRNORM);
-
- return retval;
-}
-
-static int cpcusb_release(struct inode *inode, struct file *file)
-{
- CPC_USB_T *card = (CPC_USB_T *) file->private_data;
- int j, retval = 0;
-
- if (card == NULL) {
- dbg("%s - object is NULL", __func__);
- return CPC_ERR_NO_INTERFACE_PRESENT;
- }
-
- /* lock our device */
- down(&card->sem);
-
- if (!card->open) {
- dbg("%s - device not opened", __func__);
- retval = CPC_ERR_NO_INTERFACE_PRESENT;
- goto exit_not_opened;
- }
-
- /* if device wasn't unplugged kill all urbs */
- if (card->present) {
- /* kill read urbs */
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- usb_kill_urb(card->urbs[j].urb);
- }
-
- /* kill irq urb */
- usb_kill_urb(card->intr_in_urb);
-
- /* kill write urbs */
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- if (atomic_read(&card->wrUrbs[j].busy)) {
- usb_kill_urb(card->wrUrbs[j].urb);
- wait_for_completion(&card->wrUrbs[j].finished);
- }
- }
- }
-
- atomic_dec(&useCount);
-
- /* last process detached */
- if (atomic_read(&useCount) == 0) {
- wake_up(&rmmodWq);
- }
-
- if (!card->present && card->open) {
- /* the device was unplugged before the file was released */
- up(&card->sem);
- cpcusb_delete(card);
- return 0;
- }
-
- card->open = 0;
-
-exit_not_opened:
- up(&card->sem);
-
- return 0;
-}
-
-static ssize_t cpcusb_read(struct file *file, char *buffer, size_t count,
- loff_t *ppos)
-{
- CPC_USB_T *card = (CPC_USB_T *) file->private_data;
- CPC_CHAN_T *chan;
- int retval = 0;
-
- if (count < sizeof(CPC_MSG_T))
- return CPC_ERR_UNKNOWN;
-
- /* check if can read from the given address */
- if (!access_ok(VERIFY_WRITE, buffer, count))
- return CPC_ERR_UNKNOWN;
-
- /* lock this object */
- down(&card->sem);
-
- /* verify that the device wasn't unplugged */
- if (!card->present) {
- up(&card->sem);
- return CPC_ERR_NO_INTERFACE_PRESENT;
- }
-
- if (IsBufferEmpty(card->chan)) {
- retval = 0;
- } else {
- chan = card->chan;
-
-#if 0
- /* convert LPC2119 params back to SJA1000 params */
- if (card->deviceRevision >= 0x0200
- && chan->buf[chan->oidx].type == CPC_MSG_T_CAN_PRMS) {
- LPC2119_TO_SJA1000_Params(&chan->buf[chan->oidx]);
- }
-#endif
-
- if (copy_to_user(buffer, &chan->buf[chan->oidx], count) != 0) {
- retval = CPC_ERR_IO_TRANSFER;
- } else {
- chan->oidx = (chan->oidx + 1) % CPC_MSG_BUF_CNT;
- chan->WnR = 1;
- retval = sizeof(CPC_MSG_T);
- }
- }
-/* spin_unlock_irqrestore(&card->slock, flags); */
-
- /* unlock the device */
- up(&card->sem);
-
- return retval;
-}
-
-#define SHIFT 1
-static inline void cpcusb_align_buffer_alignment(unsigned char *buf)
-{
- /* CPC-USB uploads packed bytes. */
- CPC_MSG_T *cpc = (CPC_MSG_T *) buf;
- unsigned int i;
-
- for (i = 0; i < cpc->length + (2 * sizeof(unsigned long)); i++) {
- ((unsigned char *) &cpc->msgid)[1 + i] =
- ((unsigned char *) &cpc->msgid)[1 + SHIFT + i];
- }
-}
-
-static int cpc_get_buffer_count(CPC_CHAN_T *chan)
-{
- /* check the buffer parameters */
- if (chan->iidx == chan->oidx)
- return !chan->WnR ? CPC_MSG_BUF_CNT : 0;
- else if (chan->iidx >= chan->oidx)
- return (chan->iidx - chan->oidx) % CPC_MSG_BUF_CNT;
-
- return (chan->iidx + CPC_MSG_BUF_CNT - chan->oidx) % CPC_MSG_BUF_CNT;
-}
-
-static ssize_t cpcusb_write(struct file *file, const char *buffer,
- size_t count, loff_t *ppos)
-{
- CPC_USB_T *card = (CPC_USB_T *) file->private_data;
- CPC_USB_WRITE_URB_T *wrUrb = NULL;
-
- ssize_t bytes_written = 0;
- int retval = 0;
- int j;
-
- unsigned char *obuf = NULL;
- unsigned char type = 0;
- CPC_MSG_T *info = NULL;
-
- dbg("%s - entered minor %d, count = %zu, present = %d",
- __func__, card->minor, count, card->present);
-
- if (count > sizeof(CPC_MSG_T))
- return CPC_ERR_UNKNOWN;
-
- /* check if can read from the given address */
- if (!access_ok(VERIFY_READ, buffer, count))
- return CPC_ERR_UNKNOWN;
-
- /* lock this object */
- down(&card->sem);
-
- /* verify that the device wasn't unplugged */
- if (!card->present) {
- retval = CPC_ERR_NO_INTERFACE_PRESENT;
- goto exit;
- }
-
- /* verify that we actually have some data to write */
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
- goto exit;
- }
-
- if (card->free_slots <= 5) {
- info = (CPC_MSG_T *) buffer;
-
- if (info->type != CPC_CMD_T_CLEAR_CMD_QUEUE
- || card->free_slots <= 0) {
- dbg("%s - send buffer full please try again %d",
- __func__, card->free_slots);
- retval = CPC_ERR_CAN_NO_TRANSMIT_BUF;
- goto exit;
- }
- }
-
- /* Find a free write urb */
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- if (!atomic_read(&card->wrUrbs[j].busy)) {
- wrUrb = &card->wrUrbs[j]; /* remember found URB */
- atomic_set(&wrUrb->busy, 1); /* lock this URB */
- init_completion(&wrUrb->finished); /* init completion */
- dbg("WR URB no. %d started", j);
- break;
- }
- }
-
- /* don't found write urb say error */
- if (!wrUrb) {
- dbg("%s - no free send urb available", __func__);
- retval = CPC_ERR_CAN_NO_TRANSMIT_BUF;
- goto exit;
- }
- dbg("URB write req");
-
- obuf = (unsigned char *) wrUrb->urb->transfer_buffer;
-
- /* copy the data from userspace into our transfer buffer;
- * this is the only copy required.
- */
- if (copy_from_user(&obuf[4], buffer, count) != 0) {
- atomic_set(&wrUrb->busy, 0); /* release urb */
- retval = CPC_ERR_IO_TRANSFER;
- goto exit;
- }
-
- /* check if it is a DRIVER information message, so we can
- * response to that message and not the USB
- */
- info = (CPC_MSG_T *) &obuf[4];
-
- bytes_written = 11 + info->length;
- if (bytes_written >= wrUrb->size) {
- retval = CPC_ERR_IO_TRANSFER;
- goto exit;
- }
-
- switch (info->type) {
- case CPC_CMD_T_CLEAR_MSG_QUEUE:
- ResetBuffer(card->chan);
- break;
-
- case CPC_CMD_T_INQ_MSG_QUEUE_CNT:
- retval = cpc_get_buffer_count(card->chan);
- atomic_set(&wrUrb->busy, 0);
-
- goto exit;
-
- case CPC_CMD_T_INQ_INFO:
- if (info->msg.info.source == CPC_INFOMSG_T_DRIVER) {
- /* release urb cause we'll use it for driver
- * information
- */
- atomic_set(&wrUrb->busy, 0);
- if (IsBufferFull(card->chan)) {
- retval = CPC_ERR_IO_TRANSFER;
- goto exit;
- }
-
- /* it is a driver information request message and we have
- * free rx slots to store the response
- */
- type = info->msg.info.type;
- info = &card->chan->buf[card->chan->iidx];
-
- info->type = CPC_MSG_T_INFO;
- info->msg.info.source = CPC_INFOMSG_T_DRIVER;
- info->msg.info.type = type;
-
- switch (type) {
- case CPC_INFOMSG_T_VERSION:
- info->length = strlen(CPC_DRIVER_VERSION) + 2;
- sprintf(info->msg.info.msg, "%s\n",
- CPC_DRIVER_VERSION);
- break;
-
- case CPC_INFOMSG_T_SERIAL:
- info->length = strlen(CPC_DRIVER_SERIAL) + 2;
- sprintf(info->msg.info.msg, "%s\n",
- CPC_DRIVER_SERIAL);
- break;
-
- default:
- info->length = 2;
- info->msg.info.type =
- CPC_INFOMSG_T_UNKNOWN_TYPE;
- }
-
- card->chan->WnR = 0;
- card->chan->iidx =
- (card->chan->iidx + 1) % CPC_MSG_BUF_CNT;
-
- retval = info->length;
- goto exit;
- }
- break;
- case CPC_CMD_T_CAN_PRMS:
- /* Check the controller type. If it's the new CPC-USB, make sure if these are SJA1000 params */
- if (info->msg.canparams.cc_type != SJA1000
- && info->msg.canparams.cc_type != M16C_BASIC
- && (card->productId == USB_CPCUSB_LPC2119_PRODUCT_ID
- && info->msg.canparams.cc_type != SJA1000)) {
- /* don't forget to release the urb */
- atomic_set(&wrUrb->busy, 0);
- retval = CPC_ERR_WRONG_CONTROLLER_TYPE;
- goto exit;
- }
- break;
- }
-
- /* just convert the params if it is an old CPC-USB with M16C controller */
- if (card->productId == USB_CPCUSB_M16C_PRODUCT_ID) {
- /* if it is a parameter message convert it from SJA1000 controller
- * settings to M16C Basic controller settings
- */
- SJA1000_TO_M16C_BASIC_Params((CPC_MSG_T *) &obuf[4]);
- }
-
- /* don't forget the byte alignment */
- cpcusb_align_buffer_alignment(&obuf[4]);
-
- /* setup a the 4 byte header */
- obuf[0] = obuf[1] = obuf[2] = obuf[3] = 0;
-
- /* this urb was already set up, except for this write size */
- wrUrb->urb->transfer_buffer_length = bytes_written + 4;
-
- /* send the data out the bulk port */
- /* a character device write uses GFP_KERNEL,
- unless a spinlock is held */
- retval = usb_submit_urb(wrUrb->urb, GFP_KERNEL);
- if (retval) {
- atomic_set(&wrUrb->busy, 0); /* release urb */
- err("%s - failed submitting write urb, error %d",
- __func__, retval);
- } else {
- retval = bytes_written;
- }
-
-exit:
- /* unlock the device */
- up(&card->sem);
-
- dbg("%s - leaved", __func__);
-
- return retval;
-}
-
-/*
- * callback for interrupt IN urb
- */
-static void cpcusb_read_interrupt_callback(struct urb *urb)
-{
- CPC_USB_T *card = (CPC_USB_T *) urb->context;
- int retval;
- unsigned long flags;
-
- spin_lock_irqsave(&card->slock, flags);
-
- if (!card->present) {
- spin_unlock_irqrestore(&card->slock, flags);
- info("%s - no such device", __func__);
- return;
- }
-
- switch (urb->status) {
- case 0: /* success */
- card->free_slots = card->intr_in_buffer[1];
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* urb was killed */
- spin_unlock_irqrestore(&card->slock, flags);
- dbg("%s - intr urb killed", __func__);
- return;
- default:
- info("%s - nonzero urb status %d", __func__, urb->status);
- break;
- }
-
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval) {
- err("%s - failed resubmitting intr urb, error %d",
- __func__, retval);
- }
-
- spin_unlock_irqrestore(&card->slock, flags);
- wake_up_interruptible(card->chan->CPCWait_q);
-
- return;
-}
-
-#define UN_SHIFT 1
-#define CPCMSG_HEADER_LEN_FIRMWARE 11
-static inline int cpcusb_unalign_and_copy_buffy(unsigned char *out,
- unsigned char *in)
-{
- unsigned int i, j;
-
- for (i = 0; i < 3; i++)
- out[i] = in[i];
-
- for (j = 0; j < (in[1] + (CPCMSG_HEADER_LEN_FIRMWARE - 3)); j++)
- out[j + i + UN_SHIFT] = in[j + i];
-
- return i + j;
-}
-
-/*
- * callback for bulk IN urb
- */
-static void cpcusb_read_bulk_callback(struct urb *urb)
-{
- CPC_USB_T *card = (CPC_USB_T *) urb->context;
- CPC_CHAN_T *chan;
- unsigned char *ibuf = urb->transfer_buffer;
- int retval, msgCnt, start, again = 0;
- unsigned long flags;
-
- if (!card) {
- err("%s - device object lost", __func__);
- return;
- }
-
- spin_lock_irqsave(&card->slock, flags);
-
- if (!card->present) {
- spin_unlock_irqrestore(&card->slock, flags);
- info("%s - no such device", __func__);
- return;
- }
-
- switch (urb->status) {
- case 0: /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* urb was killed */
- spin_unlock_irqrestore(&card->slock, flags);
- dbg("%s - read urb killed", __func__);
- return;
- default:
- info("%s - nonzero urb status %d", __func__, urb->status);
- break;
- }
-
- if (urb->actual_length) {
- msgCnt = ibuf[0] & ~0x80;
- again = ibuf[0] & 0x80;
-
- /* we have a 4 byte header */
- start = 4;
- chan = card->chan;
- while (msgCnt) {
- if (!(IsBufferFull(card->chan))) {
- start +=
- cpcusb_unalign_and_copy_buffy((unsigned char *)
- &chan->buf[chan->iidx], &ibuf[start]);
-
- if (start > urb->transfer_buffer_length) {
- err("%d > %d", start, urb->transfer_buffer_length);
- break;
- }
-
- chan->WnR = 0;
- chan->iidx = (chan->iidx + 1) % CPC_MSG_BUF_CNT;
- msgCnt--;
- } else {
- break;
- }
- }
- }
-
- usb_fill_bulk_urb(urb, card->udev,
- usb_rcvbulkpipe(card->udev, card->num_bulk_in),
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- cpcusb_read_bulk_callback, card);
-
- retval = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (retval) {
- err("%s - failed resubmitting read urb, error %d", __func__, retval);
- }
-
- spin_unlock_irqrestore(&card->slock, flags);
-
- wake_up_interruptible(card->chan->CPCWait_q);
-}
-
-/*
- * callback for bulk IN urb
- */
-static void cpcusb_write_bulk_callback(struct urb *urb)
-{
- CPC_USB_T *card = (CPC_USB_T *) urb->context;
- unsigned long flags;
- int j;
-
- spin_lock_irqsave(&card->slock, flags);
-
- /* find this urb */
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- if (card->wrUrbs[j].urb == urb) {
- dbg("URB found no. %d", j);
- /* notify anyone waiting that the write has finished */
- complete(&card->wrUrbs[j].finished);
- atomic_set(&card->wrUrbs[j].busy, 0);
- break;
- }
- }
-
- switch (urb->status) {
- case 0: /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* urb was killed */
- spin_unlock_irqrestore(&card->slock, flags);
- dbg("%s - write urb no. %d killed", __func__, j);
- return;
- default:
- info("%s - nonzero urb status %d", __func__, urb->status);
- break;
- }
-
- spin_unlock_irqrestore(&card->slock, flags);
-
- wake_up_interruptible(card->chan->CPCWait_q);
-}
-
-static inline int cpcusb_get_free_slot(void)
-{
- int i;
-
- for (i = 0; i < CPC_USB_CARD_CNT; i++) {
- if (!CPCUSB_Table[i])
- return i;
- }
-
- return -1;
-}
-
-/*
- * probe function for new CPC-USB devices
- */
-static int cpcusb_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- CPC_USB_T *card = NULL;
- CPC_CHAN_T *chan = NULL;
-
- struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
-
- int i, j, retval = -ENOMEM, slot;
-
- slot = cpcusb_get_free_slot();
- if (slot < 0) {
- info("No more devices supported");
- return -ENOMEM;
- }
-
- /* allocate memory for our device state and initialize it */
- card = kzalloc(sizeof(CPC_USB_T), GFP_KERNEL);
- if (!card) {
- err("Out of memory");
- return -ENOMEM;
- }
- CPCUSB_Table[slot] = card;
-
- /* allocate and initialize the channel struct */
- card->chan = kmalloc(sizeof(CPC_CHAN_T), GFP_KERNEL);
- if (!card->chan) {
- kfree(card);
- err("Out of memory");
- return -ENOMEM;
- }
-
- chan = card->chan;
- memset(chan, 0, sizeof(CPC_CHAN_T));
- ResetBuffer(chan);
-
- init_MUTEX(&card->sem);
- spin_lock_init(&card->slock);
-
- card->udev = udev;
- card->interface = interface;
- if (udev->descriptor.iSerialNumber) {
- usb_string(udev, udev->descriptor.iSerialNumber, card->serialNumber,
- 128);
- info("Serial %s", card->serialNumber);
- }
-
- card->productId = udev->descriptor.idProduct;
- info("Product %s",
- card->productId == USB_CPCUSB_LPC2119_PRODUCT_ID ?
- "CPC-USB/ARM7" : "CPC-USB/M16C");
-
- /* set up the endpoint information */
- /* check out the endpoints */
- /* use only the first bulk-in and bulk-out endpoints */
- iface_desc = &interface->altsetting[0];
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!card->num_intr_in &&
- (endpoint->bEndpointAddress & USB_DIR_IN) &&
- ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT)) {
- card->intr_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- card->num_intr_in = 1;
-
- if (!card->intr_in_urb) {
- err("No free urbs available");
- goto error;
- }
-
- dbg("intr_in urb %d", card->num_intr_in);
- }
-
- if (!card->num_bulk_in &&
- (endpoint->bEndpointAddress & USB_DIR_IN) &&
- ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_BULK)) {
- card->num_bulk_in = 2;
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- card->urbs[j].size = endpoint->wMaxPacketSize;
- card->urbs[j].urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!card->urbs[j].urb) {
- err("No free urbs available");
- goto error;
- }
- card->urbs[j].buffer =
- usb_buffer_alloc(udev,
- card->urbs[j].size,
- GFP_KERNEL,
- &card->urbs[j].urb->transfer_dma);
- if (!card->urbs[j].buffer) {
- err("Couldn't allocate bulk_in_buffer");
- goto error;
- }
- }
- info("%s - %d reading URB's allocated",
- __func__, CPC_USB_URB_CNT);
- }
-
- if (!card->num_bulk_out &&
- !(endpoint->bEndpointAddress & USB_DIR_IN) &&
- ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_BULK)) {
-
- card->num_bulk_out = 2;
-
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- card->wrUrbs[j].size =
- endpoint->wMaxPacketSize;
- card->wrUrbs[j].urb =
- usb_alloc_urb(0, GFP_KERNEL);
- if (!card->wrUrbs[j].urb) {
- err("No free urbs available");
- goto error;
- }
- card->wrUrbs[j].buffer = usb_buffer_alloc(udev,
- card->wrUrbs[j].size, GFP_KERNEL,
- &card->wrUrbs[j].urb->transfer_dma);
-
- if (!card->wrUrbs[j].buffer) {
- err("Couldn't allocate bulk_out_buffer");
- goto error;
- }
-
- usb_fill_bulk_urb(card->wrUrbs[j].urb, udev,
- usb_sndbulkpipe(udev, endpoint->bEndpointAddress),
- card->wrUrbs[j].buffer,
- card->wrUrbs[j].size,
- cpcusb_write_bulk_callback,
- card);
- }
-
- info("%s - %d writing URB's allocated", __func__, CPC_USB_URB_CNT);
- }
- }
-
- if (!(card->num_bulk_in && card->num_bulk_out)) {
- err("Couldn't find both bulk-in and bulk-out endpoints");
- goto error;
- }
-
- /* allow device read, write and ioctl */
- card->present = 1;
-
- /* we can register the device now, as it is ready */
- usb_set_intfdata(interface, card);
- retval = usb_register_dev(interface, &cpcusb_class);
-
- if (retval) {
- /* something prevented us from registering this driver */
- err("Not able to get a minor for this device.");
- usb_set_intfdata(interface, NULL);
- goto error;
- }
-
- card->chan->minor = card->minor = interface->minor;
-
- chan->buf = vmalloc(sizeof(CPC_MSG_T) * CPC_MSG_BUF_CNT);
- if (chan->buf == NULL) {
- err("Out of memory");
- retval = -ENOMEM;
- goto error;
- }
- info("Allocated memory for %d messages (%lu kbytes)",
- CPC_MSG_BUF_CNT, (long unsigned int)(sizeof(CPC_MSG_T) * CPC_MSG_BUF_CNT) / 1000);
- memset(chan->buf, 0, sizeof(CPC_MSG_T) * CPC_MSG_BUF_CNT);
-
- ResetBuffer(chan);
-
- card->chan->CPCWait_q = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
- if (!card->chan->CPCWait_q) {
- err("Out of memory");
- retval = -ENOMEM;
- goto error;
- }
- init_waitqueue_head(card->chan->CPCWait_q);
-
- CPCUSB_Table[slot] = card;
- card->idx = slot;
- CPCUsbCnt++;
-
- /* let the user know what node this device is now attached to */
- info("Device now attached to USB-%d", card->minor);
- return 0;
-
-error:
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- if (card->urbs[j].buffer) {
- usb_buffer_free(card->udev, card->urbs[j].size,
- card->urbs[j].buffer,
- card->urbs[j].urb->transfer_dma);
- card->urbs[j].buffer = NULL;
- }
- if (card->urbs[j].urb) {
- usb_free_urb(card->urbs[j].urb);
- card->urbs[j].urb = NULL;
- }
- }
-
- cpcusb_delete(card);
- return retval;
-}
-
-/*
- * called by the usb core when the device is removed from the system
- */
-static void cpcusb_disconnect(struct usb_interface *interface)
-{
- CPC_USB_T *card = NULL;
- int minor, j;
-
- /* prevent races with open() */
- down(&disconnect_sem);
-
- card = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
-
- down(&card->sem);
-
- /* prevent device read, write and ioctl */
- card->present = 0;
-
- minor = card->minor;
-
- /* free all urbs and their buffers */
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- /* terminate an ongoing write */
- if (atomic_read(&card->wrUrbs[j].busy)) {
- usb_kill_urb(card->wrUrbs[j].urb);
- wait_for_completion(&card->wrUrbs[j].finished);
- }
- usb_buffer_free(card->udev, card->wrUrbs[j].size,
- card->wrUrbs[j].buffer,
- card->wrUrbs[j].urb->transfer_dma);
- usb_free_urb(card->wrUrbs[j].urb);
- }
- info("%d write URBs freed", CPC_USB_URB_CNT);
-
- /* free all urbs and their buffers */
- for (j = 0; j < CPC_USB_URB_CNT; j++) {
- usb_buffer_free(card->udev, card->urbs[j].size,
- card->urbs[j].buffer,
- card->urbs[j].urb->transfer_dma);
- usb_free_urb(card->urbs[j].urb);
- }
- info("%d read URBs freed", CPC_USB_URB_CNT);
- usb_free_urb(card->intr_in_urb);
-
- /* give back our minor */
- usb_deregister_dev(interface, &cpcusb_class);
-
- up(&card->sem);
-
- /* if the device is opened, cpcusb_release will clean this up */
- if (!card->open)
- cpcusb_delete(card);
- else
- wake_up_interruptible(card->chan->CPCWait_q);
-
- up(&disconnect_sem);
-
- CPCUsbCnt--;
- info("USB-%d now disconnected", minor);
-}
-
-static int __init CPCUsb_Init(void)
-{
- int result, i;
-
- info(DRIVER_DESC " v" DRIVER_VERSION);
- info("Build on " __DATE__ " at " __TIME__);
-
- for (i = 0; i < CPC_USB_CARD_CNT; i++)
- CPCUSB_Table[i] = 0;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&cpcusb_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- procDir = proc_mkdir(CPC_USB_PROC_DIR, NULL);
- if (!procDir) {
- err("Could not create proc entry");
- } else {
- procEntry = create_proc_read_entry("info", 0444, procDir,
- cpcusb_proc_read_info,
- NULL);
- if (!procEntry) {
- err("Could not create proc entry %s", CPC_USB_PROC_DIR "/info");
- remove_proc_entry(CPC_USB_PROC_DIR, NULL);
- procDir = NULL;
- }
- }
-
- return 0;
-}
-
-static void __exit CPCUsb_Exit(void)
-{
- wait_event(rmmodWq, !atomic_read(&useCount));
-
- /* deregister this driver with the USB subsystem */
- usb_deregister(&cpcusb_driver);
-
- if (procDir) {
- if (procEntry)
- remove_proc_entry("info", procDir);
- remove_proc_entry(CPC_USB_PROC_DIR, NULL);
- }
-}
-
-module_init(CPCUsb_Init);
-module_exit(CPCUsb_Exit);
diff --git a/drivers/staging/cpc-usb/cpc.h b/drivers/staging/cpc-usb/cpc.h
deleted file mode 100644
index b2fda5d14c1..00000000000
--- a/drivers/staging/cpc-usb/cpc.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/*
- * CPC CAN Interface Definitions
- *
- * Copyright (C) 2000-2008 EMS Dr. Thomas Wuensche
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- */
-#ifndef CPC_HEADER
-#define CPC_HEADER
-
-/*
- * the maximum length of the union members within a CPC_MSG
- * this value can be defined by the customer, but has to be
- * >= 64 bytes
- * however, if not defined before, we set a length of 64 bytes
- */
-#if !defined(CPC_MSG_LEN) || (CPC_MSG_LEN < 64)
-#undef CPC_MSG_LEN
-#define CPC_MSG_LEN 64
-#endif
-
-/*
- * Transmission of events from CPC interfaces to PC can be individually
- * controlled per event type. Default state is: don't transmit
- * Control values are constructed by bit-or of Subject and Action
- * and passed to CPC_Control()
- */
-
-/* Control-Values for CPC_Control() Command Subject Selection */
-#define CONTR_CAN_Message 0x04
-#define CONTR_Busload 0x08
-#define CONTR_CAN_State 0x0C
-#define CONTR_SendAck 0x10
-#define CONTR_Filter 0x14
-#define CONTR_CmdQueue 0x18 /* reserved, do not use */
-#define CONTR_BusError 0x1C
-
-/* Control Command Actions */
-#define CONTR_CONT_OFF 0
-#define CONTR_CONT_ON 1
-#define CONTR_SING_ON 2
-/*
- * CONTR_SING_ON doesn't change CONTR_CONT_ON state, so it should be
- * read as: transmit at least once
- */
-
-/* defines for confirmed request */
-#define DO_NOT_CONFIRM 0
-#define DO_CONFIRM 1
-
-/* event flags */
-#define EVENT_READ 0x01
-#define EVENT_WRITE 0x02
-
-/*
- * Messages from CPC to PC contain a message object type field.
- * The following message types are sent by CPC and can be used in
- * handlers; others should be ignored.
- */
-#define CPC_MSG_T_RESYNC 0 /* Normally to be ignored */
-#define CPC_MSG_T_CAN 1 /* CAN data frame */
-#define CPC_MSG_T_BUSLOAD 2 /* Busload message */
-#define CPC_MSG_T_STRING 3 /* Normally to be ignored */
-#define CPC_MSG_T_CONTI 4 /* Normally to be ignored */
-#define CPC_MSG_T_MEM 7 /* Normally not to be handled */
-#define CPC_MSG_T_RTR 8 /* CAN remote frame */
-#define CPC_MSG_T_TXACK 9 /* Send acknowledge */
-#define CPC_MSG_T_POWERUP 10 /* Power-up message */
-#define CPC_MSG_T_CMD_NO 11 /* Normally to be ignored */
-#define CPC_MSG_T_CAN_PRMS 12 /* Actual CAN parameters */
-#define CPC_MSG_T_ABORTED 13 /* Command aborted message */
-#define CPC_MSG_T_CANSTATE 14 /* CAN state message */
-#define CPC_MSG_T_RESET 15 /* used to reset CAN-Controller */
-#define CPC_MSG_T_XCAN 16 /* XCAN data frame */
-#define CPC_MSG_T_XRTR 17 /* XCAN remote frame */
-#define CPC_MSG_T_INFO 18 /* information strings */
-#define CPC_MSG_T_CONTROL 19 /* used for control of interface/driver behaviour */
-#define CPC_MSG_T_CONFIRM 20 /* response type for confirmed requests */
-#define CPC_MSG_T_OVERRUN 21 /* response type for overrun conditions */
-#define CPC_MSG_T_KEEPALIVE 22 /* response type for keep alive conditions */
-#define CPC_MSG_T_CANERROR 23 /* response type for bus error conditions */
-#define CPC_MSG_T_DISCONNECTED 24 /* response type for a disconnected interface */
-#define CPC_MSG_T_ERR_COUNTER 25 /* RX/TX error counter of CAN controller */
-
-#define CPC_MSG_T_FIRMWARE 100 /* response type for USB firmware download */
-
-/*
- * Messages from the PC to the CPC interface contain a command field
- * Most of the command types are wrapped by the library functions and therefore
- * normally do not need to be used directly.
- * However, programmers who wish to circumvent the library and talk directly
- * to the drivers (mainly Linux programmers) can use the following
- * command types:
- */
-#define CPC_CMD_T_CAN 1 /* CAN data frame */
-#define CPC_CMD_T_CONTROL 3 /* used for control of interface/driver behaviour */
-#define CPC_CMD_T_CAN_PRMS 6 /* set CAN parameters */
-#define CPC_CMD_T_CLEARBUF           8	/* clears input queue; this is deprecated, use CPC_CMD_T_CLEAR_MSG_QUEUE instead */
-#define CPC_CMD_T_INQ_CAN_PARMS 11 /* inquire actual CAN parameters */
-#define CPC_CMD_T_FILTER_PRMS 12 /* set filter parameter */
-#define CPC_CMD_T_RTR 13 /* CAN remote frame */
-#define CPC_CMD_T_CANSTATE 14 /* CAN state message */
-#define CPC_CMD_T_XCAN 15 /* XCAN data frame */
-#define CPC_CMD_T_XRTR 16 /* XCAN remote frame */
-#define CPC_CMD_T_RESET 17 /* used to reset CAN-Controller */
-#define CPC_CMD_T_INQ_INFO           18	/* miscellaneous information strings */
-#define CPC_CMD_T_OPEN_CHAN 19 /* open a channel */
-#define CPC_CMD_T_CLOSE_CHAN 20 /* close a channel */
-#define CPC_CMD_T_CNTBUF             21	/* this is deprecated, use CPC_CMD_T_INQ_MSG_QUEUE_CNT instead */
-#define CPC_CMD_T_CAN_EXIT 200 /* exit the CAN (disable interrupts; reset bootrate; reset output_cntr; mode = 1) */
-
-#define CPC_CMD_T_INQ_MSG_QUEUE_CNT CPC_CMD_T_CNTBUF /* inquires the count of elements in the message queue */
-#define CPC_CMD_T_INQ_ERR_COUNTER 25 /* request the CAN controllers error counter */
-#define CPC_CMD_T_CLEAR_MSG_QUEUE CPC_CMD_T_CLEARBUF /* clear CPC_MSG queue */
-#define CPC_CMD_T_CLEAR_CMD_QUEUE 28 /* clear CPC_CMD queue */
-#define CPC_CMD_T_FIRMWARE 100 /* reserved, must not be used */
-#define CPC_CMD_T_USB_RESET 101 /* reserved, must not be used */
-#define CPC_CMD_T_WAIT_NOTIFY 102 /* reserved, must not be used */
-#define CPC_CMD_T_WAIT_SETUP 103 /* reserved, must not be used */
-#define CPC_CMD_T_ABORT 255 /* Normally not to be used */
-
-/* definitions for CPC_MSG_T_INFO information sources */
-#define CPC_INFOMSG_T_UNKNOWN_SOURCE 0
-#define CPC_INFOMSG_T_INTERFACE 1
-#define CPC_INFOMSG_T_DRIVER 2
-#define CPC_INFOMSG_T_LIBRARY 3
-
-/* information types */
-#define CPC_INFOMSG_T_UNKNOWN_TYPE 0
-#define CPC_INFOMSG_T_VERSION 1
-#define CPC_INFOMSG_T_SERIAL 2
-
-/* definitions for controller types */
-#define PCA82C200 1 /* Philips basic CAN controller, replaced by SJA1000 */
-#define SJA1000 2 /* Philips basic CAN controller */
-#define AN82527 3 /* Intel full CAN controller */
-#define M16C_BASIC 4 /* M16C controller running in basic CAN (not full CAN) mode */
-
-/* channel open error codes */
-#define CPC_ERR_NO_FREE_CHANNEL -1 /* no more free space within the channel array */
-#define CPC_ERR_CHANNEL_ALREADY_OPEN -2 /* the channel is already open */
-#define CPC_ERR_CHANNEL_NOT_ACTIVE -3 /* access to a channel not active failed */
-#define CPC_ERR_NO_DRIVER_PRESENT -4 /* no driver at the location searched by the library */
-#define CPC_ERR_NO_INIFILE_PRESENT -5 /* the library could not find the inifile */
-#define CPC_ERR_WRONG_PARAMETERS -6 /* wrong parameters in the inifile */
-#define CPC_ERR_NO_INTERFACE_PRESENT -7 /* 1. The specified interface is not connected */
-					/* 2. The interface (mostly CPC-USB) was disconnected during operation */
-#define CPC_ERR_NO_MATCHING_CHANNEL -8 /* the driver couldn't find a matching channel */
-#define CPC_ERR_NO_BUFFER_AVAILABLE -9 /* the driver couldn't allocate buffer for messages */
-#define CPC_ERR_NO_INTERRUPT -10 /* the requested interrupt couldn't be claimed */
-#define CPC_ERR_NO_MATCHING_INTERFACE -11 /* no interface type related to this channel was found */
-#define CPC_ERR_NO_RESOURCES -12 /* the requested resources could not be claimed */
-#define CPC_ERR_SOCKET -13 /* error concerning TCP sockets */
-
-/* init error codes */
-#define CPC_ERR_WRONG_CONTROLLER_TYPE -14 /* wrong CAN controller type within initialization */
-#define CPC_ERR_NO_RESET_MODE -15 /* the controller could not be set into reset mode */
-#define CPC_ERR_NO_CAN_ACCESS -16 /* the CAN controller could not be accessed */
-
-/* transmit error codes */
-#define CPC_ERR_CAN_WRONG_ID -20 /* the provided CAN id is too big */
-#define CPC_ERR_CAN_WRONG_LENGTH -21 /* the provided CAN length is too long */
-#define CPC_ERR_CAN_NO_TRANSMIT_BUF -22 /* the transmit buffer was occupied */
-#define CPC_ERR_CAN_TRANSMIT_TIMEOUT -23 /* The message could not be sent within a */
- /* specified time */
-
-/* other error codes */
-#define CPC_ERR_SERVICE_NOT_SUPPORTED -30 /* the requested service is not supported by the interface */
-#define CPC_ERR_IO_TRANSFER -31 /* a transmission error down to the driver occurred */
-#define CPC_ERR_TRANSMISSION_FAILED -32 /* a transmission error down to the interface occurred */
-#define CPC_ERR_TRANSMISSION_TIMEOUT -33 /* a timeout occurred within transmission to the interface */
-#define CPC_ERR_OP_SYS_NOT_SUPPORTED -35 /* the operating system is not supported */
-#define CPC_ERR_UNKNOWN              -40	/* an unknown error occurred (mostly IOCTL errors) */
-
-#define CPC_ERR_LOADING_DLL -50 /* the library 'cpcwin.dll' could not be loaded */
-#define CPC_ERR_ASSIGNING_FUNCTION -51 /* the specified function could not be assigned */
-#define CPC_ERR_DLL_INITIALIZATION -52 /* the DLL was not initialized correctly */
-#define CPC_ERR_MISSING_LICFILE -55 /* the file containing the licenses does not exist */
-#define CPC_ERR_MISSING_LICENSE -56 /* a required license was not found */
-
-/* CAN state bit values. Ignore any bits not listed */
-#define CPC_CAN_STATE_BUSOFF 0x80
-#define CPC_CAN_STATE_ERROR 0x40
-
-/* Mask to help ignore undefined bits */
-#define CPC_CAN_STATE_MASK 0xc0
-
-/*
- * CAN-Message representation in a CPC_MSG
- * Message object type is CPC_MSG_T_CAN or CPC_MSG_T_RTR
- * or CPC_MSG_T_XCAN or CPC_MSG_T_XRTR
- */
-typedef struct CPC_CAN_MSG {
- u32 id;
- u8 length;
- u8 msg[8];
-} CPC_CAN_MSG_T;
-
-/* representation of the CAN parameters for the PCA82C200 controller */
-typedef struct CPC_PCA82C200_PARAMS {
- u8 acc_code; /* Acceptance-code for receive, Standard: 0 */
- u8 acc_mask; /* Acceptance-mask for receive, Standard: 0xff (everything) */
- u8 btr0; /* Bus-timing register 0 */
- u8 btr1; /* Bus-timing register 1 */
- u8 outp_contr; /* Output-control register */
-} CPC_PCA82C200_PARAMS_T;
-
-/* representation of the CAN parameters for the SJA1000 controller */
-typedef struct CPC_SJA1000_PARAMS {
- u8 mode; /* enables single or dual acceptance filtering */
- u8 acc_code0; /* Acceptance-code for receive, Standard: 0 */
- u8 acc_code1;
- u8 acc_code2;
- u8 acc_code3;
- u8 acc_mask0; /* Acceptance-mask for receive, Standard: 0xff (everything) */
- u8 acc_mask1;
- u8 acc_mask2;
- u8 acc_mask3;
- u8 btr0; /* Bus-timing register 0 */
- u8 btr1; /* Bus-timing register 1 */
- u8 outp_contr; /* Output-control register */
-} CPC_SJA1000_PARAMS_T;
-
-/*
- * representation of the CAN parameters for the M16C controller
- * in basic CAN mode (means no full CAN)
- */
-typedef struct CPC_M16C_BASIC_PARAMS {
- u8 con0;
- u8 con1;
- u8 ctlr0;
- u8 ctlr1;
- u8 clk;
- u8 acc_std_code0;
- u8 acc_std_code1;
- u8 acc_ext_code0;
- u8 acc_ext_code1;
- u8 acc_ext_code2;
- u8 acc_ext_code3;
- u8 acc_std_mask0;
- u8 acc_std_mask1;
- u8 acc_ext_mask0;
- u8 acc_ext_mask1;
- u8 acc_ext_mask2;
- u8 acc_ext_mask3;
-} CPC_M16C_BASIC_PARAMS_T;
-
-/* CAN params message representation */
-typedef struct CPC_CAN_PARAMS {
- u8 cc_type; /* represents the controller type */
- union {
- CPC_M16C_BASIC_PARAMS_T m16c_basic;
- CPC_SJA1000_PARAMS_T sja1000;
- CPC_PCA82C200_PARAMS_T pca82c200;
- } cc_params;
-} CPC_CAN_PARAMS_T;
-
-/* CHAN init params representation */
-typedef struct CPC_CHAN_PARAMS {
- int fd;
-} CPC_CHAN_PARAMS_T;
-
-/* CAN init params message representation */
-typedef struct CPC_INIT_PARAMS {
- CPC_CHAN_PARAMS_T chanparams;
- CPC_CAN_PARAMS_T canparams;
-} CPC_INIT_PARAMS_T;
-
-/* structure for confirmed message handling */
-typedef struct CPC_CONFIRM {
- u8 result; /* error code */
-} CPC_CONFIRM_T;
-
-/* structure for information requests */
-typedef struct CPC_INFO {
- u8 source; /* interface, driver or library */
- u8 type; /* version or serial number */
- char msg[CPC_MSG_LEN - 2]; /* string holding the requested information */
-} CPC_INFO_T;
-
-/*
- * OVERRUN
- * In general, two types of overrun may occur:
- * a hardware overrun, where the CAN controller
- * lost a message because the interrupt was not
- * handled before the next message came in, or
- * a software overrun, where e.g. a received
- * message could not be stored in the CPC_MSG
- * buffer.
- */
-
-/* After a software overrun has occurred
- * we wait until we have CPC_OVR_GAP slots
- * free in the CPC_MSG buffer.
- */
-#define CPC_OVR_GAP 10
-
-/*
- * Two types of software overrun may occur.
- * A received CAN message or a CAN state event
- * can cause an overrun.
- * Note: A CPC_CMD which would normally store
- * its result immediately in the CPC_MSG
- * queue may fail if the message queue is full.
- * This will not generate an overrun message, but
- * will halt command execution until this command
- * is able to store its message in the message queue.
- */
-#define CPC_OVR_EVENT_CAN 0x01
-#define CPC_OVR_EVENT_CANSTATE 0x02
-#define CPC_OVR_EVENT_BUSERROR 0x04
-
-/*
- * If the CAN controller lost a message
- * we indicate it with the highest bit
- * set in the count field.
- */
-#define CPC_OVR_HW 0x80
-
-/* structure for overrun conditions */
-typedef struct {
- u8 event;
- u8 count;
-} CPC_OVERRUN_T;
-
-/*
- * CAN errors
- * Each CAN controller type has different
- * registers to record errors.
- * Therefore a structure containing the specific
- * errors is set up for each controller here
- */
-
-/*
- * SJA1000 error structure
- * see the SJA1000 datasheet for detailed
- * explanation of the registers
- */
-typedef struct CPC_SJA1000_CAN_ERROR {
- u8 ecc; /* error capture code register */
- u8 rxerr; /* RX error counter register */
- u8 txerr; /* TX error counter register */
-} CPC_SJA1000_CAN_ERROR_T;
-
-/*
- * M16C error structure
- * see the M16C datasheet for detailed
- * explanation of the registers
- */
-typedef struct CPC_M16C_CAN_ERROR {
- u8 tbd; /* to be defined */
-} CPC_M16C_CAN_ERROR_T;
-
-/* structure for CAN error conditions */
-#define CPC_CAN_ECODE_ERRFRAME 0x01
-typedef struct CPC_CAN_ERROR {
- u8 ecode;
- struct {
- u8 cc_type; /* CAN controller type */
- union {
- CPC_SJA1000_CAN_ERROR_T sja1000;
- CPC_M16C_CAN_ERROR_T m16c;
- } regs;
- } cc;
-} CPC_CAN_ERROR_T;
-
-/*
- * Structure containing RX/TX error counter.
- * This structure is used to request the
- * values of the CAN controller's TX and RX
- * error counters.
- */
-typedef struct CPC_CAN_ERR_COUNTER {
- u8 rx;
- u8 tx;
-} CPC_CAN_ERR_COUNTER_T;
-
-/* If this flag is set, transmissions from PC to CPC are protected against loss */
-#define CPC_SECURE_TO_CPC 0x01
-
-/* If this flag is set, transmissions from CPC to PC are protected against loss */
-#define CPC_SECURE_TO_PC 0x02
-
-/* If this flag is set, the CAN-transmit buffer is checked to be free before sending a message */
-#define CPC_SECURE_SEND 0x04
-
-/*
- * If this flag is set, the transmission complete flag is checked
- * after sending a message
- * THIS IS CURRENTLY ONLY IMPLEMENTED IN THE PASSIVE INTERFACE DRIVERS
- */
-#define CPC_SECURE_TRANSMIT 0x08
-
-/* main message type used between library and application */
-typedef struct CPC_MSG {
- u8 type; /* type of message */
- u8 length; /* length of data within union 'msg' */
- u8 msgid; /* confirmation handle */
- u32 ts_sec; /* timestamp in seconds */
- u32 ts_nsec; /* timestamp in nano seconds */
- union {
- u8 generic[CPC_MSG_LEN];
- CPC_CAN_MSG_T canmsg;
- CPC_CAN_PARAMS_T canparams;
- CPC_CONFIRM_T confirmation;
- CPC_INFO_T info;
- CPC_OVERRUN_T overrun;
- CPC_CAN_ERROR_T error;
- CPC_CAN_ERR_COUNTER_T err_counter;
- u8 busload;
- u8 canstate;
- } msg;
-} CPC_MSG_T;
-
-#endif /* CPC_HEADER */
diff --git a/drivers/staging/cpc-usb/cpc_int.h b/drivers/staging/cpc-usb/cpc_int.h
deleted file mode 100644
index 38674e9690a..00000000000
--- a/drivers/staging/cpc-usb/cpc_int.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * CPCLIB
- *
- * Copyright (C) 2000-2008 EMS Dr. Thomas Wuensche
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- */
-#ifndef CPC_INT_H
-#define CPC_INT_H
-
-#include <linux/wait.h>
-
-#define CPC_MSG_BUF_CNT 1500
-
-#define CPC_PROC_DIR "driver/"
-
-#undef dbg
-#undef err
-#undef info
-
-/* Use our own dbg macro */
-#define dbg(format, arg...) do { if (debug) printk( KERN_INFO format "\n" , ## arg); } while (0)
-#define err(format, arg...) do { printk( KERN_INFO "ERROR " format "\n" , ## arg); } while (0)
-#define info(format, arg...) do { printk( KERN_INFO format "\n" , ## arg); } while (0)
-
-/* Helper macros for the message ring buffer */
-#define IsBufferFull(x)     ((!(x)->WnR) && ((x)->iidx == (x)->oidx))
-#define IsBufferEmpty(x)    (((x)->WnR) && ((x)->iidx == (x)->oidx))
-#define IsBufferNotEmpty(x) ((!(x)->WnR) || ((x)->iidx != (x)->oidx))
-#define ResetBuffer(x)      do { (x)->oidx = (x)->iidx = 0; (x)->WnR = 1; } while (0)
-
-#define CPC_BufWriteAllowed ((chan->oidx != chan->iidx) || chan->WnR)
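The WnR ("write, not read") flag is what lets the macros above tell a full ring buffer from an empty one when the in-index and out-index happen to be equal. A minimal stand-alone sketch of the same convention, using only the three bookkeeping fields of CPC_CHAN (the harness itself is illustrative and not part of the driver):

#include <stdio.h>

/* Illustrative subset of CPC_CHAN: just the ring-buffer bookkeeping. */
struct ring {
	unsigned int iidx;	/* in-index: next slot the writer fills */
	unsigned int oidx;	/* out-index: next slot the reader drains */
	unsigned int WnR;	/* set when the reader caught up, cleared when the writer caught up */
};

#define IsBufferFull(x)  ((!(x)->WnR) && ((x)->iidx == (x)->oidx))
#define IsBufferEmpty(x) (((x)->WnR) && ((x)->iidx == (x)->oidx))

int main(void)
{
	struct ring r = { .iidx = 0, .oidx = 0, .WnR = 1 };	/* state after ResetBuffer() */

	/* prints "empty=1 full=0": equal indices plus WnR set means empty */
	printf("empty=%d full=%d\n", IsBufferEmpty(&r), IsBufferFull(&r));

	/* After the writer wraps around and reaches the reader, the indices are
	 * equal again but WnR is cleared, so the same test now reads as full. */
	r.WnR = 0;
	printf("empty=%d full=%d\n", IsBufferEmpty(&r), IsBufferFull(&r));	/* "empty=0 full=1" */
	return 0;
}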
-
-typedef void (*chan_write_byte_t) (void *chan, unsigned int reg,
- unsigned char val);
-typedef unsigned char (*chan_read_byte_t) (void *chan, unsigned int reg);
-
-typedef struct CPC_CHAN {
- void __iomem * canBase; /* base address of SJA1000 */
- chan_read_byte_t read_byte; /* CAN controller read access routine */
- chan_write_byte_t write_byte; /* CAN controller write access routine */
- CPC_MSG_T *buf; /* buffer for CPC msg */
- unsigned int iidx;
- unsigned int oidx;
- unsigned int WnR;
- unsigned int minor;
- unsigned int locked;
- unsigned int irqDisabled;
-
- unsigned char cpcCtrlCANMessage;
- unsigned char cpcCtrlCANState;
- unsigned char cpcCtrlBUSState;
-
- unsigned char controllerType;
-
- unsigned long ovrTimeSec;
- unsigned long ovrTimeNSec;
- unsigned long ovrLockedBuffer;
- CPC_OVERRUN_T ovr;
-
- /* for debugging only */
- unsigned int handledIrqs;
- unsigned int lostMessages;
-
- unsigned int sentStdCan;
- unsigned int sentExtCan;
- unsigned int sentStdRtr;
- unsigned int sentExtRtr;
-
- unsigned int recvStdCan;
- unsigned int recvExtCan;
- unsigned int recvStdRtr;
- unsigned int recvExtRtr;
-
- wait_queue_head_t *CPCWait_q;
-
- void *private;
-} CPC_CHAN_T;
-
-#endif
diff --git a/drivers/staging/cpc-usb/cpcusb.h b/drivers/staging/cpc-usb/cpcusb.h
deleted file mode 100644
index 6bdf30be239..00000000000
--- a/drivers/staging/cpc-usb/cpcusb.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Header for CPC-USB Driver ********************
- * Copyright 1999, 2000, 2001
- *
- * Company: EMS Dr. Thomas Wuensche
- * Sonnenhang 3
- * 85304 Ilmmuenster
- * Phone: +49-8441-490260
- * Fax: +49-8441-81860
- * email: support@ems-wuensche.com
- * WWW: www.ems-wuensche.com
- */
-
-#ifndef CPCUSB_H
-#define CPCUSB_H
-
-#undef err
-#undef dbg
-#undef info
-
-/* Use our own dbg macro */
-#define dbg(format, arg...) do { if (debug) printk(KERN_INFO "CPC-USB: " format "\n" , ## arg); } while (0)
-#define info(format, arg...) do { printk(KERN_INFO "CPC-USB: " format "\n" , ## arg); } while (0)
-#define err(format, arg...) do { printk(KERN_INFO "CPC-USB(ERROR): " format "\n" , ## arg); } while (0)
-
-#define CPC_USB_CARD_CNT 4
-
-typedef struct CPC_USB_READ_URB {
-	unsigned char *buffer;	/* the buffer to receive data */
-	size_t size;		/* the size of the receive buffer */
-	struct urb *urb;	/* the urb used to receive data */
-} CPC_USB_READ_URB_T;
-
-typedef struct CPC_USB_WRITE_URB {
- unsigned char *buffer; /* the buffer to send data */
- size_t size; /* the size of the send buffer */
- struct urb *urb; /* the urb used to send data */
- atomic_t busy; /* true if write urb is busy */
- struct completion finished; /* wait for the write to finish */
-} CPC_USB_WRITE_URB_T;
-
-#define CPC_USB_URB_CNT 10
-
-typedef struct CPC_USB {
- struct usb_device *udev; /* save off the usb device pointer */
- struct usb_interface *interface; /* the interface for this device */
- unsigned char minor; /* the starting minor number for this device */
- unsigned char num_ports; /* the number of ports this device has */
- int num_intr_in; /* number of interrupt in endpoints we have */
- int num_bulk_in; /* number of bulk in endpoints we have */
- int num_bulk_out; /* number of bulk out endpoints we have */
-
- CPC_USB_READ_URB_T urbs[CPC_USB_URB_CNT];
-
- unsigned char intr_in_buffer[4]; /* interrupt transfer buffer */
- struct urb *intr_in_urb; /* interrupt transfer urb */
-
- CPC_USB_WRITE_URB_T wrUrbs[CPC_USB_URB_CNT];
-
- int open; /* if the port is open or not */
- int present; /* if the device is not disconnected */
- struct semaphore sem; /* locks this structure */
-
- int free_slots; /* free send slots of CPC-USB */
- int idx;
-
- spinlock_t slock;
-
- char serialNumber[128]; /* serial number */
- int productId; /* product id to differ between M16C and LPC2119 */
- CPC_CHAN_T *chan;
-} CPC_USB_T;
-
-#define CPCTable CPCUSB_Table
-
-#define CPC_DRIVER_VERSION "0.724"
-#define CPC_DRIVER_SERIAL "not applicable"
-
-#define OBUF_SIZE 255 /* 4096 */
-
-/* read timeouts -- RD_NAK_TIMEOUT * RD_EXPIRE = Number of seconds */
-#define RD_NAK_TIMEOUT (10*HZ) /* Default number of X seconds to wait */
-#define RD_EXPIRE 12 /* Number of attempts to wait X seconds */
-
-#define CPC_USB_BASE_MNR 0 /* CPC-USB start at minor 0 */
-
-#endif
diff --git a/drivers/staging/cpc-usb/sja2m16c.h b/drivers/staging/cpc-usb/sja2m16c.h
deleted file mode 100644
index 654bd3fc91d..00000000000
--- a/drivers/staging/cpc-usb/sja2m16c.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _SJA2M16C_H
-#define _SJA2M16C_H
-
-#include "cpc.h"
-
-#define BAUDRATE_TOLERANCE_PERCENT 1
-#define SAMPLEPOINT_TOLERANCE_PERCENT 5
-#define SAMPLEPOINT_UPPER_LIMIT 88
-
-/* M16C parameters */
-struct FIELD_C0CONR {
- unsigned int brp:4;
- unsigned int sam:1;
- unsigned int pr:3;
- unsigned int dummy:8;
-};
-struct FIELD_C1CONR {
- unsigned int ph1:3;
- unsigned int ph2:3;
- unsigned int sjw:2;
- unsigned int dummy:8;
-};
-typedef union C0CONR {
- unsigned char c0con;
- struct FIELD_C0CONR bc0con;
-} C0CONR_T;
-typedef union C1CONR {
- unsigned char c1con;
- struct FIELD_C1CONR bc1con;
-} C1CONR_T;
-
-#define SJA_TSEG1 ((pParams->btr1 & 0x0f)+1)
-#define SJA_TSEG2 (((pParams->btr1 & 0x70)>>4)+1)
-#define SJA_BRP ((pParams->btr0 & 0x3f)+1)
-#define SJA_SJW ((pParams->btr0 & 0xc0)>>6)
-#define SJA_SAM ((pParams->btr1 & 0x80)>>7)
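As an illustration of how these macros decode the SJA1000 bus-timing registers (the register values below are a common 500 kbit/s setting for a 16 MHz clock, used purely as an example and not taken from the driver): with btr0 = 0x00 and btr1 = 0x1C, SJA_BRP evaluates to 1, SJA_SJW to 0, SJA_TSEG1 to 13 and SJA_TSEG2 to 2, so the conversion code in sja2m16c_2.c computes a bit rate of 16 MHz / 2 / 1 / (1 + 13 + 2) = 500 kbit/s and a sample point of 100 * (1 + 13) / 16 = 87 percent.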
-int baudrate_m16c(int clk, int brp, int pr, int ph1, int ph2);
-int samplepoint_m16c(int brp, int pr, int ph1, int ph2);
-int SJA1000_TO_M16C_BASIC_Params(CPC_MSG_T *pMsg);
-
-#endif
diff --git a/drivers/staging/cpc-usb/sja2m16c_2.c b/drivers/staging/cpc-usb/sja2m16c_2.c
deleted file mode 100644
index bf0230fb778..00000000000
--- a/drivers/staging/cpc-usb/sja2m16c_2.c
+++ /dev/null
@@ -1,452 +0,0 @@
-/****************************************************************************
-*
-* Copyright (c) 2003,2004 by EMS Dr. Thomas Wuensche
-*
-* - All rights reserved -
-*
-* This code is provided "as is" without warranty of any kind, either
-* expressed or implied, including but not limited to the liability
-* concerning the freedom from material defects, the fitness for parti-
-* cular purposes or the freedom of proprietary rights of third parties.
-*
-*****************************************************************************
-* Module name.: cpcusb
-*****************************************************************************
-* Include file: cpc.h
-*****************************************************************************
-* Project.....: Windows Driver Development Kit
-* Filename....: sja2m16c.cpp
-* Authors.....: (GU) Gerhard Uttenthaler
-* (CS) Christian Schoett
-*****************************************************************************
-* Short descr.: converts baudrate between SJA1000 and M16C
-*****************************************************************************
-* Description.: handles the baudrate conversion from SJA1000 parameters to
-* M16C parameters
-*****************************************************************************
-* Address : EMS Dr. Thomas Wuensche
-* Sonnenhang 3
-* D-85304 Ilmmuenster
-* Tel. : +49-8441-490260
-* Fax. : +49-8441-81860
-* email: support@ems-wuensche.com
-*****************************************************************************
-* History
-*****************************************************************************
-* Version Date Auth Remark
-*
-* 01.00 ?? GU - initial release
-* 01.10 ?????????? CS - adapted to fit into the USB Windows driver
-* 02.00 18.08.2004 GU - improved the baudrate calculating algorithm
-* - implemented acceptance filtering
-* 02.10 10.09.2004 CS - adapted to fit into the USB Windows driver
-*****************************************************************************
-* ToDo's
-*****************************************************************************
-*/
-
-/****************************************************************************/
-/* I N C L U D E S
-*/
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
-#include <asm/uaccess.h>
-#include <linux/usb.h>
-
-#include "cpc.h"
-#include "cpc_int.h"
-#include "cpcusb.h"
-
-#include "sja2m16c.h"
-
-/*********************************************************************/
-int baudrate_m16c(int clk, int brp, int pr, int ph1, int ph2)
-{
-	return (16000000 / (1 << clk)) / 2 / (brp + 1) /
-	       (1 + pr + 1 + ph1 + 1 + ph2 + 1);
-}
-
-
-/*********************************************************************/
-int samplepoint_m16c(int brp, int pr, int ph1, int ph2)
-{
-	return (100 * (1 + pr + 1 + ph1 + 1)) /
-	       (1 + pr + 1 + ph1 + 1 + ph2 + 1);
-}
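A quick sanity check of the two helpers above, with parameter values chosen purely for illustration: baudrate_m16c(0, 0, 5, 4, 3) runs the 16 MHz CAN clock through the fixed divide-by-two and a prescaler of brp + 1 = 1, and splits each bit into 1 + 6 + 5 + 4 = 16 time quanta, giving 8 MHz / 16 = 500 kbit/s; samplepoint_m16c(0, 5, 4, 3) then places the sample point after 12 of those 16 quanta, i.e. it returns 75.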
-
-
-/****************************************************************************
-* Function.....: SJA1000_TO_M16C_BASIC_Params
-*
-* Task.........: This routine converts SJA1000 CAN btr parameters into M16C
-* parameters based on the sample point and the error. In
-* addition it converts the acceptance filter parameters to
-* suit the M16C parameters
-*
-* Parameters...: None
-*
-* Return values: None
-*
-* Comments.....:
-*****************************************************************************
-* History
-*****************************************************************************
-* 19.01.2005  CS   - modified the conversion of SJA1000 filter params into
-* M16C params. Due to compatibility reasons with the
-* older 82C200 CAN controller the SJA1000
-****************************************************************************/
-int SJA1000_TO_M16C_BASIC_Params(CPC_MSG_T * in)
-{
- int sjaBaudrate;
- int sjaSamplepoint;
- int *baudrate_error; // BRP[0..15], PR[0..7], PH1[0..7], PH2[0..7]
- int *samplepoint_error; // BRP[0..15], PR[0..7], PH1[0..7], PH2[0..7]
- int baudrate_error_merk;
- int clk, brp, pr, ph1, ph2;
- int clk_merk, brp_merk, pr_merk, ph1_merk, ph2_merk;
- int index;
- unsigned char acc_code0, acc_code1, acc_code2, acc_code3;
- unsigned char acc_mask0, acc_mask1, acc_mask2, acc_mask3;
- CPC_MSG_T * out;
- C0CONR_T c0con;
- C1CONR_T c1con;
- int tmpAccCode;
- int tmpAccMask;
-
- // we have to convert the parameters into M16C parameters
- CPC_SJA1000_PARAMS_T * pParams;
-
- // check if the type is CAN parameters and if we have to convert the given params
- if (in->type != CPC_CMD_T_CAN_PRMS
- || in->msg.canparams.cc_type != SJA1000)
- return 0;
- pParams =
- (CPC_SJA1000_PARAMS_T *) & in->msg.canparams.cc_params.sja1000;
- acc_code0 = pParams->acc_code0;
- acc_code1 = pParams->acc_code1;
- acc_code2 = pParams->acc_code2;
- acc_code3 = pParams->acc_code3;
- acc_mask0 = pParams->acc_mask0;
- acc_mask1 = pParams->acc_mask1;
- acc_mask2 = pParams->acc_mask2;
- acc_mask3 = pParams->acc_mask3;
-
-#ifdef _DEBUG_OUTPUT_CAN_PARAMS
- info("acc_code0: %2.2Xh\n", acc_code0);
- info("acc_code1: %2.2Xh\n", acc_code1);
- info("acc_code2: %2.2Xh\n", acc_code2);
- info("acc_code3: %2.2Xh\n", acc_code3);
- info("acc_mask0: %2.2Xh\n", acc_mask0);
- info("acc_mask1: %2.2Xh\n", acc_mask1);
- info("acc_mask2: %2.2Xh\n", acc_mask2);
- info("acc_mask3: %2.2Xh\n", acc_mask3);
-
-#endif /* */
-	baudrate_error = (int *) vmalloc(sizeof(int) * 16 * 8 * 8 * 8 * 5);
-	if (!baudrate_error) {
-		err("Could not allocate memory\n");
-		return -3;
-	}
-	samplepoint_error = (int *) vmalloc(sizeof(int) * 16 * 8 * 8 * 8 * 5);
-	if (!samplepoint_error) {
-		err("Could not allocate memory\n");
-		vfree(baudrate_error);
-		return -3;
-	}
-	memset(baudrate_error, 0xff, sizeof(int) * 16 * 8 * 8 * 8 * 5);
-	memset(samplepoint_error, 0xff, sizeof(int) * 16 * 8 * 8 * 8 * 5);
- sjaBaudrate =
- 16000000 / 2 / SJA_BRP / (1 + SJA_TSEG1 + SJA_TSEG2);
- sjaSamplepoint =
- 100 * (1 + SJA_TSEG1) / (1 + SJA_TSEG1 + SJA_TSEG2);
- if (sjaBaudrate == 0) {
- vfree(baudrate_error);
- vfree(samplepoint_error);
- return -2;
- }
-
-#ifdef _DEBUG_OUTPUT_CAN_PARAMS
- info("\nStarting SJA CAN params\n");
- info("-------------------------\n");
- info("TS1 : %2.2Xh TS2 : %2.2Xh\n", SJA_TSEG1, SJA_TSEG2);
- info("BTR0 : %2.2Xh BTR1: %2.2Xh\n", pParams->btr0,
- pParams->btr1);
- info("Baudrate: %d.%dkBaud\n", sjaBaudrate / 1000,
- sjaBaudrate % 1000);
- info("Sample P: 0.%d\n", sjaSamplepoint);
- info("\n");
-
-#endif /* */
- c0con.bc0con.sam = SJA_SAM;
- c1con.bc1con.sjw = SJA_SJW;
-
- // calculate errors for all baudrates
- index = 0;
- for (clk = 0; clk < 5; clk++) {
- for (brp = 0; brp < 16; brp++) {
- for (pr = 0; pr < 8; pr++) {
- for (ph1 = 0; ph1 < 8; ph1++) {
- for (ph2 = 0; ph2 < 8; ph2++) {
- baudrate_error[index] =
- 100 *
- abs(baudrate_m16c
- (clk, brp, pr, ph1,
- ph2) -
- sjaBaudrate) /
- sjaBaudrate;
- samplepoint_error[index] =
- abs(samplepoint_m16c
- (brp, pr, ph1,
- ph2) -
- sjaSamplepoint);
-
-#if 0
- info
- ("Baudrate : %d kBaud\n",
- baudrate_m16c(clk,
- brp, pr,
- ph1,
- ph2));
- info
- ("Baudrate Error: %d\n",
- baudrate_error
- [index]);
- info
- ("Sample P Error: %d\n",
- samplepoint_error
- [index]);
- info
- ("clk : %d\n",
- clk);
-
-#endif /* */
- index++;
- }
- }
- }
- }
- }
-
-	// mark all baudrate_error entries which are outside the limits
- index = 0;
- for (clk = 0; clk < 5; clk++) {
- for (brp = 0; brp < 16; brp++) {
- for (pr = 0; pr < 8; pr++) {
- for (ph1 = 0; ph1 < 8; ph1++) {
- for (ph2 = 0; ph2 < 8; ph2++) {
- if ((baudrate_error[index]
- >
- BAUDRATE_TOLERANCE_PERCENT)
- ||
- (samplepoint_error
- [index] >
- SAMPLEPOINT_TOLERANCE_PERCENT)
- ||
- (samplepoint_m16c
- (brp, pr, ph1,
- ph2) >
- SAMPLEPOINT_UPPER_LIMIT))
- {
- baudrate_error
- [index] = -1;
- } else
- if (((1 + pr + 1 +
- ph1 + 1 + ph2 +
- 1) < 8)
- ||
- ((1 + pr + 1 +
- ph1 + 1 + ph2 +
- 1) > 25)) {
- baudrate_error
- [index] = -1;
- }
-
-#if 0
- else {
- info
- ("Baudrate : %d kBaud\n",
- baudrate_m16c
- (clk, brp, pr,
- ph1, ph2));
- info
- ("Baudrate Error: %d\n",
- baudrate_error
- [index]);
- info
- ("Sample P Error: %d\n",
- samplepoint_error
- [index]);
- }
-
-#endif /* */
- index++;
- }
- }
- }
- }
- }
-
-	// find the minimum baudrate_error among the unmarked entries
- clk_merk = brp_merk = pr_merk = ph1_merk = ph2_merk = 0;
- baudrate_error_merk = 100;
- index = 0;
- for (clk = 0; clk < 5; clk++) {
- for (brp = 0; brp < 16; brp++) {
- for (pr = 0; pr < 8; pr++) {
- for (ph1 = 0; ph1 < 8; ph1++) {
- for (ph2 = 0; ph2 < 8; ph2++) {
- if (baudrate_error[index]
- != -1) {
- if (baudrate_error
- [index] <
- baudrate_error_merk)
- {
- baudrate_error_merk
- =
- baudrate_error
- [index];
- brp_merk =
- brp;
- pr_merk =
- pr;
- ph1_merk =
- ph1;
- ph2_merk =
- ph2;
- clk_merk =
- clk;
-
-#if 0
- info
- ("brp: %2.2Xh pr: %2.2Xh ph1: %2.2Xh ph2: %2.2Xh\n",
- brp,
- pr,
- ph1,
- ph2);
- info
- ("Baudrate : %d kBaud\n",
- baudrate_m16c
- (clk,
- brp,
- pr,
- ph1,
- ph2));
- info
- ("Baudrate Error: %d\n",
- baudrate_error
- [index]);
- info
- ("Sample P Error: %d\n",
- samplepoint_error
- [index]);
-
-#endif /* */
- }
- }
- index++;
- }
- }
- }
- }
- }
- if (baudrate_error_merk == 100) {
- info("ERROR: Could not convert CAN init parameter\n");
- vfree(baudrate_error);
- vfree(samplepoint_error);
- return -1;
- }
-
- // setting m16c CAN parameter
- c0con.bc0con.brp = brp_merk;
- c0con.bc0con.pr = pr_merk;
- c1con.bc1con.ph1 = ph1_merk;
- c1con.bc1con.ph2 = ph2_merk;
-
-#ifdef _DEBUG_OUTPUT_CAN_PARAMS
- info("\nResulting M16C CAN params\n");
- info("-------------------------\n");
- info("clk : %2.2Xh\n", clk_merk);
- info("ph1 : %2.2Xh ph2: %2.2Xh\n", c1con.bc1con.ph1 + 1,
- c1con.bc1con.ph2 + 1);
- info("pr : %2.2Xh brp: %2.2Xh\n", c0con.bc0con.pr + 1,
- c0con.bc0con.brp + 1);
- info("sjw : %2.2Xh sam: %2.2Xh\n", c1con.bc1con.sjw,
- c0con.bc0con.sam);
- info("co1 : %2.2Xh co0: %2.2Xh\n", c1con.c1con, c0con.c0con);
- info("Baudrate: %d.%dBaud\n",
- baudrate_m16c(clk_merk, c0con.bc0con.brp, c0con.bc0con.pr,
- c1con.bc1con.ph1, c1con.bc1con.ph2) / 1000,
- baudrate_m16c(clk_merk, c0con.bc0con.brp, c0con.bc0con.pr,
- c1con.bc1con.ph1, c1con.bc1con.ph2) % 1000);
- info("Sample P: 0.%d\n",
- samplepoint_m16c(c0con.bc0con.brp, c0con.bc0con.pr,
- c1con.bc1con.ph1, c1con.bc1con.ph2));
- info("\n");
-
-#endif /* */
- out = in;
-	out->type = CPC_CMD_T_CAN_PRMS;	/* = 6 */
- out->length = sizeof(CPC_M16C_BASIC_PARAMS_T) + 1;
- out->msg.canparams.cc_type = M16C_BASIC;
- out->msg.canparams.cc_params.m16c_basic.con0 = c0con.c0con;
- out->msg.canparams.cc_params.m16c_basic.con1 = c1con.c1con;
- out->msg.canparams.cc_params.m16c_basic.ctlr0 = 0x4C;
- out->msg.canparams.cc_params.m16c_basic.ctlr1 = 0x00;
- out->msg.canparams.cc_params.m16c_basic.clk = clk_merk;
- out->msg.canparams.cc_params.m16c_basic.acc_std_code0 =
- acc_code0;
- out->msg.canparams.cc_params.m16c_basic.acc_std_code1 = acc_code1;
-
-// info("code0: 0x%2.2X, code1: 0x%2.2X\n", out->msg.canparams.cc_params.m16c_basic.acc_std_code0, out->msg.canparams.cc_params.m16c_basic.acc_std_code1);
- tmpAccCode = (acc_code1 >> 5) + (acc_code0 << 3);
- out->msg.canparams.cc_params.m16c_basic.acc_std_code0 =
- (unsigned char) tmpAccCode;
- out->msg.canparams.cc_params.m16c_basic.acc_std_code1 =
- (unsigned char) (tmpAccCode >> 8);
-
-// info("code0: 0x%2.2X, code1: 0x%2.2X\n", out->msg.canparams.cc_params.m16c_basic.acc_std_code0, out->msg.canparams.cc_params.m16c_basic.acc_std_code1);
- out->msg.canparams.cc_params.m16c_basic.acc_std_mask0 =
- ~acc_mask0;
- out->msg.canparams.cc_params.m16c_basic.acc_std_mask1 =
- ~acc_mask1;
-
-// info("mask0: 0x%2.2X, mask1: 0x%2.2X\n", out->msg.canparams.cc_params.m16c_basic.acc_std_mask0, out->msg.canparams.cc_params.m16c_basic.acc_std_mask1);
- tmpAccMask = ((acc_mask1) >> 5) + ((acc_mask0) << 3);
-
-// info("tmpAccMask: 0x%4.4X\n", tmpAccMask);
- out->msg.canparams.cc_params.m16c_basic.acc_std_mask0 =
- (unsigned char) ~tmpAccMask;
- out->msg.canparams.cc_params.m16c_basic.acc_std_mask1 =
- (unsigned char) ~(tmpAccMask >> 8);
-
-// info("mask0: 0x%2.2X, mask1: 0x%2.2X\n", out->msg.canparams.cc_params.m16c_basic.acc_std_mask0, out->msg.canparams.cc_params.m16c_basic.acc_std_mask1);
- out->msg.canparams.cc_params.m16c_basic.acc_ext_code0 =
- (unsigned char) tmpAccCode;
- out->msg.canparams.cc_params.m16c_basic.acc_ext_code1 =
- (unsigned char) (tmpAccCode >> 8);
- out->msg.canparams.cc_params.m16c_basic.acc_ext_code2 = acc_code2;
- out->msg.canparams.cc_params.m16c_basic.acc_ext_code3 = acc_code3;
- out->msg.canparams.cc_params.m16c_basic.acc_ext_mask0 =
- (unsigned char) ~tmpAccMask;
- out->msg.canparams.cc_params.m16c_basic.acc_ext_mask1 =
- (unsigned char) ~(tmpAccMask >> 8);
- out->msg.canparams.cc_params.m16c_basic.acc_ext_mask2 =
- ~acc_mask2;
- out->msg.canparams.cc_params.m16c_basic.acc_ext_mask3 =
- ~acc_mask3;
- vfree(baudrate_error);
- vfree(samplepoint_error);
- return 0;
-}
-
-
diff --git a/drivers/staging/go7007/Makefile b/drivers/staging/go7007/Makefile
index d14ea84a01f..1301caa7495 100644
--- a/drivers/staging/go7007/Makefile
+++ b/drivers/staging/go7007/Makefile
@@ -32,8 +32,3 @@ endif
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
-
-# Ubuntu 8.04 has CONFIG_SND undefined, so include lum sound/config.h too
-ifeq ($(CONFIG_SND),)
-EXTRA_CFLAGS += -include sound/config.h
-endif
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index 3f498f6f3ff..90fd40f2473 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -2060,7 +2060,7 @@ typedef struct _STA_ADMIN_CONFIG {
BOOLEAN AdhocBGJoined; // Indicate Adhoc B/G Join.
BOOLEAN Adhoc20NJoined; // Indicate Adhoc 20MHz N Join.
#endif
- // New for WPA, windows want us to to keep association information and
+ // New for WPA, windows want us to keep association information and
// Fixed IEs from last association response
NDIS_802_11_ASSOCIATION_INFORMATION AssocInfo;
USHORT ReqVarIELen; // Length of next VIE include EID & Length
diff --git a/drivers/staging/stlc45xx/stlc45xx.c b/drivers/staging/stlc45xx/stlc45xx.c
index 12d414deaad..be99eb33d81 100644
--- a/drivers/staging/stlc45xx/stlc45xx.c
+++ b/drivers/staging/stlc45xx/stlc45xx.c
@@ -2591,3 +2591,4 @@ module_exit(stlc45xx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
+MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index a86e952ed4c..bf7c687519e 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -15,6 +15,7 @@ menuconfig THERMAL
config THERMAL_HWMON
bool "Hardware monitoring support"
+ depends on THERMAL
depends on HWMON=y || HWMON=THERMAL
help
The generic thermal sysfs driver's hardware monitoring support
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index dcd49f1e96d..240750881d2 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -22,7 +22,6 @@ config USB_ARCH_HAS_HCD
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
default y if SUPERH # r8a66597-hcd
- default y if MICROBLAZE
default PCI
# many non-PCI SOC chips embed OHCI
@@ -39,6 +38,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_AT91
default y if ARCH_PNX4008 && I2C
default y if MFD_TC6393XB
+ default y if ARCH_W90X900
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
@@ -58,6 +58,8 @@ config USB_ARCH_HAS_EHCI
default y if PPC_83xx
default y if SOC_AU1200
default y if ARCH_IXP4XX
+ default y if ARCH_W90X900
+ default y if ARCH_AT91SAM9G45
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 19cb7d5480d..be3c9b80bc9 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_FHCI_HCD) += host/
obj-$(CONFIG_USB_XHCI_HCD) += host/
obj-$(CONFIG_USB_SL811_HCD) += host/
+obj-$(CONFIG_USB_ISP1362_HCD) += host/
obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
@@ -39,6 +40,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
+obj-y += early/
obj-$(CONFIG_USB_ATM) += atm/
obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 85a1a55815c..e3861b21e77 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -59,6 +59,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
+#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
@@ -609,6 +610,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
acm->throttle = 0;
tasklet_schedule(&acm->urb_task);
+ set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
mutex_unlock(&acm->mutex);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ba589d4ca8b..3e564bfe17d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -313,8 +313,13 @@ static ssize_t wdm_write
r = usb_autopm_get_interface(desc->intf);
if (r < 0)
goto outnp;
- r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
- &desc->flags));
+
+	if (!(file->f_flags & O_NONBLOCK))
+ r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
+ &desc->flags));
+ else
+ if (test_bit(WDM_IN_USE, &desc->flags))
+ r = -EAGAIN;
if (r < 0)
goto out;
@@ -377,7 +382,7 @@ outnl:
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- int rv, cntr;
+ int rv, cntr = 0;
int i = 0;
struct wdm_device *desc = file->private_data;
@@ -389,10 +394,23 @@ static ssize_t wdm_read
if (desc->length == 0) {
desc->read = 0;
retry:
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto err;
+ }
i++;
- rv = wait_event_interruptible(desc->wait,
- test_bit(WDM_READ, &desc->flags));
+ if (file->f_flags & O_NONBLOCK) {
+ if (!test_bit(WDM_READ, &desc->flags)) {
+ rv = cntr ? cntr : -EAGAIN;
+ goto err;
+ }
+ rv = 0;
+ } else {
+ rv = wait_event_interruptible(desc->wait,
+ test_bit(WDM_READ, &desc->flags));
+ }
+ /* may have happened while we slept */
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
rv = -ENODEV;
goto err;
@@ -448,7 +466,7 @@ retry:
err:
mutex_unlock(&desc->rlock);
- if (rv < 0)
+ if (rv < 0 && rv != -EAGAIN)
dev_err(&desc->intf->dev, "wdm_read: exit error\n");
return rv;
}
@@ -506,8 +524,6 @@ static int wdm_open(struct inode *inode, struct file *file)
desc = usb_get_intfdata(intf);
if (test_bit(WDM_DISCONNECTING, &desc->flags))
goto out;
-
- ;
file->private_data = desc;
rv = usb_autopm_get_interface(desc->intf);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index b09a527f734..333ee02e7b2 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -57,7 +57,9 @@ MODULE_DEVICE_TABLE(usb, usbtmc_devices);
/*
* This structure is the capabilities for the device
- * See section 4.2.1.8 of the USBTMC specification for details.
+ * See section 4.2.1.8 of the USBTMC specification,
+ * and section 4.2.2 of the USBTMC usb488 subclass
+ * specification for details.
*/
struct usbtmc_dev_capabilities {
__u8 interface_capabilities;
@@ -86,6 +88,8 @@ struct usbtmc_device_data {
bool TermCharEnabled;
bool auto_abort;
+ bool zombie; /* fd of disconnected device */
+
struct usbtmc_dev_capabilities capabilities;
struct kref kref;
struct mutex io_mutex; /* only one i/o function running at a time */
@@ -367,13 +371,13 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
{
struct usbtmc_device_data *data;
struct device *dev;
- unsigned long int n_characters;
+ u32 n_characters;
u8 *buffer;
int actual;
- int done;
- int remaining;
+ size_t done;
+ size_t remaining;
int retval;
- int this_part;
+ size_t this_part;
/* Get pointer to private data structure */
data = filp->private_data;
@@ -384,6 +388,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
return -ENOMEM;
mutex_lock(&data->io_mutex);
+ if (data->zombie) {
+ retval = -ENODEV;
+ goto exit;
+ }
remaining = count;
done = 0;
@@ -401,10 +409,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
buffer[1] = data->bTag;
buffer[2] = ~(data->bTag);
buffer[3] = 0; /* Reserved */
- buffer[4] = (this_part - 12 - 3) & 255;
- buffer[5] = ((this_part - 12 - 3) >> 8) & 255;
- buffer[6] = ((this_part - 12 - 3) >> 16) & 255;
- buffer[7] = ((this_part - 12 - 3) >> 24) & 255;
+ buffer[4] = (this_part) & 255;
+ buffer[5] = ((this_part) >> 8) & 255;
+ buffer[6] = ((this_part) >> 16) & 255;
+ buffer[7] = ((this_part) >> 24) & 255;
buffer[8] = data->TermCharEnabled * 2;
/* Use term character? */
buffer[9] = data->TermChar;
@@ -455,6 +463,22 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
(buffer[6] << 16) +
(buffer[7] << 24);
+ /* Ensure the instrument doesn't lie about it */
+ if(n_characters > actual - 12) {
+ dev_err(dev, "Device lies about message size: %u > %d\n", n_characters, actual - 12);
+ n_characters = actual - 12;
+ }
+
+ /* Ensure the instrument doesn't send more back than requested */
+ if(n_characters > this_part) {
+ dev_err(dev, "Device returns more than requested: %zu > %zu\n", done + n_characters, done + this_part);
+ n_characters = this_part;
+ }
+
+ /* Bound amount of data received by amount of data requested */
+ if (n_characters > this_part)
+ n_characters = this_part;
+
/* Copy buffer to user space */
if (copy_to_user(buf + done, &buffer[12], n_characters)) {
/* There must have been an addressing problem */
@@ -463,8 +487,11 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
}
done += n_characters;
- if (n_characters < USBTMC_SIZE_IOBUFFER)
+		/* Terminate if end-of-message bit received from device */
+ if ((buffer[8] & 0x01) && (actual >= n_characters + 12))
remaining = 0;
+ else
+ remaining -= n_characters;
}
/* Update file position value */
@@ -496,6 +523,10 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
return -ENOMEM;
mutex_lock(&data->io_mutex);
+ if (data->zombie) {
+ retval = -ENODEV;
+ goto exit;
+ }
remaining = count;
done = 0;
@@ -767,20 +798,21 @@ static int get_capabilities(struct usbtmc_device_data *data)
}
dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
- dev_dbg(dev, "Interface capabilities are %x\n", buffer[4]);
- dev_dbg(dev, "Device capabilities are %x\n", buffer[5]);
- dev_dbg(dev, "USB488 interface capabilities are %x\n", buffer[14]);
- dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);
if (buffer[0] != USBTMC_STATUS_SUCCESS) {
dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
rv = -EPERM;
goto err_out;
}
+ dev_dbg(dev, "Interface capabilities are %x\n", buffer[4]);
+ dev_dbg(dev, "Device capabilities are %x\n", buffer[5]);
+ dev_dbg(dev, "USB488 interface capabilities are %x\n", buffer[14]);
+ dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);
data->capabilities.interface_capabilities = buffer[4];
data->capabilities.device_capabilities = buffer[5];
data->capabilities.usb488_interface_capabilities = buffer[14];
data->capabilities.usb488_device_capabilities = buffer[15];
+ rv = 0;
err_out:
kfree(buffer);
@@ -925,6 +957,10 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
data = file->private_data;
mutex_lock(&data->io_mutex);
+ if (data->zombie) {
+ retval = -ENODEV;
+ goto skip_io_on_zombie;
+ }
switch (cmd) {
case USBTMC_IOCTL_CLEAR_OUT_HALT:
@@ -952,6 +988,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
+skip_io_on_zombie:
mutex_unlock(&data->io_mutex);
return retval;
}
@@ -995,6 +1032,7 @@ static int usbtmc_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
kref_init(&data->kref);
mutex_init(&data->io_mutex);
+ data->zombie = 0;
/* Initialize USBTMC bTag and other fields */
data->bTag = 1;
@@ -1065,14 +1103,30 @@ static void usbtmc_disconnect(struct usb_interface *intf)
usb_deregister_dev(intf, &usbtmc_class);
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+ mutex_lock(&data->io_mutex);
+ data->zombie = 1;
+ mutex_unlock(&data->io_mutex);
kref_put(&data->kref, usbtmc_delete);
}
+static int usbtmc_suspend (struct usb_interface *intf, pm_message_t message)
+{
+ /* this driver does not have pending URBs */
+ return 0;
+}
+
+static int usbtmc_resume (struct usb_interface *intf)
+{
+ return 0;
+}
+
static struct usb_driver usbtmc_driver = {
.name = "usbtmc",
.id_table = usbtmc_devices,
.probe = usbtmc_probe,
- .disconnect = usbtmc_disconnect
+ .disconnect = usbtmc_disconnect,
+ .suspend = usbtmc_suspend,
+ .resume = usbtmc_resume,
};
static int __init usbtmc_init(void)
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index a16c538d013..0d3af6a6ee4 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -105,7 +105,7 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep->ss_ep_comp->extralen = i;
buffer += i;
size -= i;
- retval = buffer - buffer_start + i;
+ retval = buffer - buffer_start;
if (num_skipped > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
num_skipped, plural(num_skipped),
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4247eccf858..181f78c8410 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -52,6 +52,7 @@
#include "hcd.h" /* for usbcore internals */
#include "usb.h"
+#include "hub.h"
#define USB_MAXBUS 64
#define USB_DEVICE_MAX USB_MAXBUS * 128
@@ -73,6 +74,7 @@ struct dev_state {
void __user *disccontext;
unsigned long ifclaimed;
u32 secid;
+ u32 disabled_bulk_eps;
};
struct async {
@@ -87,6 +89,8 @@ struct async {
struct urb *urb;
int status;
u32 secid;
+ u8 bulk_addr;
+ u8 bulk_status;
};
static int usbfs_snoop;
@@ -99,11 +103,15 @@ MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");
dev_info(dev , format , ## arg); \
} while (0)
-#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
+enum snoop_when {
+ SUBMIT, COMPLETE
+};
+#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
#define MAX_USBFS_BUFFER_SIZE 16384
+
static int connected(struct dev_state *ps)
{
return (!list_empty(&ps->list) &&
@@ -300,24 +308,79 @@ static struct async *async_getpending(struct dev_state *ps,
return NULL;
}
-static void snoop_urb(struct urb *urb, void __user *userurb)
+static void snoop_urb(struct usb_device *udev,
+ void __user *userurb, int pipe, unsigned length,
+ int timeout_or_status, enum snoop_when when)
{
- unsigned j;
- unsigned char *data = urb->transfer_buffer;
+ static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
+ static const char *dirs[] = {"out", "in"};
+ int ep;
+ const char *t, *d;
if (!usbfs_snoop)
return;
- dev_info(&urb->dev->dev, "direction=%s\n",
- usb_urb_dir_in(urb) ? "IN" : "OUT");
- dev_info(&urb->dev->dev, "userurb=%p\n", userurb);
- dev_info(&urb->dev->dev, "transfer_buffer_length=%u\n",
- urb->transfer_buffer_length);
- dev_info(&urb->dev->dev, "actual_length=%u\n", urb->actual_length);
- dev_info(&urb->dev->dev, "data: ");
- for (j = 0; j < urb->transfer_buffer_length; ++j)
- printk("%02x ", data[j]);
- printk("\n");
+ ep = usb_pipeendpoint(pipe);
+ t = types[usb_pipetype(pipe)];
+ d = dirs[!!usb_pipein(pipe)];
+
+ if (userurb) { /* Async */
+ if (when == SUBMIT)
+ dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+ "length %u\n",
+ userurb, ep, t, d, length);
+ else
+ dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+ "actual_length %u status %d\n",
+ userurb, ep, t, d, length,
+ timeout_or_status);
+ } else {
+ if (when == SUBMIT)
+ dev_info(&udev->dev, "ep%d %s-%s, length %u, "
+ "timeout %d\n",
+ ep, t, d, length, timeout_or_status);
+ else
+ dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, "
+ "status %d\n",
+ ep, t, d, length, timeout_or_status);
+ }
+}
+
+#define AS_CONTINUATION 1
+#define AS_UNLINK 2
+
+static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
+__releases(ps->lock)
+__acquires(ps->lock)
+{
+ struct async *as;
+
+ /* Mark all the pending URBs that match bulk_addr, up to but not
+ * including the first one without AS_CONTINUATION. If such an
+ * URB is encountered then a new transfer has already started so
+ * the endpoint doesn't need to be disabled; otherwise it does.
+ */
+ list_for_each_entry(as, &ps->async_pending, asynclist) {
+ if (as->bulk_addr == bulk_addr) {
+ if (as->bulk_status != AS_CONTINUATION)
+ goto rescan;
+ as->bulk_status = AS_UNLINK;
+ as->bulk_addr = 0;
+ }
+ }
+ ps->disabled_bulk_eps |= (1 << bulk_addr);
+
+ /* Now carefully unlink all the marked pending URBs */
+ rescan:
+ list_for_each_entry(as, &ps->async_pending, asynclist) {
+ if (as->bulk_status == AS_UNLINK) {
+ as->bulk_status = 0; /* Only once */
+ spin_unlock(&ps->lock); /* Allow completions */
+ usb_unlink_urb(as->urb);
+ spin_lock(&ps->lock);
+ goto rescan;
+ }
+ }
}
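The bulk_addr values consumed here use the packed encoding described in the proc_do_submiturb() comment further down: the endpoint number stays in the low four bits and the direction bit is shifted down next to it, so every bulk endpoint maps into the range 0-31 and ps->disabled_bulk_eps can track all of them in a single 32-bit word. A small stand-alone sketch of that packing (illustrative only, not driver code):

#include <stdio.h>

/* Same packing as in proc_do_submiturb(): bEndpointAddress carries the
 * endpoint number in bits 0-3 and the IN/OUT direction in bit 7 (0x80);
 * shifting the direction bit right by 3 moves it to 0x10. */
static unsigned int pack_bulk_addr(unsigned char bEndpointAddress)
{
	return (bEndpointAddress & 0x0f) | ((bEndpointAddress & 0x80) >> 3);
}

int main(void)
{
	printf("%u\n", pack_bulk_addr(0x02));	/* bulk OUT 2 ->  2: bit  2 of disabled_bulk_eps */
	printf("%u\n", pack_bulk_addr(0x81));	/* bulk IN  1 -> 17: bit 17 of disabled_bulk_eps */
	return 0;
}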
static void async_completed(struct urb *urb)
@@ -346,7 +409,11 @@ static void async_completed(struct urb *urb)
secid = as->secid;
}
snoop(&urb->dev->dev, "urb complete\n");
- snoop_urb(urb, as->userurb);
+ snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
+ as->status, COMPLETE);
+ if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
+ as->status != -ENOENT)
+ cancel_bulk_urbs(ps, as->bulk_addr);
spin_unlock(&ps->lock);
if (signr)
@@ -655,6 +722,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
struct async *as;
usb_lock_device(dev);
+ usb_hub_release_all_ports(dev, ps);
/* Protect against simultaneous open */
mutex_lock(&usbfs_mutex);
@@ -688,7 +756,7 @@ static int proc_control(struct dev_state *ps, void __user *arg)
unsigned int tmo;
unsigned char *tbuf;
unsigned wLength;
- int i, j, ret;
+ int i, pipe, ret;
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
@@ -708,24 +776,17 @@ static int proc_control(struct dev_state *ps, void __user *arg)
free_page((unsigned long)tbuf);
return -EINVAL;
}
- snoop(&dev->dev, "control read: bRequest=%02x "
- "bRrequestType=%02x wValue=%04x "
- "wIndex=%04x wLength=%04x\n",
- ctrl.bRequest, ctrl.bRequestType, ctrl.wValue,
- ctrl.wIndex, ctrl.wLength);
+ pipe = usb_rcvctrlpipe(dev, 0);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
usb_unlock_device(dev);
- i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.bRequest,
+ i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
+
if ((i > 0) && ctrl.wLength) {
- if (usbfs_snoop) {
- dev_info(&dev->dev, "control read: data ");
- for (j = 0; j < i; ++j)
- printk("%02x ", (u8)(tbuf)[j]);
- printk("\n");
- }
if (copy_to_user(ctrl.data, tbuf, i)) {
free_page((unsigned long)tbuf);
return -EFAULT;
@@ -738,22 +799,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
- snoop(&dev->dev, "control write: bRequest=%02x "
- "bRrequestType=%02x wValue=%04x "
- "wIndex=%04x wLength=%04x\n",
- ctrl.bRequest, ctrl.bRequestType, ctrl.wValue,
- ctrl.wIndex, ctrl.wLength);
- if (usbfs_snoop) {
- dev_info(&dev->dev, "control write: data: ");
- for (j = 0; j < ctrl.wLength; ++j)
- printk("%02x ", (unsigned char)(tbuf)[j]);
- printk("\n");
- }
+ pipe = usb_sndctrlpipe(dev, 0);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
}
free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
@@ -772,7 +826,7 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
unsigned int tmo, len1, pipe;
int len2;
unsigned char *tbuf;
- int i, j, ret;
+ int i, ret;
if (copy_from_user(&bulk, arg, sizeof(bulk)))
return -EFAULT;
@@ -799,18 +853,14 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
kfree(tbuf);
return -EINVAL;
}
- snoop(&dev->dev, "bulk read: len=0x%02x timeout=%04d\n",
- bulk.len, bulk.timeout);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+
if (!i && len2) {
- if (usbfs_snoop) {
- dev_info(&dev->dev, "bulk read: data ");
- for (j = 0; j < len2; ++j)
- printk("%02x ", (u8)(tbuf)[j]);
- printk("\n");
- }
if (copy_to_user(bulk.data, tbuf, len2)) {
kfree(tbuf);
return -EFAULT;
@@ -823,17 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
- snoop(&dev->dev, "bulk write: len=0x%02x timeout=%04d\n",
- bulk.len, bulk.timeout);
- if (usbfs_snoop) {
- dev_info(&dev->dev, "bulk write: data: ");
- for (j = 0; j < len1; ++j)
- printk("%02x ", (unsigned char)(tbuf)[j]);
- printk("\n");
- }
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
}
kfree(tbuf);
if (i < 0)
@@ -991,6 +1036,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
USBDEVFS_URB_SHORT_NOT_OK |
+ USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET |
USBDEVFS_URB_NO_INTERRUPT))
@@ -1051,13 +1097,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
- snoop(&ps->dev->dev, "control urb: bRequest=%02x "
- "bRrequestType=%02x wValue=%04x "
- "wIndex=%04x wLength=%04x\n",
- dr->bRequest, dr->bRequestType,
- __le16_to_cpup(&dr->wValue),
- __le16_to_cpup(&dr->wIndex),
- __le16_to_cpup(&dr->wLength));
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1070,7 +1109,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->number_of_packets = 0;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
- snoop(&ps->dev->dev, "bulk urb\n");
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1097,12 +1135,12 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
totlen += isopkt[u].length;
}
- if (totlen > 32768) {
+ /* 3072 * 64 microframes */
+ if (totlen > 196608) {
kfree(isopkt);
return -EINVAL;
}
uurb->buffer_length = totlen;
- snoop(&ps->dev->dev, "iso urb\n");
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
@@ -1111,7 +1149,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
- snoop(&ps->dev->dev, "interrupt urb\n");
break;
default:
@@ -1198,11 +1235,46 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EFAULT;
}
}
- snoop_urb(as->urb, as->userurb);
+ snoop_urb(ps->dev, as->userurb, as->urb->pipe,
+ as->urb->transfer_buffer_length, 0, SUBMIT);
async_newpending(as);
- if ((ret = usb_submit_urb(as->urb, GFP_KERNEL))) {
+
+ if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ spin_lock_irq(&ps->lock);
+
+ /* Not exactly the endpoint address; the direction bit is
+ * shifted to the 0x10 position so that the value will be
+ * between 0 and 31.
+ */
+ as->bulk_addr = usb_endpoint_num(&ep->desc) |
+ ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ >> 3);
+
+ /* If this bulk URB is the start of a new transfer, re-enable
+ * the endpoint. Otherwise mark it as a continuation URB.
+ */
+ if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION)
+ as->bulk_status = AS_CONTINUATION;
+ else
+ ps->disabled_bulk_eps &= ~(1 << as->bulk_addr);
+
+ /* Don't accept continuation URBs if the endpoint is
+ * disabled because of an earlier error.
+ */
+ if (ps->disabled_bulk_eps & (1 << as->bulk_addr))
+ ret = -EREMOTEIO;
+ else
+ ret = usb_submit_urb(as->urb, GFP_ATOMIC);
+ spin_unlock_irq(&ps->lock);
+ } else {
+ ret = usb_submit_urb(as->urb, GFP_KERNEL);
+ }
+
+ if (ret) {
dev_printk(KERN_DEBUG, &ps->dev->dev,
"usbfs: usb_submit_urb returned %d\n", ret);
+ snoop_urb(ps->dev, as->userurb, as->urb->pipe,
+ 0, ret, COMPLETE);
async_removepending(as);
free_async(as);
return ret;
@@ -1548,6 +1620,29 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
}
#endif
+static int proc_claim_port(struct dev_state *ps, void __user *arg)
+{
+ unsigned portnum;
+ int rc;
+
+ if (get_user(portnum, (unsigned __user *) arg))
+ return -EFAULT;
+ rc = usb_hub_claim_port(ps->dev, portnum, ps);
+ if (rc == 0)
+ snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n",
+ portnum, task_pid_nr(current), current->comm);
+ return rc;
+}
+
+static int proc_release_port(struct dev_state *ps, void __user *arg)
+{
+ unsigned portnum;
+
+ if (get_user(portnum, (unsigned __user *) arg))
+ return -EFAULT;
+ return usb_hub_release_port(ps->dev, portnum, ps);
+}
+
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@@ -1645,7 +1740,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
break;
case USBDEVFS_REAPURBNDELAY32:
- snoop(&dev->dev, "%s: REAPURBDELAY32\n", __func__);
+ snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
ret = proc_reapurbnonblock_compat(ps, p);
break;
@@ -1666,7 +1761,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
break;
case USBDEVFS_REAPURBNDELAY:
- snoop(&dev->dev, "%s: REAPURBDELAY\n", __func__);
+ snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
ret = proc_reapurbnonblock(ps, p);
break;
@@ -1689,6 +1784,16 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
snoop(&dev->dev, "%s: IOCTL\n", __func__);
ret = proc_ioctl_default(ps, p);
break;
+
+ case USBDEVFS_CLAIM_PORT:
+ snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__);
+ ret = proc_claim_port(ps, p);
+ break;
+
+ case USBDEVFS_RELEASE_PORT:
+ snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__);
+ ret = proc_release_port(ps, p);
+ break;
}
usb_unlock_device(dev);
if (ret >= 0)
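
The devio.c changes above add two userspace-visible features: bulk URBs can be flagged as continuations of an earlier transfer (USBDEVFS_URB_BULK_CONTINUATION), and a process can claim hub ports so that devices attached there are left to userspace. The sketch below is a hypothetical illustration of the new port-claiming ioctls, not part of the patch; it assumes the ioctl numbers are exported through <linux/usbdevice_fs.h> and that /dev/bus/usb/001/002 is the hub's usbfs node.

/* Hypothetical userspace sketch: claim port 3 on a hub via usbfs.
 * Devices plugged into a claimed port are skipped by generic_probe()
 * and usb_probe_interface(), and the claim is dropped automatically
 * when the file descriptor is closed (usbdev_release).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	unsigned port = 3;	/* 1-based port number, as in proc_claim_port() */
	int fd = open("/dev/bus/usb/001/002", O_RDWR);

	if (fd < 0) {
		perror("open hub");
		return 1;
	}
	if (ioctl(fd, USBDEVFS_CLAIM_PORT, &port) < 0)
		perror("USBDEVFS_CLAIM_PORT");
	/* ... attach devices to port 3 and drive them through usbfs ... */
	ioctl(fd, USBDEVFS_RELEASE_PORT, &port);
	close(fd);
	return 0;
}
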
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 69e5773abfc..4f864472c5c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -207,6 +207,9 @@ static int usb_probe_interface(struct device *dev)
intf->needs_binding = 0;
+ if (usb_device_is_owned(udev))
+ return -ENODEV;
+
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
return -ENODEV;
@@ -232,28 +235,35 @@ static int usb_probe_interface(struct device *dev)
/* The interface should always appear to be in use
* unless the driver suports autosuspend.
*/
- intf->pm_usage_cnt = !(driver->supports_autosuspend);
+ atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
/* Carry out a deferred switch to altsetting 0 */
if (intf->needs_altsetting0) {
- usb_set_interface(udev, intf->altsetting[0].
+ error = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
+ if (error < 0)
+ goto err;
+
intf->needs_altsetting0 = 0;
}
error = driver->probe(intf, id);
- if (error) {
- mark_quiesced(intf);
- intf->needs_remote_wakeup = 0;
- intf->condition = USB_INTERFACE_UNBOUND;
- usb_cancel_queued_reset(intf);
- } else
- intf->condition = USB_INTERFACE_BOUND;
+ if (error)
+ goto err;
+ intf->condition = USB_INTERFACE_BOUND;
usb_autosuspend_device(udev);
}
return error;
+
+err:
+ mark_quiesced(intf);
+ intf->needs_remote_wakeup = 0;
+ intf->condition = USB_INTERFACE_UNBOUND;
+ usb_cancel_queued_reset(intf);
+ usb_autosuspend_device(udev);
+ return error;
}
/* called from driver core with dev locked */
@@ -262,7 +272,7 @@ static int usb_unbind_interface(struct device *dev)
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev;
- int error;
+ int error, r;
intf->condition = USB_INTERFACE_UNBINDING;
@@ -290,11 +300,14 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
- } else if (!error && intf->dev.power.status == DPM_ON)
- usb_set_interface(udev, intf->altsetting[0].
+ } else if (!error && intf->dev.power.status == DPM_ON) {
+ r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
- else
+ if (r < 0)
+ intf->needs_altsetting0 = 1;
+ } else {
intf->needs_altsetting0 = 1;
+ }
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
@@ -344,7 +357,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
usb_pm_lock(udev);
iface->condition = USB_INTERFACE_BOUND;
mark_active(iface);
- iface->pm_usage_cnt = !(driver->supports_autosuspend);
+ atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend);
usb_pm_unlock(udev);
/* if interface was already added, bind now; else let
@@ -1065,7 +1078,7 @@ static int autosuspend_check(struct usb_device *udev, int reschedule)
intf = udev->actconfig->interface[i];
if (!is_active(intf))
continue;
- if (intf->pm_usage_cnt > 0)
+ if (atomic_read(&intf->pm_usage_cnt) > 0)
return -EBUSY;
if (intf->needs_remote_wakeup &&
!udev->do_remote_wakeup) {
@@ -1461,17 +1474,19 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
status = -ENODEV;
else {
udev->auto_pm = 1;
- intf->pm_usage_cnt += inc_usage_cnt;
+ atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) {
+ if (inc_usage_cnt >= 0 &&
+ atomic_read(&intf->pm_usage_cnt) > 0) {
if (udev->state == USB_STATE_SUSPENDED)
status = usb_resume_both(udev,
PMSG_AUTO_RESUME);
if (status != 0)
- intf->pm_usage_cnt -= inc_usage_cnt;
+ atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
else
udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) {
+ } else if (inc_usage_cnt <= 0 &&
+ atomic_read(&intf->pm_usage_cnt) <= 0) {
status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
}
}
@@ -1516,7 +1531,7 @@ void usb_autopm_put_interface(struct usb_interface *intf)
status = usb_autopm_do_interface(intf, -1);
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, intf->pm_usage_cnt);
+ __func__, status, atomic_read(&intf->pm_usage_cnt));
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
@@ -1544,10 +1559,10 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
status = -ENODEV;
} else {
udev->last_busy = jiffies;
- --intf->pm_usage_cnt;
+ atomic_dec(&intf->pm_usage_cnt);
if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
status = -EPERM;
- else if (intf->pm_usage_cnt <= 0 &&
+ else if (atomic_read(&intf->pm_usage_cnt) <= 0 &&
!timer_pending(&udev->autosuspend.timer)) {
queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
round_jiffies_up_relative(
@@ -1555,7 +1570,7 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
}
}
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, intf->pm_usage_cnt);
+ __func__, status, atomic_read(&intf->pm_usage_cnt));
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
@@ -1599,7 +1614,7 @@ int usb_autopm_get_interface(struct usb_interface *intf)
status = usb_autopm_do_interface(intf, 1);
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, intf->pm_usage_cnt);
+ __func__, status, atomic_read(&intf->pm_usage_cnt));
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
@@ -1627,10 +1642,14 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
status = -ENODEV;
else if (udev->autoresume_disabled)
status = -EPERM;
- else if (++intf->pm_usage_cnt > 0 && udev->state == USB_STATE_SUSPENDED)
- queue_work(ksuspend_usb_wq, &udev->autoresume);
+ else {
+ atomic_inc(&intf->pm_usage_cnt);
+ if (atomic_read(&intf->pm_usage_cnt) > 0 &&
+ udev->state == USB_STATE_SUSPENDED)
+ queue_work(ksuspend_usb_wq, &udev->autoresume);
+ }
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, intf->pm_usage_cnt);
+ __func__, status, atomic_read(&intf->pm_usage_cnt));
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
@@ -1652,7 +1671,7 @@ int usb_autopm_set_interface(struct usb_interface *intf)
status = usb_autopm_do_interface(intf, 0);
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, intf->pm_usage_cnt);
+ __func__, status, atomic_read(&intf->pm_usage_cnt));
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 30ecac3af15..05e6d313961 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -158,7 +158,9 @@ static int generic_probe(struct usb_device *udev)
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
- if (udev->authorized == 0)
+ if (usb_device_is_owned(udev))
+ ; /* Don't configure if the device is owned */
+ else if (udev->authorized == 0)
dev_err(&udev->dev, "Device is not authorized for usage\n");
else {
c = usb_choose_configuration(udev);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 95ccfa0b9fc..34de475f016 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -337,72 +337,89 @@ static const u8 ss_rh_config_descriptor[] = {
/*-------------------------------------------------------------------------*/
-/*
- * helper routine for returning string descriptors in UTF-16LE
- * input can actually be ISO-8859-1; ASCII is its 7-bit subset
+/**
+ * ascii2desc() - Helper routine for producing UTF-16LE string descriptors
+ * @s: Null-terminated ASCII (actually ISO-8859-1) string
+ * @buf: Buffer for USB string descriptor (header + UTF-16LE)
+ * @len: Length (in bytes; may be odd) of descriptor buffer.
+ *
+ * The return value is the number of bytes filled in: 2 + 2*strlen(s) or
+ * len, whichever is less.
+ *
+ * USB String descriptors can contain at most 126 characters; input
+ * strings longer than that are truncated.
*/
-static unsigned ascii2utf(char *s, u8 *utf, int utfmax)
+static unsigned
+ascii2desc(char const *s, u8 *buf, unsigned len)
{
- unsigned retval;
+ unsigned n, t = 2 + 2*strlen(s);
- for (retval = 0; *s && utfmax > 1; utfmax -= 2, retval += 2) {
- *utf++ = *s++;
- *utf++ = 0;
- }
- if (utfmax > 0) {
- *utf = *s;
- ++retval;
+ if (t > 254)
+ t = 254; /* Longest possible UTF string descriptor */
+ if (len > t)
+ len = t;
+
+ t += USB_DT_STRING << 8; /* Now t is first 16 bits to store */
+
+ n = len;
+ while (n--) {
+ *buf++ = t;
+ if (!n--)
+ break;
+ *buf++ = t >> 8;
+ t = (unsigned char)*s++;
}
- return retval;
+ return len;
}
-/*
- * rh_string - provides manufacturer, product and serial strings for root hub
- * @id: the string ID number (1: serial number, 2: product, 3: vendor)
+/**
+ * rh_string() - provides string descriptors for root hub
+ * @id: the string ID number (0: langids, 1: serial #, 2: product, 3: vendor)
* @hcd: the host controller for this root hub
- * @data: return packet in UTF-16 LE
- * @len: length of the return packet
+ * @data: buffer for output packet
+ * @len: length of the provided buffer
*
* Produces either a manufacturer, product or serial number string for the
* virtual root hub device.
+ * Returns the number of bytes filled in: the length of the descriptor or
+ * of the provided buffer, whichever is less.
*/
-static unsigned rh_string(int id, struct usb_hcd *hcd, u8 *data, unsigned len)
+static unsigned
+rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
{
- char buf [100];
+ char buf[100];
+ char const *s;
+ static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
// language ids
- if (id == 0) {
- buf[0] = 4; buf[1] = 3; /* 4 bytes string data */
- buf[2] = 0x09; buf[3] = 0x04; /* MSFT-speak for "en-us" */
- len = min_t(unsigned, len, 4);
- memcpy (data, buf, len);
+ switch (id) {
+ case 0:
+ /* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
+ /* See http://www.usb.org/developers/docs/USB_LANGIDs.pdf */
+ if (len > 4)
+ len = 4;
+ memcpy(data, langids, len);
return len;
-
- // serial number
- } else if (id == 1) {
- strlcpy (buf, hcd->self.bus_name, sizeof buf);
-
- // product description
- } else if (id == 2) {
- strlcpy (buf, hcd->product_desc, sizeof buf);
-
- // id 3 == vendor description
- } else if (id == 3) {
+ case 1:
+ /* Serial number */
+ s = hcd->self.bus_name;
+ break;
+ case 2:
+ /* Product name */
+ s = hcd->product_desc;
+ break;
+ case 3:
+ /* Manufacturer */
snprintf (buf, sizeof buf, "%s %s %s", init_utsname()->sysname,
init_utsname()->release, hcd->driver->description);
- }
-
- switch (len) { /* All cases fall through */
+ s = buf;
+ break;
default:
- len = 2 + ascii2utf (buf, data + 2, len - 2);
- case 2:
- data [1] = 3; /* type == string */
- case 1:
- data [0] = 2 * (strlen (buf) + 1);
- case 0:
- ; /* Compiler wants a statement here */
+ /* Can't happen; caller guarantees it */
+ return 0;
}
- return len;
+
+ return ascii2desc(s, data, len);
}
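
For reference, the byte layout that ascii2desc() streams out is the standard USB string descriptor: bLength, bDescriptorType (USB_DT_STRING, 0x03), then the text as UTF-16LE, truncated to the buffer. The following byte-indexed sketch produces the same output and is included for illustration only; it is not the patch's implementation.

/* Illustration only: same bytes as ascii2desc(), but with explicit
 * indexing instead of the streaming 16-bit pipeline above.
 */
#include <string.h>

static unsigned ascii2desc_ref(const char *s, unsigned char *buf, unsigned len)
{
	unsigned t = 2 + 2 * strlen(s);		/* header + UTF-16LE payload */
	unsigned i;

	if (t > 254)				/* at most 126 characters */
		t = 254;
	if (len > t)
		len = t;
	for (i = 0; i < len; i++) {
		if (i == 0)
			buf[i] = t;		/* bLength */
		else if (i == 1)
			buf[i] = 0x03;		/* USB_DT_STRING */
		else if (i % 2 == 0)
			buf[i] = s[i / 2 - 1];	/* low byte of each code unit */
		else
			buf[i] = 0;		/* high byte: ASCII input */
	}
	return len;
}
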
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index ec5c67ea07b..79782a1c43f 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -267,6 +267,11 @@ struct hc_driver {
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
/* Returns the hardware-chosen device address */
int (*address_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Notifies the HCD after a hub descriptor is fetched.
+ * Will block.
+ */
+ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 71f86c60d83..5ce839137ad 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -78,6 +78,7 @@ struct usb_hub {
u8 indicator[USB_MAXCHILDREN];
struct delayed_work leds;
struct delayed_work init_work;
+ void **port_owners;
};
@@ -162,8 +163,10 @@ static inline char *portspeed(int portstatus)
}
/* Note that hdev or one of its children must be locked! */
-static inline struct usb_hub *hdev_to_hub(struct usb_device *hdev)
+static struct usb_hub *hdev_to_hub(struct usb_device *hdev)
{
+ if (!hdev || !hdev->actconfig)
+ return NULL;
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
@@ -372,7 +375,7 @@ static void kick_khubd(struct usb_hub *hub)
unsigned long flags;
/* Suppress autosuspend until khubd runs */
- to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
+ atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1);
spin_lock_irqsave(&hub_event_lock, flags);
if (!hub->disconnected && list_empty(&hub->event_list)) {
@@ -384,8 +387,10 @@ static void kick_khubd(struct usb_hub *hub)
void usb_kick_khubd(struct usb_device *hdev)
{
- /* FIXME: What if hdev isn't bound to the hub driver? */
- kick_khubd(hdev_to_hub(hdev));
+ struct usb_hub *hub = hdev_to_hub(hdev);
+
+ if (hub)
+ kick_khubd(hub);
}
@@ -677,7 +682,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
- to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
+ atomic_set(&to_usb_interface(hub->intfdev)->
+ pm_usage_cnt, 1);
return; /* Continues at init2: below */
} else {
hub_power_on(hub, true);
@@ -854,25 +860,24 @@ static int hub_post_reset(struct usb_interface *intf)
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
+ struct usb_hcd *hcd;
struct usb_device *hdev = hub->hdev;
struct device *hub_dev = hub->intfdev;
u16 hubstatus, hubchange;
u16 wHubCharacteristics;
unsigned int pipe;
int maxp, ret;
- char *message;
+ char *message = "out of memory";
hub->buffer = usb_buffer_alloc(hdev, sizeof(*hub->buffer), GFP_KERNEL,
&hub->buffer_dma);
if (!hub->buffer) {
- message = "can't allocate hub irq buffer";
ret = -ENOMEM;
goto fail;
}
hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL);
if (!hub->status) {
- message = "can't kmalloc hub status buffer";
ret = -ENOMEM;
goto fail;
}
@@ -880,7 +885,6 @@ static int hub_configure(struct usb_hub *hub,
hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
- message = "can't kmalloc hub descriptor";
ret = -ENOMEM;
goto fail;
}
@@ -904,6 +908,12 @@ static int hub_configure(struct usb_hub *hub,
dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild,
(hdev->maxchild == 1) ? "" : "s");
+ hub->port_owners = kzalloc(hdev->maxchild * sizeof(void *), GFP_KERNEL);
+ if (!hub->port_owners) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (wHubCharacteristics & HUB_CHAR_COMPOUND) {
@@ -1052,6 +1062,19 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
+ /* Update the HCD's internal representation of this hub before khubd
+ * starts getting port status changes for devices under the hub.
+ */
+ hcd = bus_to_hcd(hdev->bus);
+ if (hcd->driver->update_hub_device) {
+ ret = hcd->driver->update_hub_device(hcd, hdev,
+ &hub->tt, GFP_KERNEL);
+ if (ret < 0) {
+ message = "can't update HCD hub info";
+ goto fail;
+ }
+ }
+
ret = hub_hub_status(hub, &hubstatus, &hubchange);
if (ret < 0) {
message = "can't get hub status";
@@ -1082,7 +1105,6 @@ static int hub_configure(struct usb_hub *hub,
hub->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hub->urb) {
- message = "couldn't allocate interrupt urb";
ret = -ENOMEM;
goto fail;
}
@@ -1131,11 +1153,13 @@ static void hub_disconnect(struct usb_interface *intf)
hub_quiesce(hub, HUB_DISCONNECT);
usb_set_intfdata (intf, NULL);
+ hub->hdev->maxchild = 0;
if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
usb_free_urb(hub->urb);
+ kfree(hub->port_owners);
kfree(hub->descriptor);
kfree(hub->status);
usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer,
@@ -1250,6 +1274,79 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
}
}
+/*
+ * Allow user programs to claim ports on a hub. When a device is attached
+ * to one of these "claimed" ports, the program will "own" the device.
+ */
+static int find_port_owner(struct usb_device *hdev, unsigned port1,
+ void ***ppowner)
+{
+ if (hdev->state == USB_STATE_NOTATTACHED)
+ return -ENODEV;
+ if (port1 == 0 || port1 > hdev->maxchild)
+ return -EINVAL;
+
+ /* This assumes that devices not managed by the hub driver
+ * will always have maxchild equal to 0.
+ */
+ *ppowner = &(hdev_to_hub(hdev)->port_owners[port1 - 1]);
+ return 0;
+}
+
+/* In the following three functions, the caller must hold hdev's lock */
+int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, void *owner)
+{
+ int rc;
+ void **powner;
+
+ rc = find_port_owner(hdev, port1, &powner);
+ if (rc)
+ return rc;
+ if (*powner)
+ return -EBUSY;
+ *powner = owner;
+ return rc;
+}
+
+int usb_hub_release_port(struct usb_device *hdev, unsigned port1, void *owner)
+{
+ int rc;
+ void **powner;
+
+ rc = find_port_owner(hdev, port1, &powner);
+ if (rc)
+ return rc;
+ if (*powner != owner)
+ return -ENOENT;
+ *powner = NULL;
+ return rc;
+}
+
+void usb_hub_release_all_ports(struct usb_device *hdev, void *owner)
+{
+ int n;
+ void **powner;
+
+ n = find_port_owner(hdev, 1, &powner);
+ if (n == 0) {
+ for (; n < hdev->maxchild; (++n, ++powner)) {
+ if (*powner == owner)
+ *powner = NULL;
+ }
+ }
+}
+
+/* The caller must hold udev's lock */
+bool usb_device_is_owned(struct usb_device *udev)
+{
+ struct usb_hub *hub;
+
+ if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
+ return false;
+ hub = hdev_to_hub(udev->parent);
+ return !!hub->port_owners[udev->portnum - 1];
+}
+
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
@@ -2849,14 +2946,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* For a suspended device, treat this as a
* remote wakeup event.
*/
- if (udev->do_remote_wakeup)
- status = remote_wakeup(udev);
-
- /* Otherwise leave it be; devices can't tell the
- * difference between suspended and disabled.
- */
- else
- status = 0;
+ status = remote_wakeup(udev);
#endif
} else {
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index ffe75e83787..97b40ce133f 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -48,7 +48,6 @@
#define USBFS_DEFAULT_BUSMODE (S_IXUGO | S_IRUGO)
#define USBFS_DEFAULT_LISTMODE S_IRUGO
-static struct super_operations usbfs_ops;
static const struct file_operations default_file_operations;
static struct vfsmount *usbfs_mount;
static int usbfs_mount_count; /* = 0 */
@@ -449,7 +448,7 @@ static const struct file_operations default_file_operations = {
.llseek = default_file_lseek,
};
-static struct super_operations usbfs_ops = {
+static const struct super_operations usbfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.remount_fs = remount,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 9720e699f47..da718e84d58 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -459,35 +459,23 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->urbs[i]->context = io;
/*
- * Some systems need to revert to PIO when DMA is
- * temporarily unavailable. For their sakes, both
- * transfer_buffer and transfer_dma are set when
- * possible. However this can only work on systems
- * without:
+ * Some systems need to revert to PIO when DMA is temporarily
+ * unavailable. For their sakes, both transfer_buffer and
+ * transfer_dma are set when possible.
*
- * - HIGHMEM, since DMA buffers located in high memory
- * are not directly addressable by the CPU for PIO;
- *
- * - IOMMU, since dma_map_sg() is allowed to use an
- * IOMMU to make virtually discontiguous buffers be
- * "dma-contiguous" so that PIO and DMA need diferent
- * numbers of URBs.
- *
- * So when HIGHMEM or IOMMU are in use, transfer_buffer
- * is NULL to prevent stale pointers and to help spot
- * bugs.
+ * Note that if IOMMU coalescing occurred, we cannot
+ * trust sg_page anymore, so check if S/G list shrunk.
*/
+ if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
+ io->urbs[i]->transfer_buffer = sg_virt(sg);
+ else
+ io->urbs[i]->transfer_buffer = NULL;
+
if (dma) {
io->urbs[i]->transfer_dma = sg_dma_address(sg);
len = sg_dma_len(sg);
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
- io->urbs[i]->transfer_buffer = NULL;
-#else
- io->urbs[i]->transfer_buffer = sg_virt(sg);
-#endif
} else {
/* hc may use _only_ transfer_buffer */
- io->urbs[i]->transfer_buffer = sg_virt(sg);
len = sg->length;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 43ee943d757..b1b85abb9a2 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -413,8 +413,13 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
} else {
snprintf(dev->devpath, sizeof dev->devpath,
"%s.%d", parent->devpath, port1);
- dev->route = parent->route +
- (port1 << ((parent->level - 1)*4));
+ /* Route string assumes hubs have less than 16 ports */
+ if (port1 < 15)
+ dev->route = parent->route +
+ (port1 << ((parent->level - 1)*4));
+ else
+ dev->route = parent->route +
+ (15 << ((parent->level - 1)*4));
}
dev->dev.parent = &parent->dev;
@@ -914,11 +919,11 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
|| !(bus = dev->bus)
|| !(controller = bus->controller)
|| !controller->dma_mask)
- return -1;
+ return -EINVAL;
/* FIXME generic api broken like pci, can't report errors */
return dma_map_sg(controller, sg, nents,
- is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
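
The route-string arithmetic in usb_alloc_dev() above packs one 4-bit port number per hub tier, with ports 15 and above collapsed to 15. A small illustrative helper, using assumed names rather than the patch's code:

/* Illustration of the route-string math: 4 bits per tier,
 * port numbers >= 15 all encode as 15.
 */
static unsigned route_string(unsigned parent_route, int parent_level, int port1)
{
	unsigned nibble = (port1 < 15) ? port1 : 15;

	return parent_route + (nibble << ((parent_level - 1) * 4));
}

/* Example: a hub at tier 2 with route 0x2 (it sits on port 2 of the
 * first external hub); a device on its port 3 gets
 * route_string(0x2, 2, 3) == 0x32.
 */
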
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c0e0ae2bb8e..9a8b15e6377 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -37,6 +37,13 @@ extern int usb_match_device(struct usb_device *dev,
extern void usb_forced_unbind_intf(struct usb_interface *intf);
extern void usb_rebind_intf(struct usb_interface *intf);
+extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port,
+ void *owner);
+extern int usb_hub_release_port(struct usb_device *hdev, unsigned port,
+ void *owner);
+extern void usb_hub_release_all_ports(struct usb_device *hdev, void *owner);
+extern bool usb_device_is_owned(struct usb_device *udev);
+
extern int usb_hub_init(void);
extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile
new file mode 100644
index 00000000000..dfedee8c45b
--- /dev/null
+++ b/drivers/usb/early/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for early USB devices
+#
+
+obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
new file mode 100644
index 00000000000..1206a26ef89
--- /dev/null
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -0,0 +1,996 @@
+/*
+ * Standalone EHCI usb debug driver
+ *
+ * Originally written by:
+ * Eric W. Biederman <ebiederm@xmission.com> and
+ * Yinghai Lu <yhlu.kernel@gmail.com>
+ *
+ * Changes for early/late printk and HW errata:
+ * Jason Wessel <jason.wessel@windriver.com>
+ * Copyright (C) 2009 Wind River Systems, Inc.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/ehci_def.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/pci-direct.h>
+#include <asm/fixmap.h>
+
+/* The code here is intended to talk directly to the EHCI debug port
+ * and does not require that you have any kind of USB host controller
+ * drivers or USB device drivers compiled into the kernel.
+ *
+ * If you make a change to anything in here, the following test cases
+ * need to pass, with a USB debug device working in each of the
+ * following configurations.
+ *
+ * 1. boot args: earlyprintk=dbgp
+ * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
+ * o kernel compiled with CONFIG_USB_EHCI_HCD=y
+ * 2. boot args: earlyprintk=dbgp,keep
+ * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
+ * o kernel compiled with CONFIG_USB_EHCI_HCD=y
+ * 3. boot args: earlyprintk=dbgp console=ttyUSB0
+ * o kernel has CONFIG_USB_EHCI_HCD=y and
+ * CONFIG_USB_SERIAL_DEBUG=y
+ * 4. boot args: earlyprintk=vga,dbgp
+ * o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
+ * o kernel compiled with CONFIG_USB_EHCI_HCD=y
+ *
+ * For the 4th configuration you can turn on or off the DBGP_DEBUG
+ * such that you can debug the dbgp device's driver code.
+ */
+
+static int dbgp_phys_port = 1;
+
+static struct ehci_caps __iomem *ehci_caps;
+static struct ehci_regs __iomem *ehci_regs;
+static struct ehci_dbg_port __iomem *ehci_debug;
+static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
+static unsigned int dbgp_endpoint_out;
+
+struct ehci_dev {
+ u32 bus;
+ u32 slot;
+ u32 func;
+};
+
+static struct ehci_dev ehci_dev;
+
+#define USB_DEBUG_DEVNUM 127
+
+#define DBGP_DATA_TOGGLE 0x8800
+
+#ifdef DBGP_DEBUG
+#define dbgp_printk printk
+static void dbgp_ehci_status(char *str)
+{
+ if (!ehci_debug)
+ return;
+ dbgp_printk("dbgp: %s\n", str);
+ dbgp_printk(" Debug control: %08x", readl(&ehci_debug->control));
+ dbgp_printk(" ehci cmd : %08x", readl(&ehci_regs->command));
+ dbgp_printk(" ehci conf flg: %08x\n",
+ readl(&ehci_regs->configured_flag));
+ dbgp_printk(" ehci status : %08x", readl(&ehci_regs->status));
+ dbgp_printk(" ehci portsc : %08x\n",
+ readl(&ehci_regs->port_status[dbgp_phys_port - 1]));
+}
+#else
+static inline void dbgp_ehci_status(char *str) { }
+static inline void dbgp_printk(const char *fmt, ...) { }
+#endif
+
+static inline u32 dbgp_pid_update(u32 x, u32 tok)
+{
+ return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
+}
+
+static inline u32 dbgp_len_update(u32 x, u32 len)
+{
+ return (x & ~0x0f) | (len & 0x0f);
+}
+
+/*
+ * USB Packet IDs (PIDs)
+ */
+
+/* token */
+#define USB_PID_OUT 0xe1
+#define USB_PID_IN 0x69
+#define USB_PID_SOF 0xa5
+#define USB_PID_SETUP 0x2d
+/* handshake */
+#define USB_PID_ACK 0xd2
+#define USB_PID_NAK 0x5a
+#define USB_PID_STALL 0x1e
+#define USB_PID_NYET 0x96
+/* data */
+#define USB_PID_DATA0 0xc3
+#define USB_PID_DATA1 0x4b
+#define USB_PID_DATA2 0x87
+#define USB_PID_MDATA 0x0f
+/* Special */
+#define USB_PID_PREAMBLE 0x3c
+#define USB_PID_ERR 0x3c
+#define USB_PID_SPLIT 0x78
+#define USB_PID_PING 0xb4
+#define USB_PID_UNDEF_0 0xf0
+
+#define USB_PID_DATA_TOGGLE 0x88
+#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
+
+#define PCI_CAP_ID_EHCI_DEBUG 0xa
+
+#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
+#define HUB_SHORT_RESET_TIME 10
+#define HUB_LONG_RESET_TIME 200
+#define HUB_RESET_TIMEOUT 500
+
+#define DBGP_MAX_PACKET 8
+#define DBGP_TIMEOUT (250 * 1000)
+
+static int dbgp_wait_until_complete(void)
+{
+ u32 ctrl;
+ int loop = DBGP_TIMEOUT;
+
+ do {
+ ctrl = readl(&ehci_debug->control);
+ /* Stop when the transaction is finished */
+ if (ctrl & DBGP_DONE)
+ break;
+ udelay(1);
+ } while (--loop > 0);
+
+ if (!loop)
+ return -DBGP_TIMEOUT;
+
+ /*
+ * Now that we have observed the completed transaction,
+ * clear the done bit.
+ */
+ writel(ctrl | DBGP_DONE, &ehci_debug->control);
+ return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
+}
+
+static inline void dbgp_mdelay(int ms)
+{
+ int i;
+
+ while (ms--) {
+ for (i = 0; i < 1000; i++)
+ outb(0x1, 0x80);
+ }
+}
+
+static void dbgp_breath(void)
+{
+ /* Sleep to give the debug port a chance to breathe */
+}
+
+static int dbgp_wait_until_done(unsigned ctrl)
+{
+ u32 pids, lpid;
+ int ret;
+ int loop = 3;
+
+retry:
+ writel(ctrl | DBGP_GO, &ehci_debug->control);
+ ret = dbgp_wait_until_complete();
+ pids = readl(&ehci_debug->pids);
+ lpid = DBGP_PID_GET(pids);
+
+ if (ret < 0) {
+ /* A -DBGP_TIMEOUT failure here means the device has
+ * failed, perhaps because it was unplugged, in which
+ * case we do not want to hang the system so the dbgp
+ * will be marked as unsafe to use. EHCI reset is the
+ * only way to recover if you unplug the dbgp device.
+ */
+ if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
+ dbgp_not_safe = 1;
+ return ret;
+ }
+
+ /*
+ * If the port is getting full or it has dropped data
+ * start pacing ourselves, not necessary but it's friendly.
+ */
+ if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
+ dbgp_breath();
+
+ /* If I get a NACK reissue the transmission */
+ if (lpid == USB_PID_NAK) {
+ if (--loop > 0)
+ goto retry;
+ }
+
+ return ret;
+}
+
+static inline void dbgp_set_data(const void *buf, int size)
+{
+ const unsigned char *bytes = buf;
+ u32 lo, hi;
+ int i;
+
+ lo = hi = 0;
+ for (i = 0; i < 4 && i < size; i++)
+ lo |= bytes[i] << (8*i);
+ for (; i < 8 && i < size; i++)
+ hi |= bytes[i] << (8*(i - 4));
+ writel(lo, &ehci_debug->data03);
+ writel(hi, &ehci_debug->data47);
+}
+
+static inline void dbgp_get_data(void *buf, int size)
+{
+ unsigned char *bytes = buf;
+ u32 lo, hi;
+ int i;
+
+ lo = readl(&ehci_debug->data03);
+ hi = readl(&ehci_debug->data47);
+ for (i = 0; i < 4 && i < size; i++)
+ bytes[i] = (lo >> (8*i)) & 0xff;
+ for (; i < 8 && i < size; i++)
+ bytes[i] = (hi >> (8*(i - 4))) & 0xff;
+}
+
+static int dbgp_out(u32 addr, const char *bytes, int size)
+{
+ u32 pids, ctrl;
+
+ pids = readl(&ehci_debug->pids);
+ pids = dbgp_pid_update(pids, USB_PID_OUT);
+
+ ctrl = readl(&ehci_debug->control);
+ ctrl = dbgp_len_update(ctrl, size);
+ ctrl |= DBGP_OUT;
+ ctrl |= DBGP_GO;
+
+ dbgp_set_data(bytes, size);
+ writel(addr, &ehci_debug->address);
+ writel(pids, &ehci_debug->pids);
+ return dbgp_wait_until_done(ctrl);
+}
+
+static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
+ const char *bytes, int size)
+{
+ int ret;
+ int loops = 5;
+ u32 addr;
+ if (size > DBGP_MAX_PACKET)
+ return -1;
+
+ addr = DBGP_EPADDR(devnum, endpoint);
+try_again:
+ if (loops--) {
+ ret = dbgp_out(addr, bytes, size);
+ if (ret == -DBGP_ERR_BAD) {
+ int try_loops = 3;
+ do {
+ /* Emit a dummy packet to re-sync communication
+ * with the debug device */
+ if (dbgp_out(addr, "12345678", 8) >= 0) {
+ udelay(2);
+ goto try_again;
+ }
+ } while (try_loops--);
+ }
+ }
+
+ return ret;
+}
+
+static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
+ int size)
+{
+ u32 pids, addr, ctrl;
+ int ret;
+
+ if (size > DBGP_MAX_PACKET)
+ return -1;
+
+ addr = DBGP_EPADDR(devnum, endpoint);
+
+ pids = readl(&ehci_debug->pids);
+ pids = dbgp_pid_update(pids, USB_PID_IN);
+
+ ctrl = readl(&ehci_debug->control);
+ ctrl = dbgp_len_update(ctrl, size);
+ ctrl &= ~DBGP_OUT;
+ ctrl |= DBGP_GO;
+
+ writel(addr, &ehci_debug->address);
+ writel(pids, &ehci_debug->pids);
+ ret = dbgp_wait_until_done(ctrl);
+ if (ret < 0)
+ return ret;
+
+ if (size > ret)
+ size = ret;
+ dbgp_get_data(data, size);
+ return ret;
+}
+
+static int dbgp_control_msg(unsigned devnum, int requesttype,
+ int request, int value, int index, void *data, int size)
+{
+ u32 pids, addr, ctrl;
+ struct usb_ctrlrequest req;
+ int read;
+ int ret;
+
+ read = (requesttype & USB_DIR_IN) != 0;
+ if (size > (read ? DBGP_MAX_PACKET:0))
+ return -1;
+
+ /* Compute the control message */
+ req.bRequestType = requesttype;
+ req.bRequest = request;
+ req.wValue = cpu_to_le16(value);
+ req.wIndex = cpu_to_le16(index);
+ req.wLength = cpu_to_le16(size);
+
+ pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
+ addr = DBGP_EPADDR(devnum, 0);
+
+ ctrl = readl(&ehci_debug->control);
+ ctrl = dbgp_len_update(ctrl, sizeof(req));
+ ctrl |= DBGP_OUT;
+ ctrl |= DBGP_GO;
+
+ /* Send the setup message */
+ dbgp_set_data(&req, sizeof(req));
+ writel(addr, &ehci_debug->address);
+ writel(pids, &ehci_debug->pids);
+ ret = dbgp_wait_until_done(ctrl);
+ if (ret < 0)
+ return ret;
+
+ /* Read the result */
+ return dbgp_bulk_read(devnum, 0, data, size);
+}
+
+
+/* Find a PCI capability */
+static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
+{
+ u8 pos;
+ int bytes;
+
+ if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
+ PCI_STATUS_CAP_LIST))
+ return 0;
+
+ pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
+ for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
+ u8 id;
+
+ pos &= ~3;
+ id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+
+ pos = read_pci_config_byte(num, slot, func,
+ pos+PCI_CAP_LIST_NEXT);
+ }
+ return 0;
+}
+
+static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
+{
+ u32 class;
+
+ class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
+ if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
+ return 0;
+
+ return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
+}
+
+static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
+{
+ u32 bus, slot, func;
+
+ for (bus = 0; bus < 256; bus++) {
+ for (slot = 0; slot < 32; slot++) {
+ for (func = 0; func < 8; func++) {
+ unsigned cap;
+
+ cap = __find_dbgp(bus, slot, func);
+
+ if (!cap)
+ continue;
+ if (ehci_num-- != 0)
+ continue;
+ *rbus = bus;
+ *rslot = slot;
+ *rfunc = func;
+ return cap;
+ }
+ }
+ }
+ return 0;
+}
+
+static int dbgp_ehci_startup(void)
+{
+ u32 ctrl, cmd, status;
+ int loop;
+
+ /* Claim ownership, but do not enable yet */
+ ctrl = readl(&ehci_debug->control);
+ ctrl |= DBGP_OWNER;
+ ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
+ writel(ctrl, &ehci_debug->control);
+ udelay(1);
+
+ dbgp_ehci_status("EHCI startup");
+ /* Start the ehci running */
+ cmd = readl(&ehci_regs->command);
+ cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
+ cmd |= CMD_RUN;
+ writel(cmd, &ehci_regs->command);
+
+ /* Ensure everything is routed to the EHCI */
+ writel(FLAG_CF, &ehci_regs->configured_flag);
+
+ /* Wait until the controller is no longer halted */
+ loop = 10;
+ do {
+ status = readl(&ehci_regs->status);
+ if (!(status & STS_HALT))
+ break;
+ udelay(1);
+ } while (--loop > 0);
+
+ if (!loop) {
+ dbgp_printk("ehci can not be started\n");
+ return -ENODEV;
+ }
+ dbgp_printk("ehci started\n");
+ return 0;
+}
+
+static int dbgp_ehci_controller_reset(void)
+{
+ int loop = 250 * 1000;
+ u32 cmd;
+
+ /* Reset the EHCI controller */
+ cmd = readl(&ehci_regs->command);
+ cmd |= CMD_RESET;
+ writel(cmd, &ehci_regs->command);
+ do {
+ cmd = readl(&ehci_regs->command);
+ } while ((cmd & CMD_RESET) && (--loop > 0));
+
+ if (!loop) {
+ dbgp_printk("can not reset ehci\n");
+ return -1;
+ }
+ dbgp_ehci_status("ehci reset done");
+ return 0;
+}
+static int ehci_wait_for_port(int port);
+/* Return 0 on success
+ * Return -ENODEV for any general failure
+ * Return -EIO if wait for port fails
+ */
+int dbgp_external_startup(void)
+{
+ int devnum;
+ struct usb_debug_descriptor dbgp_desc;
+ int ret;
+ u32 ctrl, portsc, cmd;
+ int dbg_port = dbgp_phys_port;
+ int tries = 3;
+ int reset_port_tries = 1;
+ int try_hard_once = 1;
+
+try_port_reset_again:
+ ret = dbgp_ehci_startup();
+ if (ret)
+ return ret;
+
+ /* Wait for a device to show up in the debug port */
+ ret = ehci_wait_for_port(dbg_port);
+ if (ret < 0) {
+ portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
+ if (!(portsc & PORT_CONNECT) && try_hard_once) {
+ /* Last ditch effort to try to force enable
+ * the debug device by using the packet test
+ * ehci command to try and wake it up. */
+ try_hard_once = 0;
+ cmd = readl(&ehci_regs->command);
+ cmd &= ~CMD_RUN;
+ writel(cmd, &ehci_regs->command);
+ portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
+ portsc |= PORT_TEST_PKT;
+ writel(portsc, &ehci_regs->port_status[dbg_port - 1]);
+ dbgp_ehci_status("Trying to force debug port online");
+ mdelay(50);
+ dbgp_ehci_controller_reset();
+ goto try_port_reset_again;
+ } else if (reset_port_tries--) {
+ goto try_port_reset_again;
+ }
+ dbgp_printk("No device found in debug port\n");
+ return -EIO;
+ }
+ dbgp_ehci_status("wait for port done");
+
+ /* Enable the debug port */
+ ctrl = readl(&ehci_debug->control);
+ ctrl |= DBGP_CLAIM;
+ writel(ctrl, &ehci_debug->control);
+ ctrl = readl(&ehci_debug->control);
+ if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
+ dbgp_printk("No device in debug port\n");
+ writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
+ return -ENODEV;
+ }
+ dbgp_ehci_status("debug port enabled");
+
+ /* Completely transfer the debug device to the debug controller */
+ portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
+ portsc &= ~PORT_PE;
+ writel(portsc, &ehci_regs->port_status[dbg_port - 1]);
+
+ dbgp_mdelay(100);
+
+try_again:
+ /* Find the debug device and make it device number 127 */
+ for (devnum = 0; devnum <= 127; devnum++) {
+ ret = dbgp_control_msg(devnum,
+ USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
+ &dbgp_desc, sizeof(dbgp_desc));
+ if (ret > 0)
+ break;
+ }
+ if (devnum > 127) {
+ dbgp_printk("Could not find attached debug device\n");
+ goto err;
+ }
+ if (ret < 0) {
+ dbgp_printk("Attached device is not a debug device\n");
+ goto err;
+ }
+ dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
+
+ /* Move the device to 127 if it isn't already there */
+ if (devnum != USB_DEBUG_DEVNUM) {
+ ret = dbgp_control_msg(devnum,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
+ if (ret < 0) {
+ dbgp_printk("Could not move attached device to %d\n",
+ USB_DEBUG_DEVNUM);
+ goto err;
+ }
+ devnum = USB_DEBUG_DEVNUM;
+ dbgp_printk("debug device renamed to 127\n");
+ }
+
+ /* Enable the debug interface */
+ ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
+ if (ret < 0) {
+ dbgp_printk(" Could not enable the debug device\n");
+ goto err;
+ }
+ dbgp_printk("debug interface enabled\n");
+ /* Perform a small write to get the even/odd data state in sync
+ */
+ ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
+ if (ret < 0) {
+ dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
+ goto err;
+ }
+ dbgp_printk("small write done\n");
+ dbgp_not_safe = 0;
+
+ return 0;
+err:
+ if (tries--)
+ goto try_again;
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(dbgp_external_startup);
+
+static int __init ehci_reset_port(int port)
+{
+ u32 portsc;
+ u32 delay_time, delay;
+ int loop;
+
+ dbgp_ehci_status("reset port");
+ /* Reset the usb debug port */
+ portsc = readl(&ehci_regs->port_status[port - 1]);
+ portsc &= ~PORT_PE;
+ portsc |= PORT_RESET;
+ writel(portsc, &ehci_regs->port_status[port - 1]);
+
+ delay = HUB_ROOT_RESET_TIME;
+ for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
+ delay_time += delay) {
+ dbgp_mdelay(delay);
+ portsc = readl(&ehci_regs->port_status[port - 1]);
+ if (!(portsc & PORT_RESET))
+ break;
+ }
+ if (portsc & PORT_RESET) {
+ /* force reset to complete */
+ loop = 100 * 1000;
+ writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
+ &ehci_regs->port_status[port - 1]);
+ do {
+ udelay(1);
+ portsc = readl(&ehci_regs->port_status[port-1]);
+ } while ((portsc & PORT_RESET) && (--loop > 0));
+ }
+
+ /* Device went away? */
+ if (!(portsc & PORT_CONNECT))
+ return -ENOTCONN;
+
+ /* bomb out completely if something weird happened */
+ if ((portsc & PORT_CSC))
+ return -EINVAL;
+
+ /* If we've finished resetting, then break out of the loop */
+ if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
+ return 0;
+ return -EBUSY;
+}
+
+static int ehci_wait_for_port(int port)
+{
+ u32 status;
+ int ret, reps;
+
+ for (reps = 0; reps < 300; reps++) {
+ status = readl(&ehci_regs->status);
+ if (status & STS_PCD)
+ break;
+ dbgp_mdelay(1);
+ }
+ ret = ehci_reset_port(port);
+ if (ret == 0)
+ return 0;
+ return -ENOTCONN;
+}
+
+typedef void (*set_debug_port_t)(int port);
+
+static void __init default_set_debug_port(int port)
+{
+}
+
+static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
+
+static void __init nvidia_set_debug_port(int port)
+{
+ u32 dword;
+ dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
+ 0x74);
+ dword &= ~(0x0f<<12);
+ dword |= ((port & 0x0f)<<12);
+ write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
+ dword);
+ dbgp_printk("set debug port to %d\n", port);
+}
+
+static void __init detect_set_debug_port(void)
+{
+ u32 vendorid;
+
+ vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
+ 0x00);
+
+ if ((vendorid & 0xffff) == 0x10de) {
+ dbgp_printk("using nvidia set_debug_port\n");
+ set_debug_port = nvidia_set_debug_port;
+ }
+}
+
+/* The code in early_ehci_bios_handoff() is derived from the usb pci
+ * quirk initialization, but altered so as to use the early PCI
+ * routines. */
+#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
+#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
+static void __init early_ehci_bios_handoff(void)
+{
+ u32 hcc_params = readl(&ehci_caps->hcc_params);
+ int offset = (hcc_params >> 8) & 0xff;
+ u32 cap;
+ int msec;
+
+ if (!offset)
+ return;
+
+ cap = read_pci_config(ehci_dev.bus, ehci_dev.slot,
+ ehci_dev.func, offset);
+ dbgp_printk("dbgp: ehci BIOS state %08x\n", cap);
+
+ if ((cap & 0xff) == 1 && (cap & EHCI_USBLEGSUP_BIOS)) {
+ dbgp_printk("dbgp: BIOS handoff\n");
+ write_pci_config_byte(ehci_dev.bus, ehci_dev.slot,
+ ehci_dev.func, offset + 3, 1);
+ }
+
+ /* if boot firmware now owns EHCI, spin till it hands it over. */
+ msec = 1000;
+ while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
+ mdelay(10);
+ msec -= 10;
+ cap = read_pci_config(ehci_dev.bus, ehci_dev.slot,
+ ehci_dev.func, offset);
+ }
+
+ if (cap & EHCI_USBLEGSUP_BIOS) {
+ /* well, possibly buggy BIOS... try to shut it down,
+ * and hope nothing goes too wrong */
+ dbgp_printk("dbgp: BIOS handoff failed: %08x\n", cap);
+ write_pci_config_byte(ehci_dev.bus, ehci_dev.slot,
+ ehci_dev.func, offset + 2, 0);
+ }
+
+ /* just in case, always disable EHCI SMIs */
+ write_pci_config_byte(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
+ offset + EHCI_USBLEGCTLSTS, 0);
+}
+
+static int __init ehci_setup(void)
+{
+ u32 ctrl, portsc, hcs_params;
+ u32 debug_port, new_debug_port = 0, n_ports;
+ int ret, i;
+ int port_map_tried;
+ int playtimes = 3;
+
+ early_ehci_bios_handoff();
+
+try_next_time:
+ port_map_tried = 0;
+
+try_next_port:
+
+ hcs_params = readl(&ehci_caps->hcs_params);
+ debug_port = HCS_DEBUG_PORT(hcs_params);
+ dbgp_phys_port = debug_port;
+ n_ports = HCS_N_PORTS(hcs_params);
+
+ dbgp_printk("debug_port: %d\n", debug_port);
+ dbgp_printk("n_ports: %d\n", n_ports);
+ dbgp_ehci_status("");
+
+ for (i = 1; i <= n_ports; i++) {
+ portsc = readl(&ehci_regs->port_status[i-1]);
+ dbgp_printk("portstatus%d: %08x\n", i, portsc);
+ }
+
+ if (port_map_tried && (new_debug_port != debug_port)) {
+ if (--playtimes) {
+ set_debug_port(new_debug_port);
+ goto try_next_time;
+ }
+ return -1;
+ }
+
+ /* Only reset the controller if it is not already in the
+ * configured state */
+ if (!(readl(&ehci_regs->configured_flag) & FLAG_CF)) {
+ if (dbgp_ehci_controller_reset() != 0)
+ return -1;
+ } else {
+ dbgp_ehci_status("ehci skip - already configured");
+ }
+
+ ret = dbgp_external_startup();
+ if (ret == -EIO)
+ goto next_debug_port;
+
+ if (ret < 0) {
+ /* Things didn't work so remove my claim */
+ ctrl = readl(&ehci_debug->control);
+ ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
+ writel(ctrl, &ehci_debug->control);
+ return -1;
+ }
+ return 0;
+
+next_debug_port:
+ port_map_tried |= (1<<(debug_port - 1));
+ new_debug_port = ((debug_port-1+1)%n_ports) + 1;
+ if (port_map_tried != ((1<<n_ports) - 1)) {
+ set_debug_port(new_debug_port);
+ goto try_next_port;
+ }
+ if (--playtimes) {
+ set_debug_port(new_debug_port);
+ goto try_next_time;
+ }
+
+ return -1;
+}
+
+int __init early_dbgp_init(char *s)
+{
+ u32 debug_port, bar, offset;
+ u32 bus, slot, func, cap;
+ void __iomem *ehci_bar;
+ u32 dbgp_num;
+ u32 bar_val;
+ char *e;
+ int ret;
+ u8 byte;
+
+ if (!early_pci_allowed())
+ return -1;
+
+ dbgp_num = 0;
+ if (*s)
+ dbgp_num = simple_strtoul(s, &e, 10);
+ dbgp_printk("dbgp_num: %d\n", dbgp_num);
+
+ cap = find_dbgp(dbgp_num, &bus, &slot, &func);
+ if (!cap)
+ return -1;
+
+ dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
+ func);
+
+ debug_port = read_pci_config(bus, slot, func, cap);
+ bar = (debug_port >> 29) & 0x7;
+ bar = (bar * 4) + 0xc;
+ offset = (debug_port >> 16) & 0xfff;
+ dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
+ if (bar != PCI_BASE_ADDRESS_0) {
+ dbgp_printk("only debug ports on bar 1 handled.\n");
+
+ return -1;
+ }
+
+ bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+ dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
+ if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
+ dbgp_printk("only simple 32bit mmio bars supported\n");
+
+ return -1;
+ }
+
+ /* double check if the mem space is enabled */
+ byte = read_pci_config_byte(bus, slot, func, 0x04);
+ if (!(byte & 0x2)) {
+ byte |= 0x02;
+ write_pci_config_byte(bus, slot, func, 0x04, byte);
+ dbgp_printk("mmio for ehci enabled\n");
+ }
+
+ /*
+ * FIXME I don't have the bar size so just guess PAGE_SIZE is more
+ * than enough. 1K is the biggest I have seen.
+ */
+ set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
+ ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
+ ehci_bar += bar_val & ~PAGE_MASK;
+ dbgp_printk("ehci_bar: %p\n", ehci_bar);
+
+ ehci_caps = ehci_bar;
+ ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase));
+ ehci_debug = ehci_bar + offset;
+ ehci_dev.bus = bus;
+ ehci_dev.slot = slot;
+ ehci_dev.func = func;
+
+ detect_set_debug_port();
+
+ ret = ehci_setup();
+ if (ret < 0) {
+ dbgp_printk("ehci_setup failed\n");
+ ehci_debug = NULL;
+
+ return -1;
+ }
+ dbgp_ehci_status("early_init_complete");
+
+ return 0;
+}
+
+static void early_dbgp_write(struct console *con, const char *str, u32 n)
+{
+ int chunk, ret;
+ char buf[DBGP_MAX_PACKET];
+ int use_cr = 0;
+ u32 cmd, ctrl;
+ int reset_run = 0;
+
+ if (!ehci_debug || dbgp_not_safe)
+ return;
+
+ cmd = readl(&ehci_regs->command);
+ if (unlikely(!(cmd & CMD_RUN))) {
+ /* If the ehci controller is not in the run state do extended
+ * checks to see if the acpi or some other initialization also
+ * reset the ehci debug port */
+ ctrl = readl(&ehci_debug->control);
+ if (!(ctrl & DBGP_ENABLED)) {
+ dbgp_not_safe = 1;
+ dbgp_external_startup();
+ } else {
+ cmd |= CMD_RUN;
+ writel(cmd, &ehci_regs->command);
+ reset_run = 1;
+ }
+ }
+ while (n > 0) {
+ for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0;
+ str++, chunk++, n--) {
+ if (!use_cr && *str == '\n') {
+ use_cr = 1;
+ buf[chunk] = '\r';
+ str--;
+ n++;
+ continue;
+ }
+ if (use_cr)
+ use_cr = 0;
+ buf[chunk] = *str;
+ }
+ if (chunk > 0) {
+ ret = dbgp_bulk_write(USB_DEBUG_DEVNUM,
+ dbgp_endpoint_out, buf, chunk);
+ }
+ }
+ if (unlikely(reset_run)) {
+ cmd = readl(&ehci_regs->command);
+ cmd &= ~CMD_RUN;
+ writel(cmd, &ehci_regs->command);
+ }
+}
+
+struct console early_dbgp_console = {
+ .name = "earlydbg",
+ .write = early_dbgp_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+int dbgp_reset_prep(void)
+{
+ u32 ctrl;
+
+ dbgp_not_safe = 1;
+ if (!ehci_debug)
+ return 0;
+
+ if (early_dbgp_console.index != -1 &&
+ !(early_dbgp_console.flags & CON_BOOT))
+ return 1;
+ /* This means the console is not initialized, or should get
+ * shut down so as to allow for reuse of the usb device, which
+ * means it is time to shut down the usb debug port. */
+ ctrl = readl(&ehci_debug->control);
+ if (ctrl & DBGP_ENABLED) {
+ ctrl &= ~(DBGP_CLAIM);
+ writel(ctrl, &ehci_debug->control);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dbgp_reset_prep);
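
One detail worth calling out in the driver above: dbgp_pid_update() flips the send PID between DATA0 and DATA1 simply by XOR-ing the PIDS register with DBGP_DATA_TOGGLE (USB_PID_DATA_TOGGLE shifted into the send-PID byte). A standalone check of that identity, for illustration only and not part of the driver:

/* Verify that XOR with USB_PID_DATA_TOGGLE (0x88) toggles between the
 * DATA0 (0xc3) and DATA1 (0x4b) PIDs.
 */
#include <assert.h>

int main(void)
{
	unsigned data0 = 0xc3, data1 = 0x4b, toggle = 0x88;

	assert((data0 ^ toggle) == data1);
	assert((data1 ^ toggle) == data0);
	return 0;
}
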
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 9f986b417c5..33351312327 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -124,7 +124,7 @@ choice
config USB_GADGET_AT91
boolean "Atmel AT91 USB Device Port"
- depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9
+ depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9 && !ARCH_AT91SAM9G45
select USB_GADGET_SELECTED
help
Many Atmel AT91 processors (such as the AT91RM9200) have a
@@ -143,7 +143,7 @@ config USB_AT91
config USB_GADGET_ATMEL_USBA
boolean "Atmel USBA"
select USB_GADGET_DUALSPEED
- depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL
+ depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
USBA is the integrated high-speed USB Device controller on
the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
@@ -627,9 +627,10 @@ config USB_AUDIO
config USB_ETH
tristate "Ethernet Gadget (with CDC Ethernet support)"
depends on NET
+ select CRC32
help
- This driver implements Ethernet style communication, in either
- of two ways:
+ This driver implements Ethernet style communication, in one of
+ several ways:
- The "Communication Device Class" (CDC) Ethernet Control Model.
That protocol is often avoided with pure Ethernet adapters, in
@@ -639,7 +640,11 @@ config USB_ETH
- On hardware that can't implement that protocol, a simple CDC subset
is used, placing fewer demands on USB.
- RNDIS support is a third option, more demanding than that subset.
+ - CDC Ethernet Emulation Model (EEM) is a newer standard that has
+ a simpler interface that can be used by more USB hardware.
+
+ RNDIS support is an additional option, more demanding than that
+ subset.
Within the USB device, this gadget driver exposes a network device
"usbX", where X depends on what other networking devices you have.
@@ -672,6 +677,22 @@ config USB_ETH_RNDIS
XP, you'll need to download drivers from Microsoft's website; a URL
is given in comments found in that info file.
+config USB_ETH_EEM
+ bool "Ethernet Emulation Model (EEM) support"
+ depends on USB_ETH
+ default n
+ help
+ CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+ and therefore can be supported by more hardware. Technically ECM and
+ EEM are designed for different applications. The ECM model extends
+ the network interface to the target (e.g. a USB cable modem), and the
+ EEM model is for mobile devices to communicate with hosts using
+ ethernet over USB. For Linux gadgets, however, the interface with
+ the host is the same (a usbX device), so the differences are minimal.
+
+ If you say "y" here, the Ethernet gadget driver will use the EEM
+ protocol rather than ECM. If unsure, say "n".
+
config USB_GADGETFS
tristate "Gadget Filesystem (EXPERIMENTAL)"
depends on EXPERIMENTAL
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 77352ccc245..d5b65962dd3 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -2378,40 +2378,34 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct udc_request, queue);
- if (req) {
- /*
- * length bytes transfered
- * check dma done of last desc. in PPBDU mode
- */
- if (use_dma_ppb_du) {
- td = udc_get_last_dma_desc(req);
- if (td) {
- dma_done =
- AMD_GETBITS(td->status,
- UDC_DMA_IN_STS_BS);
- /* don't care DMA done */
- req->req.actual =
- req->req.length;
- }
- } else {
- /* assume all bytes transferred */
+ /*
+ * length bytes transferred
+ * check dma done of last desc. in PPBDU mode
+ */
+ if (use_dma_ppb_du) {
+ td = udc_get_last_dma_desc(req);
+ if (td) {
+ dma_done =
+ AMD_GETBITS(td->status,
+ UDC_DMA_IN_STS_BS);
+ /* don't care DMA done */
req->req.actual = req->req.length;
}
+ } else {
+ /* assume all bytes transferred */
+ req->req.actual = req->req.length;
+ }
- if (req->req.actual == req->req.length) {
- /* complete req */
- complete_req(ep, req, 0);
- req->dma_going = 0;
- /* further request available ? */
- if (list_empty(&ep->queue)) {
- /* disable interrupt */
- tmp = readl(
- &dev->regs->ep_irqmsk);
- tmp |= AMD_BIT(ep->num);
- writel(tmp,
- &dev->regs->ep_irqmsk);
- }
-
+ if (req->req.actual == req->req.length) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ req->dma_going = 0;
+ /* further request available ? */
+ if (list_empty(&ep->queue)) {
+ /* disable interrupt */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp |= AMD_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
}
}
}
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 72bae8f39d8..66450a1abc2 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1754,7 +1754,6 @@ static int __init at91udc_probe(struct platform_device *pdev)
IRQF_DISABLED, driver_name, udc)) {
DBG("request vbus irq %d failed\n",
udc->board.vbus_pin);
- free_irq(udc->udp_irq, udc);
retval = -EBUSY;
goto fail3;
}
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index 9f80f4e970b..a3a0f4a27ef 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -106,20 +106,20 @@ static int audio_set_endpoint_req(struct usb_configuration *c,
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
- case SET_CUR:
+ case UAC_SET_CUR:
value = 0;
break;
- case SET_MIN:
+ case UAC_SET_MIN:
break;
- case SET_MAX:
+ case UAC_SET_MAX:
break;
- case SET_RES:
+ case UAC_SET_RES:
break;
- case SET_MEM:
+ case UAC_SET_MEM:
break;
default:
@@ -142,13 +142,13 @@ static int audio_get_endpoint_req(struct usb_configuration *c,
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
- case GET_CUR:
- case GET_MIN:
- case GET_MAX:
- case GET_RES:
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
value = 3;
break;
- case GET_MEM:
+ case UAC_GET_MEM:
break;
default:
break;
@@ -171,11 +171,11 @@ audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
* Audio class messages; interface activation uses set_alt().
*/
switch (ctrl->bRequestType) {
- case USB_AUDIO_SET_ENDPOINT:
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_set_endpoint_req(c, ctrl);
break;
- case USB_AUDIO_GET_ENDPOINT:
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_get_endpoint_req(c, ctrl);
break;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 59e85234fa0..d05397ec8a1 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -602,7 +602,7 @@ static int get_string(struct usb_composite_dev *cdev,
}
}
- for (len = 0; s->wData[len] && len <= 126; len++)
+ for (len = 0; len <= 126 && s->wData[len]; len++)
continue;
if (!len)
return -EINVAL;
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index a56b24d305f..5e096648518 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1306,11 +1306,6 @@ restart:
setup = *(struct usb_ctrlrequest*) urb->setup_packet;
w_index = le16_to_cpu(setup.wIndex);
w_value = le16_to_cpu(setup.wValue);
- if (le16_to_cpu(setup.wLength) !=
- urb->transfer_buffer_length) {
- status = -EOVERFLOW;
- goto return_urb;
- }
/* paranoia, in case of stale queued data */
list_for_each_entry (req, &ep->queue, queue) {
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index bd102f5052b..f37de283d0a 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -61,6 +61,11 @@
* simpler, Microsoft pushes their own approach: RNDIS. The published
* RNDIS specs are ambiguous and appear to be incomplete, and are also
* needlessly complex. They borrow more from CDC ACM than CDC ECM.
+ *
+ * While CDC ECM, CDC Subset, and RNDIS are designed to extend the ethernet
+ * interface to the target, CDC EEM was designed to use ethernet over the USB
+ * link between the host and target. CDC EEM is implemented as an alternative
+ * to those other protocols when that communication model is more appropriate
*/
#define DRIVER_DESC "Ethernet Gadget"
@@ -114,6 +119,7 @@ static inline bool has_rndis(void)
#include "f_rndis.c"
#include "rndis.c"
#endif
+#include "f_eem.c"
#include "u_ether.c"
/*-------------------------------------------------------------------------*/
@@ -150,6 +156,10 @@ static inline bool has_rndis(void)
#define RNDIS_VENDOR_NUM 0x0525 /* NetChip */
#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */
+/* For EEM gadgets */
+#define EEM_VENDOR_NUM 0x0525 /* INVALID - NEEDS TO BE ALLOCATED */
+#define EEM_PRODUCT_NUM 0xa4a1 /* INVALID - NEEDS TO BE ALLOCATED */
+
/*-------------------------------------------------------------------------*/
static struct usb_device_descriptor device_desc = {
@@ -246,8 +256,16 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_ETH_EEM
+static int use_eem = 1;
+#else
+static int use_eem;
+#endif
+module_param(use_eem, bool, 0);
+MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
+
/*
- * We _always_ have an ECM or CDC Subset configuration.
+ * We _always_ have an ECM, CDC Subset, or EEM configuration.
*/
static int __init eth_do_config(struct usb_configuration *c)
{
@@ -258,7 +276,9 @@ static int __init eth_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- if (can_support_ecm(c->cdev->gadget))
+ if (use_eem)
+ return eem_bind_config(c);
+ else if (can_support_ecm(c->cdev->gadget))
return ecm_bind_config(c, hostaddr);
else
return geth_bind_config(c, hostaddr);
@@ -286,7 +306,12 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
return status;
/* set up main config label and device descriptor */
- if (can_support_ecm(cdev->gadget)) {
+ if (use_eem) {
+ /* EEM */
+ eth_config_driver.label = "CDC Ethernet (EEM)";
+ device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM);
+ device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM);
+ } else if (can_support_ecm(cdev->gadget)) {
/* ECM */
eth_config_driver.label = "CDC Ethernet (ECM)";
} else {
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 66527ba2d2e..98e9bb97729 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -28,6 +28,9 @@ static int audio_buf_size = 48000;
module_param(audio_buf_size, int, S_IRUGO);
MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+
/*
* DESCRIPTORS ... most are static, but strings and full
* configuration descriptors are built on demand.
@@ -50,16 +53,16 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = {
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
};
-DECLARE_USB_AC_HEADER_DESCRIPTOR(2);
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
-#define USB_DT_AC_HEADER_LENGH USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
/* B.3.2 Class-Specific AC Interface Descriptor */
-static struct usb_ac_header_descriptor_2 ac_header_desc = {
- .bLength = USB_DT_AC_HEADER_LENGH,
+static struct uac_ac_header_descriptor_2 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = HEADER,
+ .bDescriptorSubtype = UAC_HEADER,
.bcdADC = __constant_cpu_to_le16(0x0100),
- .wTotalLength = __constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_AC_HEADER_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
.baInterfaceNr = {
[0] = F_AUDIO_AC_INTERFACE,
@@ -68,33 +71,33 @@ static struct usb_ac_header_descriptor_2 ac_header_desc = {
};
#define INPUT_TERMINAL_ID 1
-static struct usb_input_terminal_descriptor input_terminal_desc = {
- .bLength = USB_DT_AC_INPUT_TERMINAL_SIZE,
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+ .bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = INPUT_TERMINAL,
+ .bDescriptorSubtype = UAC_INPUT_TERMINAL,
.bTerminalID = INPUT_TERMINAL_ID,
- .wTerminalType = USB_AC_TERMINAL_STREAMING,
+ .wTerminalType = UAC_TERMINAL_STREAMING,
.bAssocTerminal = 0,
.wChannelConfig = 0x3,
};
-DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0);
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
#define FEATURE_UNIT_ID 2
-static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = {
- .bLength = USB_DT_AC_FEATURE_UNIT_SIZE(0),
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+ .bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = FEATURE_UNIT,
+ .bDescriptorSubtype = UAC_FEATURE_UNIT,
.bUnitID = FEATURE_UNIT_ID,
.bSourceID = INPUT_TERMINAL_ID,
.bControlSize = 2,
- .bmaControls[0] = (FU_MUTE | FU_VOLUME),
+ .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
};
static struct usb_audio_control mute_control = {
.list = LIST_HEAD_INIT(mute_control.list),
.name = "Mute Control",
- .type = MUTE_CONTROL,
+ .type = UAC_MUTE_CONTROL,
/* Todo: add real Mute control code */
.set = generic_set_cmd,
.get = generic_get_cmd,
@@ -103,7 +106,7 @@ static struct usb_audio_control mute_control = {
static struct usb_audio_control volume_control = {
.list = LIST_HEAD_INIT(volume_control.list),
.name = "Volume Control",
- .type = VOLUME_CONTROL,
+ .type = UAC_VOLUME_CONTROL,
/* Todo: add real Volume control code */
.set = generic_set_cmd,
.get = generic_get_cmd,
@@ -113,17 +116,17 @@ static struct usb_audio_control_selector feature_unit = {
.list = LIST_HEAD_INIT(feature_unit.list),
.id = FEATURE_UNIT_ID,
.name = "Mute & Volume Control",
- .type = FEATURE_UNIT,
+ .type = UAC_FEATURE_UNIT,
.desc = (struct usb_descriptor_header *)&feature_unit_desc,
};
#define OUTPUT_TERMINAL_ID 3
-static struct usb_output_terminal_descriptor output_terminal_desc = {
- .bLength = USB_DT_AC_OUTPUT_TERMINAL_SIZE,
+static struct uac_output_terminal_descriptor output_terminal_desc = {
+ .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = OUTPUT_TERMINAL,
+ .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
.bTerminalID = OUTPUT_TERMINAL_ID,
- .wTerminalType = USB_AC_OUTPUT_TERMINAL_SPEAKER,
+ .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
.bAssocTerminal = FEATURE_UNIT_ID,
.bSourceID = FEATURE_UNIT_ID,
};
@@ -148,22 +151,22 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
};
/* B.4.2 Class-Specific AS Interface Descriptor */
-static struct usb_as_header_descriptor as_header_desc = {
- .bLength = USB_DT_AS_HEADER_SIZE,
+static struct uac_as_header_descriptor as_header_desc = {
+ .bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = AS_GENERAL,
+ .bDescriptorSubtype = UAC_AS_GENERAL,
.bTerminalLink = INPUT_TERMINAL_ID,
.bDelay = 1,
- .wFormatTag = USB_AS_AUDIO_FORMAT_TYPE_I_PCM,
+ .wFormatTag = UAC_FORMAT_TYPE_I_PCM,
};
-DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1);
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
-static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = {
- .bLength = USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+ .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = FORMAT_TYPE,
- .bFormatType = USB_AS_FORMAT_TYPE_I,
+ .bDescriptorSubtype = UAC_FORMAT_TYPE,
+ .bFormatType = UAC_FORMAT_TYPE_I,
.bSubframeSize = 2,
.bBitResolution = 16,
.bSamFreqType = 1,
@@ -174,17 +177,17 @@ static struct usb_endpoint_descriptor as_out_ep_desc __initdata = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_AS_ENDPOINT_ADAPTIVE
+ .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE
| USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = __constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
.bInterval = 4,
};
/* Class-specific AS ISO OUT Endpoint Descriptor */
-static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = {
- .bLength = USB_AS_ISO_ENDPOINT_DESC_SIZE,
+static struct uac_iso_endpoint_descriptor as_iso_out_desc __initdata = {
+ .bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
- .bDescriptorSubtype = EP_GENERAL,
+ .bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 1,
.bLockDelayUnits = 1,
.wLockDelay = __constant_cpu_to_le16(1),
@@ -456,11 +459,11 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
* Audio class messages; interface activation uses set_alt().
*/
switch (ctrl->bRequestType) {
- case USB_AUDIO_SET_INTF:
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
value = audio_set_intf_req(f, ctrl);
break;
- case USB_AUDIO_GET_INTF:
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
value = audio_get_intf_req(f, ctrl);
break;
@@ -632,6 +635,18 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
/*-------------------------------------------------------------------------*/
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+ con->data[cmd] = value;
+
+ return 0;
+}
+
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+ return con->data[cmd];
+}
+
 /* Todo: add more control selectors dynamically */
int __init control_selector_init(struct f_audio *audio)
{
@@ -642,10 +657,10 @@ int __init control_selector_init(struct f_audio *audio)
list_add(&mute_control.list, &feature_unit.control);
list_add(&volume_control.list, &feature_unit.control);
- volume_control.data[_CUR] = 0xffc0;
- volume_control.data[_MIN] = 0xe3a0;
- volume_control.data[_MAX] = 0xfff0;
- volume_control.data[_RES] = 0x0030;
+ volume_control.data[UAC__CUR] = 0xffc0;
+ volume_control.data[UAC__MIN] = 0xe3a0;
+ volume_control.data[UAC__MAX] = 0xfff0;
+ volume_control.data[UAC__RES] = 0x0030;
return 0;
}
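
The generic_set_cmd()/generic_get_cmd() helpers added above back each audio control with a small per-control value table indexed by UAC__CUR, UAC__MIN, UAC__MAX and UAC__RES. A minimal standalone sketch of that table-backed set/get pattern, assuming nothing beyond what the hunk shows (this is a userspace mock with made-up names, not the kernel structures):

#include <stdio.h>

enum { CUR, MIN, MAX, RES, NVALS };	/* stands in for UAC__CUR ... UAC__RES */

struct mock_control {
	const char *name;
	int data[NVALS];
	int (*set)(struct mock_control *con, int cmd, int value);
	int (*get)(struct mock_control *con, int cmd);
};

static int mock_set(struct mock_control *con, int cmd, int value)
{
	con->data[cmd] = value;		/* same idea as generic_set_cmd() */
	return 0;
}

static int mock_get(struct mock_control *con, int cmd)
{
	return con->data[cmd];		/* same idea as generic_get_cmd() */
}

int main(void)
{
	struct mock_control volume = {
		.name	= "Volume Control",
		.set	= mock_set,
		.get	= mock_get,
	};

	/* the host issues SET_CUR with a new volume; the gadget just records it */
	volume.set(&volume, CUR, 0xff00);
	printf("%s CUR = 0x%04x\n", volume.name, volume.get(&volume, CUR));
	return 0;
}
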
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
new file mode 100644
index 00000000000..0a577d5694f
--- /dev/null
+++ b/drivers/usb/gadget/f_eem.c
@@ -0,0 +1,562 @@
+/*
+ * f_eem.c -- USB CDC Ethernet (EEM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 EF Johnson Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+
+#include "u_ether.h"
+
+#define EEM_HLEN 2
+
+/*
+ * This function is a "CDC Ethernet Emulation Model" (CDC EEM)
+ * Ethernet link.
+ */
+
+struct eem_ep_descs {
+ struct usb_endpoint_descriptor *in;
+ struct usb_endpoint_descriptor *out;
+};
+
+struct f_eem {
+ struct gether port;
+ u8 ctrl_id;
+
+ struct eem_ep_descs fs;
+ struct eem_ep_descs hs;
+};
+
+static inline struct f_eem *func_to_eem(struct usb_function *f)
+{
+ return container_of(f, struct f_eem, port.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor eem_intf __initdata = {
+ .bLength = sizeof eem_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM,
+ .bInterfaceProtocol = USB_CDC_PROTO_EEM,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eem_fs_function[] __initdata = {
+ /* CDC EEM control descriptors */
+ (struct usb_descriptor_header *) &eem_intf,
+ (struct usb_descriptor_header *) &eem_fs_in_desc,
+ (struct usb_descriptor_header *) &eem_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eem_hs_function[] __initdata = {
+ /* CDC EEM control descriptors */
+ (struct usb_descriptor_header *) &eem_intf,
+ (struct usb_descriptor_header *) &eem_hs_in_desc,
+ (struct usb_descriptor_header *) &eem_hs_out_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string eem_string_defs[] = {
+ [0].s = "CDC Ethernet Emulation Model (EEM)",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings eem_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = eem_string_defs,
+};
+
+static struct usb_gadget_strings *eem_strings[] = {
+ &eem_string_table,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+
+static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_eem *eem = func_to_eem(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct net_device *net;
+
+ /* we know alt == 0, so this is an activation or a reset */
+ if (alt != 0)
+ goto fail;
+
+ if (intf == eem->ctrl_id) {
+
+ if (eem->port.in_ep->driver_data) {
+ DBG(cdev, "reset eem\n");
+ gether_disconnect(&eem->port);
+ }
+
+ if (!eem->port.in) {
+ DBG(cdev, "init eem\n");
+ eem->port.in = ep_choose(cdev->gadget,
+ eem->hs.in, eem->fs.in);
+ eem->port.out = ep_choose(cdev->gadget,
+ eem->hs.out, eem->fs.out);
+ }
+
+ /* zlps should not occur because zero-length EEM packets
+ * will be inserted in those cases where they would occur
+ */
+ eem->port.is_zlp_ok = 1;
+ eem->port.cdc_filter = DEFAULT_FILTER;
+ DBG(cdev, "activate eem\n");
+ net = gether_connect(&eem->port);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+ } else
+ goto fail;
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static void eem_disable(struct usb_function *f)
+{
+ struct f_eem *eem = func_to_eem(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ DBG(cdev, "eem deactivated\n");
+
+ if (eem->port.in_ep->driver_data)
+ gether_disconnect(&eem->port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EEM function driver setup/binding */
+
+static int __init
+eem_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_eem *eem = func_to_eem(f);
+ int status;
+ struct usb_ep *ep;
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ eem->ctrl_id = status;
+ eem_intf.bInterfaceNumber = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc);
+ if (!ep)
+ goto fail;
+ eem->port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc);
+ if (!ep)
+ goto fail;
+ eem->port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* copy descriptors, and track endpoint copies */
+ f->descriptors = usb_copy_descriptors(eem_fs_function);
+ if (!f->descriptors)
+ goto fail;
+
+ eem->fs.in = usb_find_endpoint(eem_fs_function,
+ f->descriptors, &eem_fs_in_desc);
+ eem->fs.out = usb_find_endpoint(eem_fs_function,
+ f->descriptors, &eem_fs_out_desc);
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ eem_hs_in_desc.bEndpointAddress =
+ eem_fs_in_desc.bEndpointAddress;
+ eem_hs_out_desc.bEndpointAddress =
+ eem_fs_out_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(eem_hs_function);
+ if (!f->hs_descriptors)
+ goto fail;
+
+ eem->hs.in = usb_find_endpoint(eem_hs_function,
+ f->hs_descriptors, &eem_hs_in_desc);
+ eem->hs.out = usb_find_endpoint(eem_hs_function,
+ f->hs_descriptors, &eem_hs_out_desc);
+ }
+
+ DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ eem->port.in_ep->name, eem->port.out_ep->name);
+ return 0;
+
+fail:
+ if (f->descriptors)
+ usb_free_descriptors(f->descriptors);
+
+ /* we might as well release our claims on endpoints */
+ if (eem->port.out)
+ eem->port.out_ep->driver_data = NULL;
+ if (eem->port.in)
+ eem->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+static void
+eem_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_eem *eem = func_to_eem(f);
+
+ DBG(c->cdev, "eem unbind\n");
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+ kfree(eem);
+}
+
+static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
+/*
+ * Add the EEM header and ethernet checksum.
+ * We currently do not attempt to put multiple ethernet frames
+ * into a single USB transfer
+ */
+static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
+{
+ struct sk_buff *skb2 = NULL;
+ struct usb_ep *in = port->in_ep;
+ int padlen = 0;
+ u16 len = skb->len;
+
+ if (!skb_cloned(skb)) {
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+
+ /* When ((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
+ * stick two bytes of zero-length EEM packet on the end.
+ */
+ if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0)
+ padlen += 2;
+
+ if ((tailroom >= (ETH_FCS_LEN + padlen)) &&
+ (headroom >= EEM_HLEN))
+ goto done;
+ }
+
+ skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return skb;
+
+done:
+ /* use the "no CRC" option */
+ put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));
+
+ /* EEM packet header format:
+ * b0..13: length of ethernet frame
+ * b14: bmCRC (0 == sentinel CRC)
+ * b15: bmType (0 == data)
+ */
+ len = skb->len;
+ put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2));
+
+ /* add a zero-length EEM packet, if needed */
+ if (padlen)
+ put_unaligned_le16(0, skb_put(skb, 2));
+
+ return skb;
+}
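
The two bytes that eem_wrap() pushes in front of the frame are a little-endian bitfield: bits 0..13 carry the length of the ethernet frame plus the 4-byte CRC/sentinel word, bit 14 is bmCRC and bit 15 is bmType (0 for data). The padlen logic above also appends an explicit zero header whenever the wrapped transfer would land on an exact multiple of the IN endpoint's maxpacket size, so the link never has to rely on hardware ZLPs. A standalone round trip of that header packing, for illustration only (the layout follows the comment above; none of this is kernel code):

#include <stdint.h>
#include <stdio.h>

/* pack an EEM data-packet header: b0..13 = length, b14 = bmCRC, b15 = bmType(0) */
static uint16_t eem_data_hdr(uint16_t len, int bmcrc)
{
	return (len & 0x3FFF) | ((bmcrc ? 1 : 0) << 14);
}

int main(void)
{
	/* a full-size ethernet frame (1514 bytes) plus the 4-byte CRC/sentinel */
	uint16_t hdr = eem_data_hdr(1514 + 4, 1);

	printf("header = 0x%04x\n", hdr);
	printf("length = %u\n", hdr & 0x3FFF);
	printf("bmCRC  = %u\n", (hdr >> 14) & 1);
	printf("bmType = %u (0 == data)\n", (hdr >> 15) & 1);
	return 0;
}
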
+
+/*
+ * Remove the EEM header. Note that there can be many EEM packets in a single
+ * USB transfer, so we need to break them out and handle them independently.
+ */
+static int eem_unwrap(struct gether *port,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ struct usb_composite_dev *cdev = port->func.config->cdev;
+ int status = 0;
+
+ do {
+ struct sk_buff *skb2;
+ u16 header;
+ u16 len = 0;
+
+ if (skb->len < EEM_HLEN) {
+ status = -EINVAL;
+ DBG(cdev, "invalid EEM header\n");
+ goto error;
+ }
+
+ /* remove the EEM header */
+ header = get_unaligned_le16(skb->data);
+ skb_pull(skb, EEM_HLEN);
+
+ /* EEM packet header format:
+ * b0..14: EEM type dependent (data or command)
+ * b15: bmType (0 == data, 1 == command)
+ */
+ if (header & BIT(15)) {
+ struct usb_request *req = cdev->req;
+ u16 bmEEMCmd;
+
+ /* EEM command packet format:
+ * b0..10: bmEEMCmdParam
+ * b11..13: bmEEMCmd
+ * b14: reserved (must be zero)
+ * b15: bmType (1 == command)
+ */
+ if (header & BIT(14))
+ continue;
+
+ bmEEMCmd = (header >> 11) & 0x7;
+ switch (bmEEMCmd) {
+ case 0: /* echo */
+ len = header & 0x7FF;
+ if (skb->len < len) {
+ status = -EOVERFLOW;
+ goto error;
+ }
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2)) {
+ DBG(cdev, "EEM echo response error\n");
+ goto next;
+ }
+ skb_trim(skb2, len);
+ put_unaligned_le16(BIT(15) | BIT(11) | len,
+ skb_push(skb2, 2));
+ skb_copy_bits(skb, 0, req->buf, skb->len);
+ req->length = skb->len;
+ req->complete = eem_cmd_complete;
+ req->zero = 1;
+ if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
+ DBG(cdev, "echo response queue fail\n");
+ break;
+
+ case 1: /* echo response */
+ case 2: /* suspend hint */
+ case 3: /* response hint */
+ case 4: /* response complete hint */
+ case 5: /* tickle */
+ default: /* reserved */
+ continue;
+ }
+ } else {
+ u32 crc, crc2;
+ struct sk_buff *skb3;
+
+ /* check for zero-length EEM packet */
+ if (header == 0)
+ continue;
+
+ /* EEM data packet format:
+ * b0..13: length of ethernet frame
+ * b14: bmCRC (0 == sentinel, 1 == calculated)
+ * b15: bmType (0 == data)
+ */
+ len = header & 0x3FFF;
+ if ((skb->len < len)
+ || (len < (ETH_HLEN + ETH_FCS_LEN))) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ /* validate CRC */
+ crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
+ if (header & BIT(14)) {
+ crc = get_unaligned_le32(skb->data + len
+ - ETH_FCS_LEN);
+ crc2 = ~crc32_le(~0,
+ skb->data,
+ skb->len - ETH_FCS_LEN);
+ } else {
+ crc = get_unaligned_be32(skb->data + len
+ - ETH_FCS_LEN);
+ crc2 = 0xdeadbeef;
+ }
+ if (crc != crc2) {
+ DBG(cdev, "invalid EEM CRC\n");
+ goto next;
+ }
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2)) {
+ DBG(cdev, "unable to unframe EEM packet\n");
+ continue;
+ }
+ skb_trim(skb2, len - ETH_FCS_LEN);
+
+ skb3 = skb_copy_expand(skb2,
+ NET_IP_ALIGN,
+ 0,
+ GFP_ATOMIC);
+ if (unlikely(!skb3)) {
+ DBG(cdev, "unable to realign EEM packet\n");
+ dev_kfree_skb_any(skb2);
+ continue;
+ }
+ dev_kfree_skb_any(skb2);
+ skb_queue_tail(list, skb3);
+ }
+next:
+ skb_pull(skb, len);
+ } while (skb->len);
+
+error:
+ dev_kfree_skb_any(skb);
+ return status;
+}
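
eem_unwrap() has to separate data packets from the in-band EEM command packets that share the same 2-byte header, and for an echo command it must turn the payload around on the IN endpoint. A tiny standalone decoder for that header, matching the bit layout in the comments above (illustration only; the sample header values are made up):

#include <stdint.h>
#include <stdio.h>

/* decode an EEM header: b15 = bmType (1 == command), and for commands
 * b14 = reserved, b11..13 = bmEEMCmd, b0..10 = bmEEMCmdParam
 * (for echo, the param is the payload length)
 */
static void decode_hdr(uint16_t hdr)
{
	if (!(hdr & (1 << 15))) {
		printf("0x%04x: data packet, length %u\n", hdr, hdr & 0x3FFF);
		return;
	}
	if (hdr & (1 << 14)) {
		printf("0x%04x: reserved bit set, ignored\n", hdr);
		return;
	}
	printf("0x%04x: command %u, param 0x%03x\n",
	       hdr, (hdr >> 11) & 0x7, hdr & 0x7FF);
}

int main(void)
{
	decode_hdr(0x05ee);			/* data packet, 1518 bytes */
	decode_hdr((1 << 15) | 16);		/* echo command, 16-byte payload */
	decode_hdr((1 << 15) | (1 << 11) | 16);	/* echo response */
	return 0;
}
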
+
+/**
+ * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration
+ * @c: the configuration to support the network link
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int __init eem_bind_config(struct usb_configuration *c)
+{
+ struct f_eem *eem;
+ int status;
+
+ /* maybe allocate device-global string IDs */
+ if (eem_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ eem_string_defs[0].id = status;
+ eem_intf.iInterface = status;
+ }
+
+ /* allocate and initialize one new instance */
+ eem = kzalloc(sizeof *eem, GFP_KERNEL);
+ if (!eem)
+ return -ENOMEM;
+
+ eem->port.cdc_filter = DEFAULT_FILTER;
+
+ eem->port.func.name = "cdc_eem";
+ eem->port.func.strings = eem_strings;
+ /* descriptors are per-instance copies */
+ eem->port.func.bind = eem_bind;
+ eem->port.func.unbind = eem_unbind;
+ eem->port.func.set_alt = eem_set_alt;
+ eem->port.func.setup = eem_setup;
+ eem->port.func.disable = eem_disable;
+ eem->port.wrap = eem_wrap;
+ eem->port.unwrap = eem_unwrap;
+ eem->port.header_len = EEM_HLEN;
+
+ status = usb_add_function(c, &eem->port.func);
+ if (status)
+ kfree(eem);
+ return status;
+}
+
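
The kernel-doc above spells out the lifecycle: gether_setup() before the configuration binds, eem_bind_config() from the configuration's bind callback, gether_cleanup() before module unload. A minimal sketch of that contract, modeled on the ether.c changes in this same patch (the my_* names are illustrative and the composite plumbing such as usb_add_config() is elided):

/* Sketch only: shows the ordering documented above, not a complete gadget. */
static u8 hostaddr[ETH_ALEN];

static int __init my_do_config(struct usb_configuration *c)
{
	/* runs as the configuration's bind callback */
	return eem_bind_config(c);
}

static int __init my_bind(struct usb_composite_dev *cdev)
{
	int status;

	status = gether_setup(cdev->gadget, hostaddr);
	if (status < 0)
		return status;

	/* ... add a configuration whose bind callback is my_do_config() ... */
	return 0;
}

static int __exit my_unbind(struct usb_composite_dev *cdev)
{
	gether_cleanup();
	return 0;
}
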
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index eb6ddfc2085..6cb29d3df57 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -22,7 +22,6 @@
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
-#include <linux/utsname.h>
#include <linux/device.h>
#include "g_zero.h"
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index 46d6266f30e..b4a3ba654ea 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -24,7 +24,6 @@
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
-#include <linux/utsname.h>
#include <linux/device.h>
#include "u_serial.h"
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 424a37c5773..c9966cc07d3 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -286,12 +286,17 @@ static struct usb_gadget_strings *rndis_strings[] = {
/*-------------------------------------------------------------------------*/
-static struct sk_buff *rndis_add_header(struct sk_buff *skb)
+static struct sk_buff *rndis_add_header(struct gether *port,
+ struct sk_buff *skb)
{
- skb = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
- if (skb)
- rndis_add_hdr(skb);
- return skb;
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+ if (skb2)
+ rndis_add_hdr(skb2);
+
+ dev_kfree_skb_any(skb);
+ return skb2;
}
static void rndis_response_available(void *_rndis)
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index bffe91d525f..09cba273d2d 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -22,7 +22,6 @@
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
-#include <linux/utsname.h>
#include <linux/device.h>
#include "g_zero.h"
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index d701bf4698d..7881f12413c 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2751,6 +2751,10 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
/*-------------------------------------------------------------------------*/
static struct of_device_id __devinitdata qe_udc_match[] = {
{
+ .compatible = "fsl,mpc8323-qe-usb",
+ .data = (void *)PORT_QE,
+ },
+ {
.compatible = "fsl,mpc8360-qe-usb",
.data = (void *)PORT_QE,
},
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index b9312dc6e04..d0b1e836f0e 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -191,7 +191,7 @@ module_param(qlen, uint, S_IRUGO);
#define GMIDI_MS_INTERFACE 1
#define GMIDI_NUM_INTERFACES 2
-DECLARE_USB_AC_HEADER_DESCRIPTOR(1);
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(1);
@@ -237,12 +237,12 @@ static const struct usb_interface_descriptor ac_interface_desc = {
};
/* B.3.2 Class-Specific AC Interface Descriptor */
-static const struct usb_ac_header_descriptor_1 ac_header_desc = {
- .bLength = USB_DT_AC_HEADER_SIZE(1),
+static const struct uac_ac_header_descriptor_1 ac_header_desc = {
+ .bLength = UAC_DT_AC_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
.bcdADC = cpu_to_le16(0x0100),
- .wTotalLength = cpu_to_le16(USB_DT_AC_HEADER_SIZE(1)),
+ .wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)),
.bInCollection = 1,
.baInterfaceNr = {
[0] = GMIDI_MS_INTERFACE,
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 7d33f50b587..c44367fea18 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2033,7 +2033,7 @@ gadgetfs_create_file (struct super_block *sb, char const *name,
return inode;
}
-static struct super_operations gadget_fs_operations = {
+static const struct super_operations gadget_fs_operations = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index ed21e263f83..e6fedbd5a65 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -56,6 +56,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
/*
* This driver is PXA25x only. Grab the right register definitions.
@@ -1008,15 +1009,27 @@ static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
return 0;
}
+/* boards may consume current from VBUS, up to 100-500mA based on config.
+ * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
+ * violate USB specs.
+ */
+static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+ struct pxa25x_udc *udc;
+
+ udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
+ if (udc->transceiver)
+ return otg_set_power(udc->transceiver, mA);
+ return -EOPNOTSUPP;
+}
+
static const struct usb_gadget_ops pxa25x_udc_ops = {
.get_frame = pxa25x_udc_get_frame,
.wakeup = pxa25x_udc_wakeup,
.vbus_session = pxa25x_udc_vbus_session,
.pullup = pxa25x_udc_pullup,
-
- // .vbus_draw ... boards may consume current from VBUS, up to
- // 100-500mA based on config. the 500uA suspend ceiling means
- // that exclusively vbus-powered PXA designs violate USB specs.
+ .vbus_draw = pxa25x_udc_vbus_draw,
};
/*-------------------------------------------------------------------------*/
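
With a .vbus_draw method in place the gadget stack can pass the configured power budget down to the OTG transceiver instead of silently ignoring it. A hedged sketch of the calling side (usb_gadget_vbus_draw() is the standard wrapper in <linux/usb/gadget.h>; the helper name and the 100 mA figure are only examples):

#include <linux/kernel.h>
#include <linux/usb/gadget.h>

/* After SET_CONFIGURATION, report how much VBUS current this configuration
 * draws; on pxa25x this now reaches otg_set_power() via the new vbus_draw op.
 * Controllers without the op return -EOPNOTSUPP.
 */
static void report_power_budget(struct usb_gadget *gadget)
{
	int err = usb_gadget_vbus_draw(gadget, 100);	/* 100 mA, for example */

	if (err && err != -EOPNOTSUPP)
		printk(KERN_WARNING "vbus_draw failed: %d\n", err);
}
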
@@ -1303,9 +1316,23 @@ fail:
* for set_configuration as well as eventual disconnect.
*/
DMSG("registered gadget driver '%s'\n", driver->driver.name);
+
+ /* connect to bus through transceiver */
+ if (dev->transceiver) {
+ retval = otg_set_peripheral(dev->transceiver, &dev->gadget);
+ if (retval) {
+ DMSG("can't bind to transceiver\n");
+ if (driver->unbind)
+ driver->unbind(&dev->gadget);
+ goto bind_fail;
+ }
+ }
+
pullup(dev);
dump_state(dev);
return 0;
+bind_fail:
+ return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
@@ -1351,6 +1378,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
stop_activity(dev, driver);
local_irq_enable();
+ if (dev->transceiver)
+ (void) otg_set_peripheral(dev->transceiver, NULL);
+
driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@@ -2162,6 +2192,8 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->mach = pdev->dev.platform_data;
+ dev->transceiver = otg_get_transceiver();
+
if (gpio_is_valid(dev->mach->gpio_vbus)) {
if ((retval = gpio_request(dev->mach->gpio_vbus,
"pxa25x_udc GPIO VBUS"))) {
@@ -2264,6 +2296,10 @@ lubbock_fail0:
if (gpio_is_valid(dev->mach->gpio_vbus))
gpio_free(dev->mach->gpio_vbus);
err_gpio_vbus:
+ if (dev->transceiver) {
+ otg_put_transceiver(dev->transceiver);
+ dev->transceiver = NULL;
+ }
clk_put(dev->clk);
err_clk:
return retval;
@@ -2305,6 +2341,11 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
clk_put(dev->clk);
+ if (dev->transceiver) {
+ otg_put_transceiver(dev->transceiver);
+ dev->transceiver = NULL;
+ }
+
platform_set_drvdata(pdev, NULL);
the_controller = NULL;
return 0;
diff --git a/drivers/usb/gadget/pxa25x_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index 1d51aa21e6e..f572c561746 100644
--- a/drivers/usb/gadget/pxa25x_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -128,6 +128,7 @@ struct pxa25x_udc {
struct device *dev;
struct clk *clk;
struct pxa2xx_udc_mach_info *mach;
+ struct otg_transceiver *transceiver;
u64 dma_mask;
struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index ca41b0b5afb..48267bc0b2e 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1022,22 +1022,29 @@ static rndis_resp_t *rndis_add_response (int configNr, u32 length)
return r;
}
-int rndis_rm_hdr(struct sk_buff *skb)
+int rndis_rm_hdr(struct gether *port,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
{
/* tmp points to a struct rndis_packet_msg_type */
__le32 *tmp = (void *) skb->data;
/* MessageType, MessageLength */
if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
- != get_unaligned(tmp++))
+ != get_unaligned(tmp++)) {
+ dev_kfree_skb_any(skb);
return -EINVAL;
+ }
tmp++;
/* DataOffset, DataLength */
- if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8))
+ if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
+ dev_kfree_skb_any(skb);
return -EOVERFLOW;
+ }
skb_trim(skb, get_unaligned_le32(tmp++));
+ skb_queue_tail(list, skb);
return 0;
}
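
rndis_rm_hdr() now owns the skb on every path: it frees it on malformed input and otherwise queues the trimmed frame onto the caller's list. The '+ 8' comes from the RNDIS packet-message layout, where DataOffset is counted from the start of the DataOffset field itself, which sits 8 bytes into the message after MessageType and MessageLength. A standalone walk of those leading fields (little-endian host assumed for the memcpy; the 36-byte offset is just the common no-OOB case, not something this patch defines):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Leading fields of a REMOTE_NDIS_PACKET_MSG, all little-endian:
 * MessageType(4) MessageLength(4) DataOffset(4) DataLength(4) ...
 */
int main(void)
{
	uint8_t msg[64] = { 0 };
	uint32_t type = 1;		/* REMOTE_NDIS_PACKET_MSG */
	uint32_t data_offset = 36;	/* typical: rest of a 44-byte header */
	uint32_t payload_at;

	memcpy(&msg[0], &type, 4);
	memcpy(&msg[8], &data_offset, 4);

	/* what the driver strips via skb_pull(skb, DataOffset + 8) */
	payload_at = data_offset + 8;
	printf("ethernet frame starts at byte %u of the message\n",
	       (unsigned) payload_at);
	return 0;
}
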
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index aac61dfe0f0..c236aaa9dcd 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -251,7 +251,8 @@ int rndis_set_param_vendor (u8 configNr, u32 vendorID,
const char *vendorDescr);
int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
void rndis_add_hdr (struct sk_buff *skb);
-int rndis_rm_hdr (struct sk_buff *skb);
+int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
+ struct sk_buff_head *list);
u8 *rndis_get_next_response (int configNr, u32 *length);
void rndis_free_response (int configNr, u8 *buf);
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 50c71aae2cc..4b5dbd0127f 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2392,7 +2392,7 @@ static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
grstctl = readl(hsotg->regs + S3C_GRSTCTL);
} while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
- if (!grstctl & S3C_GRSTCTL_CSftRst) {
+ if (!(grstctl & S3C_GRSTCTL_CSftRst)) {
dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
return -EINVAL;
}
@@ -2514,8 +2514,8 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
* DMA mode we may need this. */
writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
S3C_DOEPMSK_EPDisbldMsk |
- using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
- S3C_DIEPMSK_TimeOUTMsk) : 0,
+ (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
+ S3C_DIEPMSK_TimeOUTMsk) : 0),
hsotg->regs + S3C_DOEPMSK);
writel(0, hsotg->regs + S3C_DAINTMSK);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index a9b452fe622..d5f4c1d45c9 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1703,8 +1703,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
dprintk(DEBUG_NORMAL,"usb_gadget_register_driver() '%s'\n",
driver->driver.name);
- if (driver->disconnect)
- driver->disconnect(&udc->gadget);
+ driver->unbind(&udc->gadget);
device_del(&udc->gadget.dev);
udc->driver = NULL;
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
index 0f3d22fc030..8252595d619 100644
--- a/drivers/usb/gadget/u_audio.c
+++ b/drivers/usb/gadget/u_audio.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/utsname.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
@@ -253,11 +252,13 @@ static int gaudio_open_snd_dev(struct gaudio *card)
snd->filp = filp_open(fn_cap, O_RDONLY, 0);
if (IS_ERR(snd->filp)) {
ERROR(card, "No such PCM capture device: %s\n", fn_cap);
- snd->filp = NULL;
+ snd->substream = NULL;
+ snd->card = NULL;
+ } else {
+ pcm_file = snd->filp->private_data;
+ snd->substream = pcm_file->substream;
+ snd->card = card;
}
- pcm_file = snd->filp->private_data;
- snd->substream = pcm_file->substream;
- snd->card = card;
return 0;
}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index c6652195391..2fc02bd9584 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -23,7 +23,6 @@
/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
-#include <linux/utsname.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
@@ -37,8 +36,9 @@
* one (!) network link through the USB gadget stack, normally "usb0".
*
* The control and data models are handled by the function driver which
- * connects to this code; such as CDC Ethernet, "CDC Subset", or RNDIS.
- * That includes all descriptor and endpoint management.
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
+ * management.
*
* Link level addressing is handled by this component using module
* parameters; if no such parameters are provided, random link level
@@ -68,9 +68,13 @@ struct eth_dev {
struct list_head tx_reqs, rx_reqs;
atomic_t tx_qlen;
+ struct sk_buff_head rx_frames;
+
unsigned header_len;
- struct sk_buff *(*wrap)(struct sk_buff *skb);
- int (*unwrap)(struct sk_buff *skb);
+ struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
+ int (*unwrap)(struct gether *,
+ struct sk_buff *skb,
+ struct sk_buff_head *list);
struct work_struct work;
@@ -269,7 +273,7 @@ enomem:
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct sk_buff *skb = req->context;
+ struct sk_buff *skb = req->context, *skb2;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
@@ -278,26 +282,47 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
/* normal completion */
case 0:
skb_put(skb, req->actual);
- if (dev->unwrap)
- status = dev->unwrap(skb);
- if (status < 0
- || ETH_HLEN > skb->len
- || skb->len > ETH_FRAME_LEN) {
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- DBG(dev, "rx length %d\n", skb->len);
- break;
- }
- skb->protocol = eth_type_trans(skb, dev->net);
- dev->net->stats.rx_packets++;
- dev->net->stats.rx_bytes += skb->len;
+ if (dev->unwrap) {
+ unsigned long flags;
- /* no buffer copies needed, unless hardware can't
- * use skb buffers.
- */
- status = netif_rx(skb);
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ status = dev->unwrap(dev->port_usb,
+ skb,
+ &dev->rx_frames);
+ } else {
+ dev_kfree_skb_any(skb);
+ status = -ENOTCONN;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ } else {
+ skb_queue_tail(&dev->rx_frames, skb);
+ }
skb = NULL;
+
+ skb2 = skb_dequeue(&dev->rx_frames);
+ while (skb2) {
+ if (status < 0
+ || ETH_HLEN > skb2->len
+ || skb2->len > ETH_FRAME_LEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ DBG(dev, "rx length %d\n", skb2->len);
+ dev_kfree_skb_any(skb2);
+ goto next_frame;
+ }
+ skb2->protocol = eth_type_trans(skb2, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb2->len;
+
+ /* no buffer copies needed, unless hardware can't
+ * use skb buffers.
+ */
+ status = netif_rx(skb2);
+next_frame:
+ skb2 = skb_dequeue(&dev->rx_frames);
+ }
break;
/* software-driven interface shutdown */
@@ -537,14 +562,15 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
* or there's not enough space for extra headers we need
*/
if (dev->wrap) {
- struct sk_buff *skb_new;
+ unsigned long flags;
- skb_new = dev->wrap(skb);
- if (!skb_new)
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ skb = dev->wrap(dev->port_usb, skb);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!skb)
goto drop;
- dev_kfree_skb_any(skb);
- skb = skb_new;
length = skb->len;
}
req->buf = skb->data;
@@ -578,9 +604,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
}
if (retval) {
+ dev_kfree_skb_any(skb);
drop:
dev->net->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
spin_lock_irqsave(&dev->req_lock, flags);
if (list_empty(&dev->tx_reqs))
netif_start_queue(net);
@@ -753,6 +779,8 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
+ skb_queue_head_init(&dev->rx_frames);
+
/* network device setup */
dev->net = net;
strcpy(net->name, "usb%d");
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 0d1f7ae3b07..91b39ffdf6e 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -60,12 +60,13 @@ struct gether {
u16 cdc_filter;
- /* hooks for added framing, as needed for RNDIS and EEM.
- * we currently don't support multiple frames per SKB.
- */
+ /* hooks for added framing, as needed for RNDIS and EEM. */
u32 header_len;
- struct sk_buff *(*wrap)(struct sk_buff *skb);
- int (*unwrap)(struct sk_buff *skb);
+ struct sk_buff *(*wrap)(struct gether *port,
+ struct sk_buff *skb);
+ int (*unwrap)(struct gether *port,
+ struct sk_buff *skb,
+ struct sk_buff_head *list);
/* called on network open/close */
void (*open)(struct gether *);
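
These reworked hooks let a function driver emit several ethernet frames per USB transfer: wrap() returns the (possibly reallocated) skb to queue on the IN endpoint, while unwrap() consumes the skb it is handed and appends any number of extracted frames to the supplied list. A minimal pass-through framing that only shows the ownership rules of the new signatures (the null_* names are illustrative, not part of this patch):

/* Illustration only: forwards frames unchanged under the new hook types.
 * wrap() may free the skb and return NULL to drop a frame; unwrap() must
 * either queue its results onto @list or free the skb itself.
 */
static struct sk_buff *null_wrap(struct gether *port, struct sk_buff *skb)
{
	return skb;				/* nothing to prepend */
}

static int null_unwrap(struct gether *port, struct sk_buff *skb,
		       struct sk_buff_head *list)
{
	skb_queue_tail(list, skb);		/* one USB transfer == one frame */
	return 0;
}

/* a function driver would then set:
 *	port->wrap	 = null_wrap;
 *	port->unwrap	 = null_unwrap;
 *	port->header_len = 0;
 */
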
@@ -109,6 +110,7 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
/* each configuration may bind one instance of an ethernet link */
int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int eem_bind_config(struct usb_configuration *c);
#ifdef CONFIG_USB_ETH_RNDIS
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index fc6e709f45b..adf8260c3a6 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -1114,7 +1114,6 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
/* export the driver ... */
status = tty_register_driver(gs_tty_driver);
if (status) {
- put_tty_driver(gs_tty_driver);
pr_err("%s: cannot register, err %d\n",
__func__, status);
goto fail;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f21ca7d27a4..9b43b226817 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -113,6 +113,12 @@ config USB_EHCI_HCD_PPC_OF
Enables support for the USB controller present on the PowerPC
OpenFirmware platform bus.
+config USB_W90X900_EHCI
+ bool "W90X900(W90P910) EHCI support"
+ depends on USB_EHCI_HCD && ARCH_W90X900
+ ---help---
+ Enables support for the W90X900 USB controller
+
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
depends on USB
@@ -153,6 +159,18 @@ config USB_ISP1760_HCD
To compile this driver as a module, choose M here: the
module will be called isp1760.
+config USB_ISP1362_HCD
+ tristate "ISP1362 HCD support"
+ depends on USB
+ default n
+ ---help---
+ Supports the Philips ISP1362 chip as a host controller
+
+ This driver does not support isochronous transfers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called isp1362-hcd.
+
config USB_OHCI_HCD
tristate "OHCI HCD support"
depends on USB && USB_ARCH_HAS_OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 289d748bb41..f58b2494c44 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
+obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
new file mode 100644
index 00000000000..87c1b7c34c0
--- /dev/null
+++ b/drivers/usb/host/ehci-atmel.c
@@ -0,0 +1,230 @@
+/*
+ * Driver for EHCI UHP on Atmel chips
+ *
+ * Copyright (C) 2009 Atmel Corporation,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Based on various ehci-*.c drivers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* interface and function clocks */
+static struct clk *iclk, *fclk;
+static int clocked;
+
+/*-------------------------------------------------------------------------*/
+
+static void atmel_start_clock(void)
+{
+ clk_enable(iclk);
+ clk_enable(fclk);
+ clocked = 1;
+}
+
+static void atmel_stop_clock(void)
+{
+ clk_disable(fclk);
+ clk_disable(iclk);
+ clocked = 0;
+}
+
+static void atmel_start_ehci(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "start\n");
+ atmel_start_clock();
+}
+
+static void atmel_stop_ehci(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "stop\n");
+ atmel_stop_clock();
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int ehci_atmel_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval = 0;
+
+ /* registers start at offset 0x0 */
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci->sbrn = 0x20;
+
+ ehci_reset(ehci);
+ ehci_port_power(ehci, 0);
+
+ return retval;
+}
+
+static const struct hc_driver ehci_atmel_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Atmel EHCI UHP HS",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /* generic hardware linkage */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /* basic lifecycle operations */
+ .reset = ehci_atmel_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /* managing i/o requests and associated device resources */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+
+ /* scheduling support */
+ .get_frame_number = ehci_get_frame,
+
+ /* root hub support */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+};
+
+static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ const struct hc_driver *driver = &ehci_atmel_hc_driver;
+ struct resource *res;
+ int irq;
+ int retval;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_debug("Initializing Atmel-SoC USB Host Controller\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ retval = -ENODEV;
+ goto fail_create_hcd;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(&pdev->dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = res->end - res->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ driver->description)) {
+ dev_dbg(&pdev->dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+ if (hcd->regs == NULL) {
+ dev_dbg(&pdev->dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ iclk = clk_get(&pdev->dev, "ehci_clk");
+ if (IS_ERR(iclk)) {
+ dev_err(&pdev->dev, "Error getting interface clock\n");
+ retval = -ENOENT;
+ goto fail_get_iclk;
+ }
+ fclk = clk_get(&pdev->dev, "uhpck");
+ if (IS_ERR(fclk)) {
+ dev_err(&pdev->dev, "Error getting function clock\n");
+ retval = -ENOENT;
+ goto fail_get_fclk;
+ }
+
+ atmel_start_ehci(pdev);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval)
+ goto fail_add_hcd;
+
+ return retval;
+
+fail_add_hcd:
+ atmel_stop_ehci(pdev);
+ clk_put(fclk);
+fail_get_fclk:
+ clk_put(iclk);
+fail_get_iclk:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(&pdev->dev, "init %s fail, %d\n",
+ dev_name(&pdev->dev), retval);
+
+ return retval;
+}
+
+static int __exit ehci_atmel_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ ehci_shutdown(hcd);
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ atmel_stop_ehci(pdev);
+ clk_put(fclk);
+ clk_put(iclk);
+ fclk = iclk = NULL;
+
+ return 0;
+}
+
+static struct platform_driver ehci_atmel_driver = {
+ .probe = ehci_atmel_drv_probe,
+ .remove = __exit_p(ehci_atmel_drv_remove),
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver.name = "atmel-ehci",
+};
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 59d208d94d4..ed77be76d6b 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -199,10 +199,9 @@ static int ehci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
- pm_message_t message)
+static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
unsigned long flags;
int rc;
@@ -229,12 +228,6 @@ static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW) {
- ehci_halt(ehci);
- ehci_reset(ehci);
- }
-
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
au1xxx_stop_ehc();
@@ -248,10 +241,9 @@ bail:
return rc;
}
-
-static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
+static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
au1xxx_start_ehc();
@@ -305,20 +297,25 @@ static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops au1xxx_ehci_pmops = {
+ .suspend = ehci_hcd_au1xxx_drv_suspend,
+ .resume = ehci_hcd_au1xxx_drv_resume,
+};
+
+#define AU1XXX_EHCI_PMOPS &au1xxx_ehci_pmops
+
#else
-#define ehci_hcd_au1xxx_drv_suspend NULL
-#define ehci_hcd_au1xxx_drv_resume NULL
+#define AU1XXX_EHCI_PMOPS NULL
#endif
static struct platform_driver ehci_hcd_au1xxx_driver = {
.probe = ehci_hcd_au1xxx_drv_probe,
.remove = ehci_hcd_au1xxx_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- .suspend = ehci_hcd_au1xxx_drv_suspend,
- .resume = ehci_hcd_au1xxx_drv_resume,
.driver = {
.name = "au1xxx-ehci",
.owner = THIS_MODULE,
+ .pm = AU1XXX_EHCI_PMOPS,
}
};
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 7f4ace73d44..874d2000bf9 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -134,10 +134,11 @@ dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
static void __maybe_unused
dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
+ struct ehci_qh_hw *hw = qh->hw;
+
ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
- qh, qh->hw_next, qh->hw_info1, qh->hw_info2,
- qh->hw_current);
- dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next);
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
}
static void __maybe_unused
@@ -400,31 +401,32 @@ static void qh_lines (
char *next = *nextp;
char mark;
__le32 list_end = EHCI_LIST_END(ehci);
+ struct ehci_qh_hw *hw = qh->hw;
- if (qh->hw_qtd_next == list_end) /* NEC does this */
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
mark = '@';
else
- mark = token_mark(ehci, qh->hw_token);
+ mark = token_mark(ehci, hw->hw_token);
if (mark == '/') { /* qh_alt_next controls qh advance? */
- if ((qh->hw_alt_next & QTD_MASK(ehci))
- == ehci->async->hw_alt_next)
+ if ((hw->hw_alt_next & QTD_MASK(ehci))
+ == ehci->async->hw->hw_alt_next)
mark = '#'; /* blocked */
- else if (qh->hw_alt_next == list_end)
+ else if (hw->hw_alt_next == list_end)
mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
- scratch = hc32_to_cpup(ehci, &qh->hw_info1);
- hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0;
+ scratch = hc32_to_cpup(ehci, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
temp = scnprintf (next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
- scratch, hc32_to_cpup(ehci, &qh->hw_info2),
- hc32_to_cpup(ehci, &qh->hw_token), mark,
- (cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token)
+ scratch, hc32_to_cpup(ehci, &hw->hw_info2),
+ hc32_to_cpup(ehci, &hw->hw_token), mark,
+ (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token)
? "data1" : "data0",
- (hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f);
+ (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f);
size -= temp;
next += temp;
@@ -435,10 +437,10 @@ static void qh_lines (
mark = ' ';
if (hw_curr == td->qtd_dma)
mark = '*';
- else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
+ else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
mark = '+';
else if (QTD_LENGTH (scratch)) {
- if (td->hw_alt_next == ehci->async->hw_alt_next)
+ if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
mark = '#';
else if (td->hw_alt_next != list_end)
mark = '/';
@@ -550,12 +552,15 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
next += temp;
do {
+ struct ehci_qh_hw *hw;
+
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
+ hw = p.qh->hw;
temp = scnprintf (next, size, " qh%d-%04x/%p",
p.qh->period,
hc32_to_cpup(ehci,
- &p.qh->hw_info2)
+ &hw->hw_info2)
/* uframe masks */
& (QH_CMASK | QH_SMASK),
p.qh);
@@ -576,7 +581,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
/* show more info the first time around */
if (temp == seen_count) {
u32 scratch = hc32_to_cpup(ehci,
- &p.qh->hw_info1);
+ &hw->hw_info1);
struct ehci_qtd *qtd;
char *type = "";
@@ -609,7 +614,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
} else
temp = 0;
if (p.qh) {
- tag = Q_NEXT_TYPE(ehci, p.qh->hw_next);
+ tag = Q_NEXT_TYPE(ehci, hw->hw_next);
p = p.qh->qh_next;
}
break;
@@ -879,8 +884,7 @@ static int debug_close(struct inode *inode, struct file *file)
struct debug_buffer *buf = file->private_data;
if (buf) {
- if (buf->output_buf)
- vfree(buf->output_buf);
+ vfree(buf->output_buf);
kfree(buf);
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 11c627ce602..9835e071394 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -30,7 +30,6 @@
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
-#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
@@ -127,6 +126,8 @@ timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
switch (action) {
case TIMER_IO_WATCHDOG:
+ if (!ehci->need_io_watchdog)
+ return;
t = EHCI_IO_JIFFIES;
break;
case TIMER_ASYNC_OFF:
@@ -239,6 +240,11 @@ static int ehci_reset (struct ehci_hcd *ehci)
int retval;
u32 command = ehci_readl(ehci, &ehci->regs->command);
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (ehci->debug && !dbgp_reset_prep())
+ ehci->debug = NULL;
+
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
@@ -247,12 +253,21 @@ static int ehci_reset (struct ehci_hcd *ehci)
retval = handshake (ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
+ if (ehci->has_hostpc) {
+ ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
+ (u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
+ ehci_writel(ehci, TXFIFO_DEFAULT,
+ (u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
+ }
if (retval)
return retval;
if (ehci_is_TDI(ehci))
tdi_reset (ehci);
+ if (ehci->debug)
+ dbgp_external_startup();
+
return retval;
}
@@ -505,9 +520,14 @@ static int ehci_init(struct usb_hcd *hcd)
u32 temp;
int retval;
u32 hcc_params;
+ struct ehci_qh_hw *hw;
spin_lock_init(&ehci->lock);
+ /*
+ * keep the I/O watchdog enabled by default; well-behaved HCDs can turn it off later
+ */
+ ehci->need_io_watchdog = 1;
init_timer(&ehci->watchdog);
ehci->watchdog.function = ehci_watchdog;
ehci->watchdog.data = (unsigned long) ehci;
@@ -544,12 +564,13 @@ static int ehci_init(struct usb_hcd *hcd)
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = NULL;
- ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
- ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
- ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
- ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
+ hw = ehci->async->hw;
+ hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+ hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+ hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
- ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
+ hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
@@ -850,12 +871,18 @@ static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
end_unlink_async(ehci);
- /* if it's not linked then there's nothing to do */
- if (qh->qh_state != QH_STATE_LINKED)
- ;
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
/* defer till later if busy */
- else if (ehci->reclaim) {
+ if (ehci->reclaim) {
struct ehci_qh *last;
for (last = ehci->reclaim;
@@ -915,8 +942,9 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
break;
switch (qh->qh_state) {
case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
intr_deschedule (ehci, qh);
- /* FALL THROUGH */
+ break;
case QH_STATE_IDLE:
qh_completions (ehci, qh);
break;
@@ -925,23 +953,6 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
qh, qh->qh_state);
goto done;
}
-
- /* reschedule QH iff another request is queued */
- if (!list_empty (&qh->qtd_list)
- && HC_IS_RUNNING (hcd->state)) {
- rc = qh_schedule(ehci, qh);
-
- /* An error here likely indicates handshake failure
- * or no space left in the schedule. Neither fault
- * should happen often ...
- *
- * FIXME kill the now-dysfunctional queued urbs
- */
- if (rc != 0)
- ehci_err(ehci,
- "can't reschedule qh %p, err %d",
- qh, rc);
- }
break;
case PIPE_ISOCHRONOUS:
@@ -979,7 +990,7 @@ rescan:
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
- if (qh->hw_info1 == 0) {
+ if (qh->hw->hw_info1 == 0) {
ehci_vdbg (ehci, "iso delay\n");
goto idle_timeout;
}
@@ -988,6 +999,7 @@ rescan:
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
for (tmp = ehci->async->qh_next.qh;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
@@ -1052,18 +1064,17 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
usb_settoggle(qh->dev, epnum, is_out, 0);
if (!list_empty(&qh->qtd_list)) {
WARN_ONCE(1, "clear_halt for a busy endpoint\n");
- } else if (qh->qh_state == QH_STATE_LINKED) {
+ } else if (qh->qh_state == QH_STATE_LINKED ||
+ qh->qh_state == QH_STATE_COMPLETING) {
/* The toggle value in the QH can't be updated
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
- if (eptype == USB_ENDPOINT_XFER_BULK) {
+ if (eptype == USB_ENDPOINT_XFER_BULK)
unlink_async(ehci, qh);
- } else {
+ else
intr_deschedule(ehci, qh);
- (void) qh_schedule(ehci, qh);
- }
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
@@ -1117,6 +1128,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ixp4xx_ehci_driver
#endif
+#ifdef CONFIG_USB_W90X900_EHCI
+#include "ehci-w90x900.c"
+#define PLATFORM_DRIVER ehci_hcd_w90x900_driver
+#endif
+
+#ifdef CONFIG_ARCH_AT91
+#include "ehci-atmel.c"
+#define PLATFORM_DRIVER ehci_atmel_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER)
#error "missing bus glue for ehci-hcd"
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index f46ad27c9a9..1b6f1c0e5ce 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
+ u32 __iomem *hostpc_reg = NULL;
ehci_dbg(ehci, "suspend root hub\n");
@@ -142,6 +143,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
+ if (ehci->has_hostpc)
+ hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
+ + HOSTPC0 + 4 * (port & 0xff));
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
@@ -151,15 +155,37 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
/* enable remote wakeup on all ports */
- if (hcd->self.root_hub->do_remote_wakeup)
- t2 |= PORT_WAKE_BITS;
- else
+ if (hcd->self.root_hub->do_remote_wakeup) {
+ /* only enable the appropriate wake bits, otherwise the
+ * hardware cannot enter PHY low-power mode. If a race
+ * occurs here (a connection change while the bits are being
+ * set), port change detection will eventually correct it.
+ */
+ if (t1 & PORT_CONNECT) {
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ t2 &= ~PORT_WKCONN_E;
+ } else {
+ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ t2 &= ~PORT_WKDISC_E;
+ }
+ } else
t2 &= ~PORT_WAKE_BITS;
if (t1 != t2) {
ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
+ if (hostpc_reg) {
+ u32 t3;
+
+ msleep(5); /* 5 ms for the HCD to enter low-power mode */
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+ port, (t3 & HOSTPC_PHCD) ?
+ "succeeded" : "failed");
+ }
}
}
@@ -183,6 +209,11 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irq (&ehci->lock);
+
+ /* ehci_work() may have re-enabled the watchdog timer, which we do not
+ * want, and so we must delete any pending watchdog timer events.
+ */
+ del_timer_sync(&ehci->watchdog);
return 0;
}
@@ -204,6 +235,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
return -ESHUTDOWN;
}
+ if (unlikely(ehci->debug)) {
+ if (ehci->debug && !dbgp_reset_prep())
+ ehci->debug = NULL;
+ else
+ dbgp_external_startup();
+ }
+
/* Ideally and we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
@@ -563,7 +601,8 @@ static int ehci_hub_control (
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 __iomem *status_reg = &ehci->regs->port_status[
(wIndex & 0xff) - 1];
- u32 temp, status;
+ u32 __iomem *hostpc_reg = NULL;
+ u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
@@ -575,6 +614,9 @@ static int ehci_hub_control (
* power, "this is the one", etc. EHCI spec supports this.
*/
+ if (ehci->has_hostpc)
+ hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
+ + HOSTPC0 + 4 * ((wIndex & 0xff) - 1));
spin_lock_irqsave (&ehci->lock, flags);
switch (typeReq) {
case ClearHubFeature:
@@ -773,7 +815,11 @@ static int ehci_hub_control (
if (temp & PORT_CONNECT) {
status |= 1 << USB_PORT_FEAT_CONNECTION;
// status may be from integrated TT
- status |= ehci_port_speed(ehci, temp);
+ if (ehci->has_hostpc) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ status |= ehci_port_speed(ehci, temp1);
+ } else
+ status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
status |= 1 << USB_PORT_FEAT_ENABLE;
@@ -816,6 +862,15 @@ static int ehci_hub_control (
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
+ if (unlikely(ehci->debug)) {
+ /* If the debug port is active, any port
+ * feature requests for it should be denied */
+ if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
+ (readl(&ehci->debug->control) & DBGP_ENABLED)) {
+ retval = -ENODEV;
+ goto error_exit;
+ }
+ }
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
@@ -832,6 +887,24 @@ static int ehci_hub_control (
|| (temp & PORT_RESET) != 0)
goto error;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+ /* After the check above the port must be connected.
+ * Set the appropriate wake bits so the PHY can enter low-power
+ * mode when the controller has the hostpc feature.
+ */
+ if (hostpc_reg) {
+ temp &= ~PORT_WKCONN_E;
+ temp |= (PORT_WKDISC_E | PORT_WKOC_E);
+ ehci_writel(ehci, temp | PORT_SUSPEND,
+ status_reg);
+ msleep(5); /* 5 ms for the HCD to enter low-power mode */
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 | HOSTPC_PHCD,
+ hostpc_reg);
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+ wIndex, (temp1 & HOSTPC_PHCD) ?
+ "succeeded" : "failed");
+ }
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
@@ -894,6 +967,7 @@ error:
/* "stall" on error */
retval = -EPIPE;
}
+error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}
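
The root-hub suspend path above now enables only the wake events that can actually fire for each port's current state, so the PHY can reach low-power mode: connected ports wake on disconnect or overcurrent, empty ports wake on connect or overcurrent. The sketch below reproduces that decision in plain C; the bit values are placeholders rather than the real PORTSC layout.

#include <stdio.h>

/* Bit values are placeholders, not the real EHCI PORTSC layout. */
#define PORT_CONNECT	(1u << 0)
#define PORT_WKCONN_E	(1u << 20)
#define PORT_WKDISC_E	(1u << 21)
#define PORT_WKOC_E	(1u << 22)
#define PORT_WAKE_BITS	(PORT_WKCONN_E | PORT_WKDISC_E | PORT_WKOC_E)

/* Mirror of the decision in the hunk above: pick only the wake events
 * that make sense for the port's current connect state. */
static unsigned pick_wake_bits(unsigned portsc, int remote_wakeup)
{
	if (!remote_wakeup)
		return portsc & ~PORT_WAKE_BITS;

	if (portsc & PORT_CONNECT) {
		portsc |= PORT_WKOC_E | PORT_WKDISC_E;
		portsc &= ~PORT_WKCONN_E;
	} else {
		portsc |= PORT_WKOC_E | PORT_WKCONN_E;
		portsc &= ~PORT_WKDISC_E;
	}
	return portsc;
}

int main(void)
{
	printf("connected:    %08x\n", pick_wake_bits(PORT_CONNECT, 1));
	printf("disconnected: %08x\n", pick_wake_bits(0, 1));
	printf("no wakeup:    %08x\n", pick_wake_bits(PORT_CONNECT | PORT_WAKE_BITS, 0));
	return 0;
}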
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 10d52919abb..aeda96e0af6 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -75,7 +75,8 @@ static void qh_destroy(struct ehci_qh *qh)
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
- dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
+ dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
@@ -83,12 +84,14 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
struct ehci_qh *qh;
dma_addr_t dma;
- qh = (struct ehci_qh *)
- dma_pool_alloc (ehci->qh_pool, flags, &dma);
+ qh = kzalloc(sizeof *qh, GFP_ATOMIC);
if (!qh)
- return qh;
-
- memset (qh, 0, sizeof *qh);
+ goto done;
+ qh->hw = (struct ehci_qh_hw *)
+ dma_pool_alloc(ehci->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof *qh->hw);
qh->refcount = 1;
qh->ehci = ehci;
qh->qh_dma = dma;
@@ -99,10 +102,15 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
qh->dummy = ehci_qtd_alloc (ehci, flags);
if (qh->dummy == NULL) {
ehci_dbg (ehci, "no dummy td\n");
- dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
- qh = NULL;
+ goto fail1;
}
+done:
return qh;
+fail1:
+ dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
}
/* to share a qh (cpu threads, or hc) */
@@ -180,7 +188,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = dma_pool_create ("ehci_qh",
ehci_to_hcd(ehci)->self.controller,
- sizeof (struct ehci_qh),
+ sizeof(struct ehci_qh_hw),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {
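
The ehci-mem.c hunk splits the QH into a hardware descriptor (struct ehci_qh_hw, still carved out of the 32-byte-aligned DMA pool) and a separately kzalloc'd software part that points to it, so the pool no longer has to hold the driver's bookkeeping fields. A minimal userspace sketch of that two-part allocation, using aligned_alloc() in place of dma_pool_alloc(); field names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Two-part QH sketch: a 32-byte-aligned "hardware" descriptor (the DMA
 * pool allocation in the kernel) and a separately allocated software
 * part that merely points to it. */
struct qh_hw {
	unsigned hw_next, hw_info1, hw_info2, hw_token;
};

struct qh {
	struct qh_hw *hw;	/* hardware part, DMA-visible in the kernel */
	int refcount;		/* software-only bookkeeping */
	int qh_state;
};

static struct qh *qh_alloc(void)
{
	/* aligned_alloc() wants a size that is a multiple of the alignment */
	size_t hw_size = (sizeof(struct qh_hw) + 31) & ~(size_t)31;
	struct qh *qh = calloc(1, sizeof(*qh));

	if (!qh)
		return NULL;
	qh->hw = aligned_alloc(32, hw_size);	/* dma_pool_alloc() analogue */
	if (!qh->hw) {
		free(qh);
		return NULL;
	}
	memset(qh->hw, 0, sizeof(*qh->hw));
	qh->refcount = 1;
	return qh;
}

static void qh_destroy(struct qh *qh)
{
	free(qh->hw);		/* dma_pool_free() analogue */
	free(qh);
}

int main(void)
{
	struct qh *qh = qh_alloc();

	if (!qh)
		return 1;
	printf("sw part %zu bytes, hw part %zu bytes\n",
	       sizeof(*qh), sizeof(*qh->hw));
	qh_destroy(qh);
	return 0;
}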
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index c2f1b7df918..378861b9d79 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -27,28 +27,8 @@
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
- u32 temp;
int retval;
- /* optional debug port, normally in the first BAR */
- temp = pci_find_capability(pdev, 0x0a);
- if (temp) {
- pci_read_config_dword(pdev, temp, &temp);
- temp >>= 16;
- if ((temp & (3 << 13)) == (1 << 13)) {
- temp &= 0x1fff;
- ehci->debug = ehci_to_hcd(ehci)->regs + temp;
- temp = ehci_readl(ehci, &ehci->debug->control);
- ehci_info(ehci, "debug port %d%s\n",
- HCS_DEBUG_PORT(ehci->hcs_params),
- (temp & DBGP_ENABLED)
- ? " IN USE"
- : "");
- if (!(temp & DBGP_ENABLED))
- ehci->debug = NULL;
- }
- }
-
/* we expect static quirk code to handle the "extended capabilities"
* (currently just BIOS handoff) allowed starting with EHCI 0.96
*/
@@ -129,6 +109,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
return retval;
switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ ehci->need_io_watchdog = 0;
+ break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
hcd->has_tt = 1;
@@ -192,6 +175,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
}
+ /* optional debug port, normally in the first BAR */
+ temp = pci_find_capability(pdev, 0x0a);
+ if (temp) {
+ pci_read_config_dword(pdev, temp, &temp);
+ temp >>= 16;
+ if ((temp & (3 << 13)) == (1 << 13)) {
+ temp &= 0x1fff;
+ ehci->debug = ehci_to_hcd(ehci)->regs + temp;
+ temp = ehci_readl(ehci, &ehci->debug->control);
+ ehci_info(ehci, "debug port %d%s\n",
+ HCS_DEBUG_PORT(ehci->hcs_params),
+ (temp & DBGP_ENABLED)
+ ? " IN USE"
+ : "");
+ if (!(temp & DBGP_ENABLED))
+ ehci->debug = NULL;
+ }
+ }
+
ehci_reset(ehci);
/* at least the Genesys GL880S needs fixup here */
@@ -242,7 +244,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
* System suspend currently expects to be able to suspend the entire
* device tree, device-at-a-time. If we failed selective suspend
* reports, system suspend would fail; so the root hub code must claim
- * success. That's lying to usbcore, and it matters for for runtime
+ * success. That's lying to usbcore, and it matters for runtime
* PM scenarios with selective suspend and remote wakeup...
*/
if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
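
The block relocated above probes the optional EHCI debug-port capability: the upper half of the capability dword says which BAR holds the debug registers (value 1 in bits 13..14 means the first BAR) and the low 13 bits give their offset. A small sketch of that decode; the sample dword is made up.

#include <stdio.h>

/* Decode of the debug-port capability word, mirroring the relocated
 * block: bits 13..14 select the BAR (1 => first BAR) and the low 13
 * bits give the offset of the debug registers. */
static void decode_dbgp_cap(unsigned cap_dword)
{
	unsigned word = cap_dword >> 16;

	if ((word & (3u << 13)) == (1u << 13))
		printf("debug registers at BAR0 offset %#x\n", word & 0x1fff);
	else
		printf("debug port not in the first BAR\n");
}

int main(void)
{
	decode_dbgp_cap(0x20a00a0a);	/* hypothetical config-space value */
	return 0;
}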
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 7673554fa64..00ad9ce392e 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -87,31 +87,33 @@ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
+ struct ehci_qh_hw *hw = qh->hw;
+
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
- qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
- qh->hw_alt_next = EHCI_LIST_END(ehci);
+ hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ hw->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
- if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
+ if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
unsigned is_out, epnum;
is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
- epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
+ epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+ hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle (qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
- qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
+ hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -129,7 +131,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
qtd = list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
- if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
+ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
qtd = NULL;
}
@@ -260,7 +262,7 @@ __acquires(ehci->lock)
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
- if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
+ if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -297,7 +299,6 @@ __acquires(ehci->lock)
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
@@ -308,13 +309,14 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_qtd *last = NULL, *end = qh->dummy;
+ struct ehci_qtd *last, *end = qh->dummy;
struct list_head *entry, *tmp;
- int last_status = -EINPROGRESS;
+ int last_status;
int stopped;
unsigned count = 0;
u8 state;
- __le32 halt = HALT_BIT(ehci);
+ const __le32 halt = HALT_BIT(ehci);
+ struct ehci_qh_hw *hw = qh->hw;
if (unlikely (list_empty (&qh->qtd_list)))
return count;
@@ -324,11 +326,20 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_periodic().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->needs_rescan = 0;
+
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
@@ -392,7 +403,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
qtd->hw_token = cpu_to_hc32(ehci,
token);
wmb();
- qh->hw_token = cpu_to_hc32(ehci, token);
+ hw->hw_token = cpu_to_hc32(ehci,
+ token);
goto retry_xacterr;
}
stopped = 1;
@@ -435,8 +447,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* qh unlinked; token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_hc32(ehci, qtd->qtd_dma)
- == qh->hw_current) {
- token = hc32_to_cpu(ehci, qh->hw_token);
+ == hw->hw_current) {
+ token = hc32_to_cpu(ehci, hw->hw_token);
/* An unlink may leave an incomplete
* async transaction in the TT buffer.
@@ -449,9 +461,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
- if ((halt & qh->hw_token) == 0) {
+ if ((halt & hw->hw_token) == 0) {
halt:
- qh->hw_token |= halt;
+ hw->hw_token |= halt;
wmb ();
}
}
@@ -503,6 +515,21 @@ halt:
ehci_qtd_free (ehci, last);
}
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->needs_rescan)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise we have to wait until the QH is fully unlinked.
+ * Our caller will start an unlink if qh->needs_rescan is
+ * set. But if an unlink has already started, nothing needs
+ * to be done.
+ */
+ if (state != QH_STATE_LINKED)
+ qh->needs_rescan = 0;
+ }
+
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
@@ -510,7 +537,7 @@ halt:
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
- if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
+ if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(ehci, qh);
@@ -527,12 +554,9 @@ halt:
* That should be rare for interrupt transfers,
* except maybe high bandwidth ...
*/
- if ((cpu_to_hc32(ehci, QH_SMASK)
- & qh->hw_info2) != 0) {
- intr_deschedule (ehci, qh);
- (void) qh_schedule (ehci, qh);
- } else
- unlink_async (ehci, qh);
+
+ /* Tell the caller to start an unlink */
+ qh->needs_rescan = 1;
break;
/* otherwise, unlink already started */
}
@@ -649,7 +673,7 @@ qh_urb_transaction (
* (this will usually be overridden later.)
*/
if (is_input)
- qtd->hw_alt_next = ehci->async->hw_alt_next;
+ qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
@@ -744,6 +768,7 @@ qh_make (
int is_input, type;
int maxp = 0;
struct usb_tt *tt = urb->dev->tt;
+ struct ehci_qh_hw *hw;
if (!qh)
return qh;
@@ -890,8 +915,9 @@ done:
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
- qh->hw_info1 = cpu_to_hc32(ehci, info1);
- qh->hw_info2 = cpu_to_hc32(ehci, info2);
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(ehci, info1);
+ hw->hw_info2 = cpu_to_hc32(ehci, info2);
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
@@ -910,6 +936,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (unlikely(qh->clearing_tt))
return;
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
/* (re)start the async schedule? */
head = ehci->async;
timer_action_done (ehci, TIMER_ASYNC_OFF);
@@ -928,16 +956,15 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */
- if (qh->qh_state == QH_STATE_IDLE)
- qh_refresh (ehci, qh);
+ qh_refresh(ehci, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
- qh->hw_next = head->hw_next;
+ qh->hw->hw_next = head->hw->hw_next;
wmb ();
head->qh_next.qh = qh;
- head->hw_next = dma;
+ head->hw->hw_next = dma;
qh_get(qh);
qh->xacterrs = 0;
@@ -984,7 +1011,7 @@ static struct ehci_qh *qh_append_tds (
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
- qh->hw_info1 &= ~qh_addr_mask;
+ qh->hw->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
@@ -1169,7 +1196,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
- prev->hw_next = qh->hw_next;
+ prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
wmb ();
@@ -1214,6 +1241,8 @@ rescan:
qh = qh_get (qh);
qh->stamp = ehci->stamp;
temp = qh_completions (ehci, qh);
+ if (qh->needs_rescan)
+ unlink_async(ehci, qh);
qh_put (qh);
if (temp != 0) {
goto rescan;
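
The qh_completions() changes above introduce qh->needs_rescan: if an URB is dequeued while an earlier URB in the same queue is being given back, the dequeue path only sets the flag, and the completion loop restarts (or the caller unlinks the QH) once the current pass finishes. The sketch below shows that restart-on-flag pattern in isolation, with a toy queue instead of the EHCI structures.

#include <stdbool.h>
#include <stdio.h>

/* A callback invoked while we walk the queue may invalidate the walk;
 * instead of restarting immediately it sets a flag, and we restart once
 * the pass finishes. */
struct queue {
	int pending;		/* items still queued */
	bool needs_rescan;	/* set from inside the "giveback" callback */
};

static void giveback(struct queue *q, int item)
{
	printf("completed item %d\n", item);
	if (item == 2)		/* pretend a dequeue happened during giveback */
		q->needs_rescan = true;
}

static void process_completions(struct queue *q)
{
rescan:
	q->needs_rescan = false;
	while (q->pending > 0) {
		giveback(q, q->pending);
		q->pending--;
	}
	if (q->needs_rescan)
		goto rescan;	/* queue changed under us: walk it again */
}

int main(void)
{
	struct queue q = { .pending = 3, .needs_rescan = false };

	process_completions(&q);
	return 0;
}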
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index edd61ee9032..3ea05936851 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -60,6 +60,20 @@ periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
}
}
+static __hc32 *
+shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
+ __hc32 tag)
+{
+ switch (hc32_to_cpu(ehci, tag)) {
+ /* our ehci_shadow.qh is actually the software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
@@ -71,7 +85,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(ehci, prev_p,
Q_NEXT_TYPE(ehci, *hw_p));
- hw_p = here.hw_next;
+ hw_p = shadow_next_periodic(ehci, &here,
+ Q_NEXT_TYPE(ehci, *hw_p));
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
@@ -83,7 +98,7 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
*/
*prev_p = *periodic_next_shadow(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
- *hw_p = *here.hw_next;
+ *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p));
}
/* how many of the uframe's 125 usecs are allocated? */
@@ -93,18 +108,20 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
__hc32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned usecs = 0;
+ struct ehci_qh_hw *hw;
while (q->ptr) {
switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
case Q_TYPE_QH:
+ hw = q->qh->hw;
/* is it in the S-mask? */
- if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
+ if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
- if (q->qh->hw_info2 & cpu_to_hc32(ehci,
+ if (hw->hw_info2 & cpu_to_hc32(ehci,
1 << (8 + uframe)))
usecs += q->qh->c_usecs;
- hw_p = &q->qh->hw_next;
+ hw_p = &hw->hw_next;
q = &q->qh->qh_next;
break;
// case Q_TYPE_FSTN:
@@ -237,10 +254,10 @@ periodic_tt_usecs (
continue;
case Q_TYPE_QH:
if (same_tt(dev, q->qh->dev)) {
- uf = tt_start_uframe(ehci, q->qh->hw_info2);
+ uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
tt_usecs[uf] += q->qh->tt_usecs;
}
- hw_p = &q->qh->hw_next;
+ hw_p = &q->qh->hw->hw_next;
q = &q->qh->qh_next;
continue;
case Q_TYPE_SITD:
@@ -375,6 +392,7 @@ static int tt_no_collision (
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
__hc32 type;
+ struct ehci_qh_hw *hw;
here = ehci->pshadow [frame];
type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
@@ -385,17 +403,18 @@ static int tt_no_collision (
here = here.itd->itd_next;
continue;
case Q_TYPE_QH:
+ hw = here.qh->hw;
if (same_tt (dev, here.qh->dev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
- here.qh->hw_info2);
+ hw->hw_info2);
/* "knows" no gap is needed */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
- type = Q_NEXT_TYPE(ehci, here.qh->hw_next);
+ type = Q_NEXT_TYPE(ehci, hw->hw_next);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
@@ -498,7 +517,8 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
dev_dbg (&qh->dev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
- period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* high bandwidth, or otherwise every microframe */
@@ -517,7 +537,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
- hw_p = &here.qh->hw_next;
+ hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
@@ -528,14 +548,14 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (qh->period > here.qh->period)
break;
prev = &here.qh->qh_next;
- hw_p = &here.qh->hw_next;
+ hw_p = &here.qh->hw->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
- qh->hw_next = *hw_p;
+ qh->hw->hw_next = *hw_p;
wmb ();
prev->qh = qh;
*hw_p = QH_NEXT (ehci, qh->qh_dma);
@@ -581,7 +601,7 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
dev_dbg (&qh->dev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
- hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* qh->qh_next still "live" to HC */
@@ -595,7 +615,19 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- unsigned wait;
+ unsigned wait;
+ struct ehci_qh_hw *hw = qh->hw;
+ int rc;
+
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
qh_unlink_periodic (ehci, qh);
@@ -606,15 +638,33 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
*/
if (list_empty (&qh->qtd_list)
|| (cpu_to_hc32(ehci, QH_CMASK)
- & qh->hw_info2) != 0)
+ & hw->hw_info2) != 0)
wait = 2;
else
wait = 55; /* worst case: 3 * 1024 */
udelay (wait);
qh->qh_state = QH_STATE_IDLE;
- qh->hw_next = EHCI_LIST_END(ehci);
+ hw->hw_next = EHCI_LIST_END(ehci);
wmb ();
+
+ qh_completions(ehci, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) &&
+ HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
+ rc = qh_schedule(ehci, qh);
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ if (rc != 0)
+ ehci_err(ehci, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
}
/*-------------------------------------------------------------------------*/
@@ -739,14 +789,15 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
unsigned uframe;
__hc32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct ehci_qh_hw *hw = qh->hw;
qh_refresh(ehci, qh);
- qh->hw_next = EHCI_LIST_END(ehci);
+ hw->hw_next = EHCI_LIST_END(ehci);
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
- uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK);
+ uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
status = check_intr_schedule (ehci, frame, --uframe,
qh, &c_mask);
} else {
@@ -784,11 +835,11 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
- qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
- qh->hw_info2 |= qh->period
+ hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= qh->period
? cpu_to_hc32(ehci, 1 << uframe)
: cpu_to_hc32(ehci, QH_SMASK);
- qh->hw_info2 |= c_mask;
+ hw->hw_info2 |= c_mask;
} else
ehci_dbg (ehci, "reused qh %p schedule\n", qh);
@@ -2188,10 +2239,11 @@ restart:
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get (q.qh);
- type = Q_NEXT_TYPE(ehci, q.qh->hw_next);
+ type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
q = q.qh->qh_next;
modified = qh_completions (ehci, temp.qh);
- if (unlikely (list_empty (&temp.qh->qtd_list)))
+ if (unlikely(list_empty(&temp.qh->qtd_list) ||
+ temp.qh->needs_rescan))
intr_deschedule (ehci, temp.qh);
qh_put (temp.qh);
break;
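
shadow_next_periodic(), added above, exists because QH entries in the periodic list now keep their hardware words behind qh->hw while the other entry types still embed them, so the generic walker has to pick the right "next" slot per entry type. A standalone sketch of walking such a tagged list; the node layout here is illustrative only.

#include <stdio.h>

/* Tagged periodic-list sketch: "QH" nodes keep their hardware word behind
 * a pointer, other node types embed it directly, so the walker picks the
 * right slot per tag. */
enum tag { TAG_QH, TAG_ITD };

struct hw_part { unsigned hw_next; };

struct node {
	enum tag tag;
	struct node *next;
	struct hw_part *hw;	/* only used by TAG_QH nodes */
	unsigned hw_next;	/* only used by TAG_ITD nodes */
};

static unsigned *next_hw_slot(struct node *n)
{
	switch (n->tag) {
	case TAG_QH:
		return &n->hw->hw_next;		/* indirect, like qh->hw->hw_next */
	default:
		return &n->hw_next;		/* embedded, like itd->hw_next */
	}
}

int main(void)
{
	struct hw_part qh_hw = { .hw_next = 0x1000 };
	struct node itd = { .tag = TAG_ITD, .hw_next = 0x2000 };
	struct node qh  = { .tag = TAG_QH,  .next = &itd, .hw = &qh_hw };

	for (struct node *n = &qh; n; n = n->next)
		printf("tag %d hw_next %#x\n", n->tag, *next_hw_slot(n));
	return 0;
}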
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
new file mode 100644
index 00000000000..cfa21ea20f8
--- /dev/null
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -0,0 +1,181 @@
+/*
+ * linux/drivers/usb/host/ehci-w90x900.c
+ *
+ * Copyright (c) 2008 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/platform_device.h>
+
+/* enable PHY0 and PHY1 for w90p910 */
+#define ENPHY (0x01<<8)
+#define PHY0_CTR (0xA4)
+#define PHY1_CTR (0xA8)
+
+static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource *res;
+ int retval = 0, irq;
+ unsigned long val;
+
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -ENXIO;
+ goto err1;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err1;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = res->end - res->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ retval = -EBUSY;
+ goto err2;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (hcd->regs == NULL) {
+ retval = -EFAULT;
+ goto err3;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+ /* enable PHY0 and PHY1; these registers only apply to the w90p910.
+ * 0xA4 and 0xA8 are the offsets of the PHY0 and PHY1 controller
+ * registers of the w90p910 IC, relative to ehci->regs.
+ */
+ val = __raw_readl(ehci->regs+PHY0_CTR);
+ val |= ENPHY;
+ __raw_writel(val, ehci->regs+PHY0_CTR);
+
+ val = __raw_readl(ehci->regs+PHY1_CTR);
+ val |= ENPHY;
+ __raw_writel(val, ehci->regs+PHY1_CTR);
+
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ ehci->sbrn = 0x20;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err4;
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval != 0)
+ goto err4;
+
+ ehci_writel(ehci, 1, &ehci->regs->configured_flag);
+
+ return retval;
+err4:
+ iounmap(hcd->regs);
+err3:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err2:
+ usb_put_hcd(hcd);
+err1:
+ return retval;
+}
+
+static
+void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+static const struct hc_driver ehci_w90x900_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Nuvoton w90x900 EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_USB2|HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_init,
+ .start = ehci_run,
+
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+};
+
+static int __devinit ehci_w90x900_probe(struct platform_device *pdev)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
+}
+
+static int __devexit ehci_w90x900_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_w90x900_remove(hcd, pdev);
+
+ return 0;
+}
+
+static struct platform_driver ehci_hcd_w90x900_driver = {
+ .probe = ehci_w90x900_probe,
+ .remove = __devexit_p(ehci_w90x900_remove),
+ .driver = {
+ .name = "w90x900-ehci",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910 usb ehci driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:w90p910-ehci");
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 2bfff30f470..064e76821ff 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -37,7 +37,7 @@ typedef __u16 __bitwise __hc16;
#define __hc16 __le16
#endif
-/* statistics can be kept for for tuning/monitoring */
+/* statistics can be kept for tuning/monitoring */
struct ehci_stats {
/* irq usage */
unsigned long normal;
@@ -126,6 +126,7 @@ struct ehci_hcd { /* one per controller */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned has_amcc_usb23:1;
+ unsigned need_io_watchdog:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -135,6 +136,7 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_OFFSET 0x4
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
+ unsigned has_hostpc:1;
u8 sbrn; /* packed release number */
@@ -298,8 +300,8 @@ union ehci_shadow {
* These appear in both the async and (for interrupt) periodic schedules.
*/
-struct ehci_qh {
- /* first part defined by EHCI spec */
+/* first part defined by EHCI spec */
+struct ehci_qh_hw {
__hc32 hw_next; /* see EHCI 3.6.1 */
__hc32 hw_info1; /* see EHCI 3.6.2 */
#define QH_HEAD 0x00008000
@@ -317,7 +319,10 @@ struct ehci_qh {
__hc32 hw_token;
__hc32 hw_buf [5];
__hc32 hw_buf_hi [5];
+} __attribute__ ((aligned(32)));
+struct ehci_qh {
+ struct ehci_qh_hw *hw;
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
union ehci_shadow qh_next; /* ptr to qh; or periodic */
@@ -336,6 +341,7 @@ struct ehci_qh {
u32 refcount;
unsigned stamp;
+ u8 needs_rescan; /* Dequeue during giveback */
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
@@ -357,7 +363,7 @@ struct ehci_qh {
struct usb_device *dev; /* access to TT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
-} __attribute__ ((aligned (32)));
+};
/*-------------------------------------------------------------------------*/
@@ -544,7 +550,7 @@ static inline unsigned int
ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
{
if (ehci_is_TDI(ehci)) {
- switch ((portsc>>26)&3) {
+ switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) {
case 0:
return 0;
case 1:
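
The ehci_port_speed() change above reads the two-bit speed field one bit lower (bit 25 instead of 26) when the controller has the HOSTPC extension. A tiny sketch of that conditional decode; the shift values come from the hunk, the returned strings are just labels.

#include <stdio.h>

/* With the HOSTPC extension the 2-bit speed field sits one bit lower. */
static const char *port_speed(unsigned portsc, int has_hostpc)
{
	switch ((portsc >> (has_hostpc ? 25 : 26)) & 3) {
	case 0:  return "full speed";
	case 1:  return "low speed";
	default: return "high speed";
	}
}

int main(void)
{
	unsigned portsc = 1u << 25;	/* speed field = 1 when has_hostpc */

	printf("hostpc:  %s\n", port_speed(portsc, 1));
	printf("classic: %s\n", port_speed(portsc, 0));
	return 0;
}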
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
new file mode 100644
index 00000000000..e35d82808ba
--- /dev/null
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -0,0 +1,2909 @@
+/*
+ * ISP1362 HCD (Host Controller Driver) for USB.
+ *
+ * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
+ *
+ * Derived from the SL811 HCD, rewritten for ISP116x.
+ * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
+ *
+ * Portions:
+ * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Copyright (C) 2004 David Brownell
+ */
+
+/*
+ * The ISP1362 chip requires large delays (300 ns and 462 ns) between
+ * accesses to the address and data registers.
+ * The following timing options exist:
+ *
+ * 1. Configure your memory controller to add such delays if it can (the best)
+ * 2. Implement a platform-specific delay function, possibly
+ * combined with configuring the memory controller; see
+ * include/linux/usb_isp1362.h for more info.
+ * 3. Use ndelay (easiest, poorest).
+ *
+ * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
+ * platform specific section of isp1362.h to select the appropriate variant.
+ *
+ * Also note that according to the Philips "ISP1362 Errata" document
+ * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
+ * is reasserted (even with #CS deasserted) within 132ns after a
+ * write cycle to any controller register. If the hardware doesn't
+ * implement the recommended fix (gating the #WR with #CS) software
+ * must ensure that no further write cycle (not necessarily to the chip!)
+ * is issued by the CPU within this interval.
+ *
+ * For PXA25x this can be ensured by using VLIO with the maximum
+ * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
+ */
+
+#ifdef CONFIG_USB_DEBUG
+# define ISP1362_DEBUG
+#else
+# undef ISP1362_DEBUG
+#endif
+
+/*
+ * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
+ * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
+ * requests are carried out in separate frames. This will delay any SETUP
+ * packets until the start of the next frame so that this situation is
+ * unlikely to occur (and makes usbtest happy running with a PXA255 target
+ * device).
+ */
+#undef BUGGY_PXA2XX_UDC_USBTEST
+
+#undef PTD_TRACE
+#undef URB_TRACE
+#undef VERBOSE
+#undef REGISTERS
+
+/* This enables a memory test on the ISP1362 chip memory to make sure the
+ * chip access timing is correct.
+ */
+#undef CHIP_BUFFER_TEST
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/isp1362.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+static int dbg_level;
+#ifdef ISP1362_DEBUG
+module_param(dbg_level, int, 0644);
+#else
+module_param(dbg_level, int, 0);
+#define STUB_DEBUG_FILE
+#endif
+
+#include "../core/hcd.h"
+#include "../core/usb.h"
+#include "isp1362.h"
+
+
+#define DRIVER_VERSION "2005-04-04"
+#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+static const char hcd_name[] = "isp1362-hcd";
+
+static void isp1362_hc_stop(struct usb_hcd *hcd);
+static int isp1362_hc_start(struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * When called from the interrupt handler, only isp1362_hcd->irqenb is modified,
+ * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
+ * completion.
+ * We don't need a 'disable' counterpart, since interrupts will be disabled
+ * only by the interrupt handler.
+ */
+static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
+{
+ if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
+ return;
+ if (mask & ~isp1362_hcd->irqenb)
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
+ isp1362_hcd->irqenb |= mask;
+ if (isp1362_hcd->irq_active)
+ return;
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
+ u16 offset)
+{
+ struct isp1362_ep_queue *epq = NULL;
+
+ if (offset < isp1362_hcd->istl_queue[1].buf_start)
+ epq = &isp1362_hcd->istl_queue[0];
+ else if (offset < isp1362_hcd->intl_queue.buf_start)
+ epq = &isp1362_hcd->istl_queue[1];
+ else if (offset < isp1362_hcd->atl_queue.buf_start)
+ epq = &isp1362_hcd->intl_queue;
+ else if (offset < isp1362_hcd->atl_queue.buf_start +
+ isp1362_hcd->atl_queue.buf_size)
+ epq = &isp1362_hcd->atl_queue;
+
+ if (epq)
+ DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
+ else
+ pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
+
+ return epq;
+}
+
+static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
+{
+ int offset;
+
+ if (index * epq->blk_size > epq->buf_size) {
+ pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
+ epq->buf_size / epq->blk_size);
+ return -EINVAL;
+ }
+ offset = epq->buf_start + index * epq->blk_size;
+ DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
+
+ return offset;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
+ int mps)
+{
+ u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
+
+ xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
+ if (xfer_size < size && xfer_size % mps)
+ xfer_size -= xfer_size % mps;
+
+ return xfer_size;
+}
+
+static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
+ struct isp1362_ep *ep, u16 len)
+{
+ int ptd_offset = -EINVAL;
+ int index;
+ int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
+ int found = -1;
+ int last = -1;
+
+ BUG_ON(len > epq->buf_size);
+
+ if (!epq->buf_avail)
+ return -ENOMEM;
+
+ if (ep->num_ptds)
+ pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
+ epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
+ BUG_ON(ep->num_ptds != 0);
+
+ for (index = 0; index <= epq->buf_count - num_ptds; index++) {
+ if (test_bit(index, &epq->buf_map))
+ continue;
+ found = index;
+ for (last = index + 1; last < index + num_ptds; last++) {
+ if (test_bit(last, &epq->buf_map)) {
+ found = -1;
+ break;
+ }
+ }
+ if (found >= 0)
+ break;
+ }
+ if (found < 0)
+ return -EOVERFLOW;
+
+ DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
+ num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
+ ptd_offset = get_ptd_offset(epq, found);
+ WARN_ON(ptd_offset < 0);
+ ep->ptd_offset = ptd_offset;
+ ep->num_ptds += num_ptds;
+ epq->buf_avail -= num_ptds;
+ BUG_ON(epq->buf_avail > epq->buf_count);
+ ep->ptd_index = found;
+ for (index = found; index < last; index++)
+ __set_bit(index, &epq->buf_map);
+ DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
+ __func__, epq->name, ep->ptd_index, ep->ptd_offset,
+ epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
+
+ return found;
+}
+
+static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
+{
+ int index = ep->ptd_index;
+ int last = ep->ptd_index + ep->num_ptds;
+
+ if (last > epq->buf_count)
+ pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
+ __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
+ ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
+ epq->buf_map, epq->skip_map);
+ BUG_ON(last > epq->buf_count);
+
+ for (; index < last; index++) {
+ __clear_bit(index, &epq->buf_map);
+ __set_bit(index, &epq->skip_map);
+ }
+ epq->buf_avail += ep->num_ptds;
+ epq->ptd_count--;
+
+ BUG_ON(epq->buf_avail > epq->buf_count);
+ BUG_ON(epq->ptd_count > epq->buf_count);
+
+ DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
+ __func__, epq->name,
+ ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
+ DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
+ epq->buf_map, epq->skip_map);
+
+ ep->num_ptds = 0;
+ ep->ptd_offset = -EINVAL;
+ ep->ptd_index = -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
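
claim_ptd_buffers() and release_ptd_buffers() above manage the on-chip PTD memory as a bitmap of fixed-size blocks: claiming searches for a run of num_ptds consecutive free blocks (first fit) and marks them used, releasing clears them again. The sketch below implements the same first-fit run search over a plain unsigned long map; block counts and the initial map value are arbitrary.

#include <stdio.h>

#define BLOCKS 32

/* First-fit run search over a block bitmap: find 'count' consecutive
 * clear bits, mark them used, return the starting index. */
static int claim_blocks(unsigned long *map, int total, int count)
{
	for (int index = 0; index <= total - count; index++) {
		int last;

		for (last = index; last < index + count; last++)
			if (*map & (1ul << last))
				break;
		if (last < index + count)
			continue;	/* run interrupted, try the next start */
		for (last = index; last < index + count; last++)
			*map |= 1ul << last;
		return index;
	}
	return -1;		/* no run of 'count' free blocks */
}

static void release_blocks(unsigned long *map, int index, int count)
{
	for (int i = index; i < index + count; i++)
		*map &= ~(1ul << i);
}

int main(void)
{
	unsigned long map = 0x0000000f;	/* first four blocks already used */
	int idx = claim_blocks(&map, BLOCKS, 3);

	printf("claimed at %d, map %08lx\n", idx, map);
	release_blocks(&map, idx, 3);
	printf("released,     map %08lx\n", map);
	return 0;
}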
+
+/*
+ Set up PTDs.
+*/
+static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
+ struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
+ u16 fno)
+{
+ struct ptd *ptd;
+ int toggle;
+ int dir;
+ u16 len;
+ size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
+
+ DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
+
+ ptd = &ep->ptd;
+
+ ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
+
+ switch (ep->nextpid) {
+ case USB_PID_IN:
+ toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
+ dir = PTD_DIR_IN;
+ if (usb_pipecontrol(urb->pipe)) {
+ len = min_t(size_t, ep->maxpacket, buf_len);
+ } else if (usb_pipeisoc(urb->pipe)) {
+ len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
+ ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
+ } else
+ len = max_transfer_size(epq, buf_len, ep->maxpacket);
+ DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
+ (int)buf_len);
+ break;
+ case USB_PID_OUT:
+ toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
+ dir = PTD_DIR_OUT;
+ if (usb_pipecontrol(urb->pipe))
+ len = min_t(size_t, ep->maxpacket, buf_len);
+ else if (usb_pipeisoc(urb->pipe))
+ len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
+ else
+ len = max_transfer_size(epq, buf_len, ep->maxpacket);
+ if (len == 0)
+ pr_info("%s: Sending ZERO packet: %d\n", __func__,
+ urb->transfer_flags & URB_ZERO_PACKET);
+ DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
+ (int)buf_len);
+ break;
+ case USB_PID_SETUP:
+ toggle = 0;
+ dir = PTD_DIR_SETUP;
+ len = sizeof(struct usb_ctrlrequest);
+ DBG(1, "%s: SETUP len %d\n", __func__, len);
+ ep->data = urb->setup_packet;
+ break;
+ case USB_PID_ACK:
+ toggle = 1;
+ len = 0;
+ dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
+ PTD_DIR_OUT : PTD_DIR_IN;
+ DBG(1, "%s: ACK len %d\n", __func__, len);
+ break;
+ default:
+ toggle = dir = len = 0;
+ pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
+ BUG_ON(1);
+ }
+
+ ep->length = len;
+ if (!len)
+ ep->data = NULL;
+
+ ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
+ ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
+ PTD_EP(ep->epnum);
+ ptd->len = PTD_LEN(len) | PTD_DIR(dir);
+ ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
+
+ if (usb_pipeint(urb->pipe)) {
+ ptd->faddr |= PTD_SF_INT(ep->branch);
+ ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
+ }
+ if (usb_pipeisoc(urb->pipe))
+ ptd->faddr |= PTD_SF_ISO(fno);
+
+ DBG(1, "%s: Finished\n", __func__);
+}
+
+static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+ struct isp1362_ep_queue *epq)
+{
+ struct ptd *ptd = &ep->ptd;
+ int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
+
+ _BUG_ON(ep->ptd_offset < 0);
+
+ prefetch(ptd);
+ isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
+ if (len)
+ isp1362_write_buffer(isp1362_hcd, ep->data,
+ ep->ptd_offset + PTD_HEADER_SIZE, len);
+
+ dump_ptd(ptd);
+ dump_ptd_out_data(ptd, ep->data);
+}
+
+static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+ struct isp1362_ep_queue *epq)
+{
+ struct ptd *ptd = &ep->ptd;
+ int act_len;
+
+ WARN_ON(list_empty(&ep->active));
+ BUG_ON(ep->ptd_offset < 0);
+
+ list_del_init(&ep->active);
+ DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
+
+ prefetchw(ptd);
+ isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
+ dump_ptd(ptd);
+ act_len = PTD_GET_COUNT(ptd);
+ if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
+ return;
+ if (act_len > ep->length)
+ pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
+ ep->ptd_offset, act_len, ep->length);
+ BUG_ON(act_len > ep->length);
+ /* Only transfer the amount of data that has actually been overwritten
+ * in the chip buffer. We don't want any data that doesn't belong to the
+ * transfer to leak out of the chip into the caller's transfer buffer.
+ */
+ prefetchw(ep->data);
+ isp1362_read_buffer(isp1362_hcd, ep->data,
+ ep->ptd_offset + PTD_HEADER_SIZE, act_len);
+ dump_ptd_in_data(ptd, ep->data);
+}
+
+/*
+ * INT PTDs will stay in the chip until data is available.
+ * This function will remove a PTD from the chip when the URB is dequeued.
+ * Must be called with the spinlock held and IRQs disabled
+ */
+static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
+
+{
+ int index;
+ struct isp1362_ep_queue *epq;
+
+ DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
+ BUG_ON(ep->ptd_offset < 0);
+
+ epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
+ BUG_ON(!epq);
+
+ /* put ep in remove_list for cleanup */
+ WARN_ON(!list_empty(&ep->remove_list));
+ list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
+ /* let SOF interrupt handle the cleanup */
+ isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
+
+ index = ep->ptd_index;
+ if (index < 0)
+ /* ISO queues don't have SKIP registers */
+ return;
+
+ DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
+ index, ep->ptd_offset, epq->skip_map, 1 << index);
+
+ /* prevent further processing of PTD (will be effective after next SOF) */
+ epq->skip_map |= 1 << index;
+ if (epq == &isp1362_hcd->atl_queue) {
+ DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
+ if (~epq->skip_map == 0)
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ } else if (epq == &isp1362_hcd->intl_queue) {
+ DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
+ if (~epq->skip_map == 0)
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+ }
+}
+
+/*
+ Take completed or failed requests out of the schedule. Give back
+ processed URBs.
+*/
+static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
+ struct urb *urb, int status)
+ __releases(isp1362_hcd->lock)
+ __acquires(isp1362_hcd->lock)
+{
+ urb->hcpriv = NULL;
+ ep->error_count = 0;
+
+ if (usb_pipecontrol(urb->pipe))
+ ep->nextpid = USB_PID_SETUP;
+
+ URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
+ ep->num_req, usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ !usb_pipein(urb->pipe) ? "out" : "in",
+ usb_pipecontrol(urb->pipe) ? "ctrl" :
+ usb_pipeint(urb->pipe) ? "int" :
+ usb_pipebulk(urb->pipe) ? "bulk" :
+ "iso",
+ urb->actual_length, urb->transfer_buffer_length,
+ !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
+ "short_ok" : "", urb->status);
+
+
+ usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
+ spin_unlock(&isp1362_hcd->lock);
+ usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
+ spin_lock(&isp1362_hcd->lock);
+
+ /* take idle endpoints out of the schedule right away */
+ if (!list_empty(&ep->hep->urb_list))
+ return;
+
+ /* async deschedule */
+ if (!list_empty(&ep->schedule)) {
+ list_del_init(&ep->schedule);
+ return;
+ }
+
+
+ if (ep->interval) {
+ /* periodic deschedule */
+ DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
+ ep, ep->branch, ep->load,
+ isp1362_hcd->load[ep->branch],
+ isp1362_hcd->load[ep->branch] - ep->load);
+ isp1362_hcd->load[ep->branch] -= ep->load;
+ ep->branch = PERIODIC_SIZE;
+ }
+}
+
+/*
+ * Analyze transfer results, handle partial transfers and errors
+ */
+static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
+{
+ struct urb *urb = get_urb(ep);
+ struct usb_device *udev;
+ struct ptd *ptd;
+ int short_ok;
+ u16 len;
+ int urbstat = -EINPROGRESS;
+ u8 cc;
+
+ DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
+
+ udev = urb->dev;
+ ptd = &ep->ptd;
+ cc = PTD_GET_CC(ptd);
+ if (cc == PTD_NOTACCESSED) {
+ pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
+ ep->num_req, ptd);
+ cc = PTD_DEVNOTRESP;
+ }
+
+ short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
+ len = urb->transfer_buffer_length - urb->actual_length;
+
+ /* Data underrun is special. For an allowed underrun we
+ * clear the error and continue as normal. For a forbidden
+ * underrun we finish the DATA stage immediately; for a
+ * control transfer we then proceed with the STATUS stage.
+ */
+ if (cc == PTD_DATAUNDERRUN) {
+ if (short_ok) {
+ DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
+ __func__, ep->num_req, short_ok ? "" : "not_",
+ PTD_GET_COUNT(ptd), ep->maxpacket, len);
+ cc = PTD_CC_NOERROR;
+ urbstat = 0;
+ } else {
+ DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
+ __func__, ep->num_req,
+ usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
+ short_ok ? "" : "not_",
+ PTD_GET_COUNT(ptd), ep->maxpacket, len);
+ if (usb_pipecontrol(urb->pipe)) {
+ ep->nextpid = USB_PID_ACK;
+ /* save the data underrun error code for later and
+ * proceed with the status stage
+ */
+ urb->actual_length += PTD_GET_COUNT(ptd);
+ BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+
+ if (urb->status == -EINPROGRESS)
+ urb->status = cc_to_error[PTD_DATAUNDERRUN];
+ } else {
+ usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
+ PTD_GET_TOGGLE(ptd));
+ urbstat = cc_to_error[PTD_DATAUNDERRUN];
+ }
+ goto out;
+ }
+ }
+
+ if (cc != PTD_CC_NOERROR) {
+ if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
+ urbstat = cc_to_error[cc];
+ DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
+ __func__, ep->num_req, ep->nextpid, urbstat, cc,
+ ep->error_count);
+ }
+ goto out;
+ }
+
+ switch (ep->nextpid) {
+ case USB_PID_OUT:
+ if (PTD_GET_COUNT(ptd) != ep->length)
+ pr_err("%s: count=%d len=%d\n", __func__,
+ PTD_GET_COUNT(ptd), ep->length);
+ BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
+ urb->actual_length += ep->length;
+ BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+ usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
+ if (urb->actual_length == urb->transfer_buffer_length) {
+ DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
+ ep->num_req, len, ep->maxpacket, urbstat);
+ if (usb_pipecontrol(urb->pipe)) {
+ DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
+ ep->num_req,
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+ ep->nextpid = USB_PID_ACK;
+ } else {
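+ /* the URB is complete unless a trailing zero-length
+ * packet still has to be sent (URB_ZERO_PACKET set and
+ * the last packet was full-sized)
+ */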
+ if (len % ep->maxpacket ||
+ !(urb->transfer_flags & URB_ZERO_PACKET)) {
+ urbstat = 0;
+ DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
+ __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
+ urbstat, len, ep->maxpacket, urb->actual_length);
+ }
+ }
+ }
+ break;
+ case USB_PID_IN:
+ len = PTD_GET_COUNT(ptd);
+ BUG_ON(len > ep->length);
+ urb->actual_length += len;
+ BUG_ON(urb->actual_length > urb->transfer_buffer_length);
+ usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
+ /* if transfer completed or (allowed) data underrun */
+ if ((urb->transfer_buffer_length == urb->actual_length) ||
+ len % ep->maxpacket) {
+ DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
+ ep->num_req, len, ep->maxpacket, urbstat);
+ if (usb_pipecontrol(urb->pipe)) {
+ DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
+ ep->num_req,
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+ ep->nextpid = USB_PID_ACK;
+ } else {
+ urbstat = 0;
+ DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
+ __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
+ urbstat, len, ep->maxpacket, urb->actual_length);
+ }
+ }
+ break;
+ case USB_PID_SETUP:
+ if (urb->transfer_buffer_length == urb->actual_length) {
+ ep->nextpid = USB_PID_ACK;
+ } else if (usb_pipeout(urb->pipe)) {
+ usb_settoggle(udev, 0, 1, 1);
+ ep->nextpid = USB_PID_OUT;
+ } else {
+ usb_settoggle(udev, 0, 0, 1);
+ ep->nextpid = USB_PID_IN;
+ }
+ break;
+ case USB_PID_ACK:
+ DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
+ urbstat);
+ WARN_ON(urbstat != -EINPROGRESS);
+ urbstat = 0;
+ ep->nextpid = 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ out:
+ if (urbstat != -EINPROGRESS) {
+ DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
+ ep, ep->num_req, urb, urbstat);
+ finish_request(isp1362_hcd, ep, urb, urbstat);
+ }
+}
+
+static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
+{
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+
+ list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
+ struct isp1362_ep_queue *epq =
+ get_ptd_queue(isp1362_hcd, ep->ptd_offset);
+ int index = ep->ptd_index;
+
+ BUG_ON(epq == NULL);
+ if (index >= 0) {
+ DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
+ BUG_ON(ep->num_ptds == 0);
+ release_ptd_buffers(epq, ep);
+ }
+ if (!list_empty(&ep->hep->urb_list)) {
+ struct urb *urb = get_urb(ep);
+
+ DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
+ ep->num_req, ep);
+ finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
+ }
+ WARN_ON(list_empty(&ep->active));
+ if (!list_empty(&ep->active)) {
+ list_del_init(&ep->active);
+ DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
+ }
+ list_del_init(&ep->remove_list);
+ DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
+ }
+ DBG(1, "%s: Done\n", __func__);
+}
+
+static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
+{
+ if (count > 0) {
+ if (count < isp1362_hcd->atl_queue.ptd_count)
+ isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
+ isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ } else
+ isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
+}
+
+static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
+}
+
+static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
+{
+ isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
+ HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
+}
+
+static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
+ struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
+{
+ int index = epq->free_ptd;
+
+ prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
+ index = claim_ptd_buffers(epq, ep, ep->length);
+ if (index == -ENOMEM) {
+ DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
+ ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
+ return index;
+ } else if (index == -EOVERFLOW) {
+ DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
+ __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
+ epq->buf_map, epq->skip_map);
+ return index;
+ } else
+ BUG_ON(index < 0);
+ list_add_tail(&ep->active, &epq->active);
+ DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
+ ep, ep->num_req, ep->length, &epq->active);
+ DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
+ ep->ptd_offset, ep, ep->num_req);
+ isp1362_write_ptd(isp1362_hcd, ep, epq);
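+ /* allow the chip to process this PTD once the updated skip map is written back */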
+ __clear_bit(ep->ptd_index, &epq->skip_map);
+
+ return 0;
+}
+
+static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ int ptd_count = 0;
+ struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
+ struct isp1362_ep *ep;
+ int defer = 0;
+
+ if (atomic_read(&epq->finishing)) {
+ DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+ return;
+ }
+
+ list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
+ struct urb *urb = get_urb(ep);
+ int ret;
+
+ if (!list_empty(&ep->active)) {
+ DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
+ continue;
+ }
+
+ DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
+ ep, ep->num_req);
+
+ ret = submit_req(isp1362_hcd, urb, ep, epq);
+ if (ret == -ENOMEM) {
+ defer = 1;
+ break;
+ } else if (ret == -EOVERFLOW) {
+ defer = 1;
+ continue;
+ }
+#ifdef BUGGY_PXA2XX_UDC_USBTEST
+ defer = ep->nextpid == USB_PID_SETUP;
+#endif
+ ptd_count++;
+ }
+
+ /* Avoid starvation of endpoints */
+ if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
+ DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
+ list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
+ }
+ if (ptd_count || defer)
+ enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
+
+ epq->ptd_count += ptd_count;
+ if (epq->ptd_count > epq->stat_maxptds) {
+ epq->stat_maxptds = epq->ptd_count;
+ DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
+ }
+}
+
+static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ int ptd_count = 0;
+ struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
+ struct isp1362_ep *ep;
+
+ if (atomic_read(&epq->finishing)) {
+ DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+ return;
+ }
+
+ list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
+ struct urb *urb = get_urb(ep);
+ int ret;
+
+ if (!list_empty(&ep->active)) {
+ DBG(1, "%s: Skipping active %s ep %p\n", __func__,
+ epq->name, ep);
+ continue;
+ }
+
+ DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
+ epq->name, ep, ep->num_req);
+ ret = submit_req(isp1362_hcd, urb, ep, epq);
+ if (ret == -ENOMEM)
+ break;
+ else if (ret == -EOVERFLOW)
+ continue;
+ ptd_count++;
+ }
+
+ if (ptd_count) {
+ static int last_count;
+
+ if (ptd_count != last_count) {
+ DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
+ last_count = ptd_count;
+ }
+ enable_intl_transfers(isp1362_hcd);
+ }
+
+ epq->ptd_count += ptd_count;
+ if (epq->ptd_count > epq->stat_maxptds)
+ epq->stat_maxptds = epq->ptd_count;
+}
+
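+/*
+ * Compute the buffer offset of the next ISO PTD by rounding the current
+ * PTD (header plus payload) up to the queue's block size; returns -ENOMEM
+ * once the end of the ISTL buffer would be exceeded.
+ */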
+static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
+{
+ u16 ptd_offset = ep->ptd_offset;
+ int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
+
+ DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
+ ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
+
+ ptd_offset += num_ptds * epq->blk_size;
+ if (ptd_offset < epq->buf_start + epq->buf_size)
+ return ptd_offset;
+ else
+ return -ENOMEM;
+}
+
+static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
+{
+ int ptd_count = 0;
+ int flip = isp1362_hcd->istl_flip;
+ struct isp1362_ep_queue *epq;
+ int ptd_offset;
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+ u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+
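+ /* Fill the currently selected ISTL buffer. If the chip reports the
+ * other buffer as not full, loop back here afterwards and fill that
+ * one for the following frame as well (double buffering).
+ */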
+ fill2:
+ epq = &isp1362_hcd->istl_queue[flip];
+ if (atomic_read(&epq->finishing)) {
+ DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
+ return;
+ }
+
+ if (!list_empty(&epq->active))
+ return;
+
+ ptd_offset = epq->buf_start;
+ list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
+ struct urb *urb = get_urb(ep);
+ s16 diff = fno - (u16)urb->start_frame;
+
+ DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
+
+ if (diff > urb->number_of_packets) {
+ /* time frame for this URB has elapsed */
+ finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
+ continue;
+ } else if (diff < -1) {
+ /* URB is not due in this frame or the next one.
+ * Comparing with '-1' instead of '0' accounts for double
+ * buffering in the ISP1362 which enables us to queue the PTD
+ * one frame ahead of time
+ */
+ } else if (diff == -1) {
+ /* submit PTDs that are due in the next frame */
+ prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
+ if (ptd_offset + PTD_HEADER_SIZE + ep->length >
+ epq->buf_start + epq->buf_size) {
+ pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
+ __func__, ep->length);
+ continue;
+ }
+ ep->ptd_offset = ptd_offset;
+ list_add_tail(&ep->active, &epq->active);
+
+ ptd_offset = next_ptd(epq, ep);
+ if (ptd_offset < 0) {
+ pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
+ ep->num_req, epq->name);
+ break;
+ }
+ }
+ }
+ list_for_each_entry(ep, &epq->active, active) {
+ if (epq->active.next == &ep->active)
+ ep->ptd.mps |= PTD_LAST_MSK;
+ isp1362_write_ptd(isp1362_hcd, ep, epq);
+ ptd_count++;
+ }
+
+ if (ptd_count)
+ enable_istl_transfers(isp1362_hcd, flip);
+
+ epq->ptd_count += ptd_count;
+ if (epq->ptd_count > epq->stat_maxptds)
+ epq->stat_maxptds = epq->ptd_count;
+
+ /* check whether the second ISTL buffer may also be filled */
+ if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
+ fno++;
+ ptd_count = 0;
+ flip = 1 - flip;
+ goto fill2;
+ }
+}
+
+static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
+ struct isp1362_ep_queue *epq)
+{
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+
+ if (list_empty(&epq->active)) {
+ DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
+ return;
+ }
+
+ DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
+
+ atomic_inc(&epq->finishing);
+ list_for_each_entry_safe(ep, tmp, &epq->active, active) {
+ int index = ep->ptd_index;
+
+ DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
+ index, ep->ptd_offset);
+
+ BUG_ON(index < 0);
+ if (__test_and_clear_bit(index, &done_map)) {
+ isp1362_read_ptd(isp1362_hcd, ep, epq);
+ epq->free_ptd = index;
+ BUG_ON(ep->num_ptds == 0);
+ release_ptd_buffers(epq, ep);
+
+ DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
+ ep, ep->num_req);
+ if (!list_empty(&ep->remove_list)) {
+ list_del_init(&ep->remove_list);
+ DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
+ }
+ DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
+ ep, ep->num_req);
+ postproc_ep(isp1362_hcd, ep);
+ }
+ if (!done_map)
+ break;
+ }
+ if (done_map)
+ pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
+ epq->skip_map);
+ atomic_dec(&epq->finishing);
+}
+
+static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
+{
+ struct isp1362_ep *ep;
+ struct isp1362_ep *tmp;
+
+ if (list_empty(&epq->active)) {
+ DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
+ return;
+ }
+
+ DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
+
+ atomic_inc(&epq->finishing);
+ list_for_each_entry_safe(ep, tmp, &epq->active, active) {
+ DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
+
+ isp1362_read_ptd(isp1362_hcd, ep, epq);
+ DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
+ postproc_ep(isp1362_hcd, ep);
+ }
+ WARN_ON(epq->blk_size != 0);
+ atomic_dec(&epq->finishing);
+}
+
+static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
+{
+ int handled = 0;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ u16 irqstat;
+ u16 svc_mask;
+
+ spin_lock(&isp1362_hcd->lock);
+
+ BUG_ON(isp1362_hcd->irq_active++);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+ irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
+ DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
+
+ /* only handle interrupts that are currently enabled */
+ irqstat &= isp1362_hcd->irqenb;
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
+ svc_mask = irqstat;
+
+ if (irqstat & HCuPINT_SOF) {
+ isp1362_hcd->irqenb &= ~HCuPINT_SOF;
+ isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_SOF;
+ DBG(3, "%s: SOF\n", __func__);
+ isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+ if (!list_empty(&isp1362_hcd->remove_list))
+ finish_unlinks(isp1362_hcd);
+ if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
+ if (list_empty(&isp1362_hcd->atl_queue.active)) {
+ start_atl_transfers(isp1362_hcd);
+ } else {
+ isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
+ isp1362_hcd->atl_queue.skip_map);
+ isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ }
+ }
+ }
+
+ if (irqstat & HCuPINT_ISTL0) {
+ isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_ISTL0;
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
+ DBG(1, "%s: ISTL0\n", __func__);
+ WARN_ON((int)!!isp1362_hcd->istl_flip);
+ WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL0_ACTIVE);
+ WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL0_DONE));
+ isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
+ }
+
+ if (irqstat & HCuPINT_ISTL1) {
+ isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_ISTL1;
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
+ DBG(1, "%s: ISTL1\n", __func__);
+ WARN_ON(!(int)isp1362_hcd->istl_flip);
+ WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL1_ACTIVE);
+ WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
+ HCBUFSTAT_ISTL1_DONE));
+ isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
+ }
+
+ if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
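+ /* the two ISTL buffers should never complete at the same time;
+ * finish the current one, refill, and flip to the other buffer
+ */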
+ WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
+ (HCuPINT_ISTL0 | HCuPINT_ISTL1));
+ finish_iso_transfers(isp1362_hcd,
+ &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
+ start_iso_transfers(isp1362_hcd);
+ isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
+ }
+
+ if (irqstat & HCuPINT_INTL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
+ u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
+ isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
+
+ DBG(2, "%s: INTL\n", __func__);
+
+ svc_mask &= ~HCuPINT_INTL;
+
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
+ if (~(done_map | skip_map) == 0)
+ /* All PTDs are finished, disable INTL processing entirely */
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
+
+ handled = 1;
+ WARN_ON(!done_map);
+ if (done_map) {
+ DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
+ start_intl_transfers(isp1362_hcd);
+ }
+ }
+
+ if (irqstat & HCuPINT_ATL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
+ u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
+ isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
+
+ DBG(2, "%s: ATL\n", __func__);
+
+ svc_mask &= ~HCuPINT_ATL;
+
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
+ if (~(done_map | skip_map) == 0)
+ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
+ if (done_map) {
+ DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
+ start_atl_transfers(isp1362_hcd);
+ }
+ handled = 1;
+ }
+
+ if (irqstat & HCuPINT_OPR) {
+ u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
+ isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
+
+ svc_mask &= ~HCuPINT_OPR;
+ DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
+ intstat &= isp1362_hcd->intenb;
+ if (intstat & OHCI_INTR_UE) {
+ pr_err("Unrecoverable error\n");
+ /* FIXME: do a reset or other cleanup here */
+ }
+ if (intstat & OHCI_INTR_RHSC) {
+ isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
+ isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
+ isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
+ }
+ if (intstat & OHCI_INTR_RD) {
+ pr_info("%s: RESUME DETECTED\n", __func__);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ usb_hcd_resume_root_hub(hcd);
+ }
+ isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
+ irqstat &= ~HCuPINT_OPR;
+ handled = 1;
+ }
+
+ if (irqstat & HCuPINT_SUSP) {
+ isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
+ handled = 1;
+ svc_mask &= ~HCuPINT_SUSP;
+
+ pr_info("%s: SUSPEND IRQ\n", __func__);
+ }
+
+ if (irqstat & HCuPINT_CLKRDY) {
+ isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
+ handled = 1;
+ isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
+ svc_mask &= ~HCuPINT_CLKRDY;
+ pr_info("%s: CLKRDY IRQ\n", __func__);
+ }
+
+ if (svc_mask)
+ pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+ isp1362_hcd->irq_active--;
+ spin_unlock(&isp1362_hcd->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
+static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
+{
+ int i, branch = -ENOSPC;
+
+ /* search for the least loaded schedule branch of that interval
+ * which has enough bandwidth left unreserved.
+ */
+ for (i = 0; i < interval; i++) {
+ if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
+ int j;
+
+ for (j = i; j < PERIODIC_SIZE; j += interval) {
+ if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
+ pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
+ load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
+ break;
+ }
+ }
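+ /* j only reaches PERIODIC_SIZE if every slot of this branch can accept the new load */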
+ if (j < PERIODIC_SIZE)
+ continue;
+ branch = i;
+ }
+ }
+ return branch;
+}
+
+/*
+ * NB! ALL the code above this point runs with isp1362_hcd->lock
+ * held, irqs off
+ */
+
+/*-------------------------------------------------------------------------*/
+
+static int isp1362_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct usb_device *udev = urb->dev;
+ unsigned int pipe = urb->pipe;
+ int is_out = !usb_pipein(pipe);
+ int type = usb_pipetype(pipe);
+ int epnum = usb_pipeendpoint(pipe);
+ struct usb_host_endpoint *hep = urb->ep;
+ struct isp1362_ep *ep = NULL;
+ unsigned long flags;
+ int retval = 0;
+
+ DBG(3, "%s: urb %p\n", __func__, urb);
+
+ if (type == PIPE_ISOCHRONOUS) {
+ pr_err("Isochronous transfers not supported\n");
+ return -ENOSPC;
+ }
+
+ URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
+ usb_pipedevice(pipe), epnum,
+ is_out ? "out" : "in",
+ usb_pipecontrol(pipe) ? "ctrl" :
+ usb_pipeint(pipe) ? "int" :
+ usb_pipebulk(pipe) ? "bulk" :
+ "iso",
+ urb->transfer_buffer_length,
+ (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
+ !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
+ "short_ok" : "");
+
+ /* avoid all allocations within spinlocks: request or endpoint */
+ if (!hep->hcpriv) {
+ ep = kzalloc(sizeof(*ep), mem_flags);
+ if (!ep)
+ return -ENOMEM;
+ }
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ /* don't submit to a dead or disabled port */
+ if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
+ (1 << USB_PORT_FEAT_ENABLE)) ||
+ !HC_IS_RUNNING(hcd->state)) {
+ kfree(ep);
+ retval = -ENODEV;
+ goto fail_not_linked;
+ }
+
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval) {
+ kfree(ep);
+ goto fail_not_linked;
+ }
+
+ if (hep->hcpriv) {
+ ep = hep->hcpriv;
+ } else {
+ INIT_LIST_HEAD(&ep->schedule);
+ INIT_LIST_HEAD(&ep->active);
+ INIT_LIST_HEAD(&ep->remove_list);
+ ep->udev = usb_get_dev(udev);
+ ep->hep = hep;
+ ep->epnum = epnum;
+ ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
+ ep->ptd_offset = -EINVAL;
+ ep->ptd_index = -EINVAL;
+ usb_settoggle(udev, epnum, is_out, 0);
+
+ if (type == PIPE_CONTROL)
+ ep->nextpid = USB_PID_SETUP;
+ else if (is_out)
+ ep->nextpid = USB_PID_OUT;
+ else
+ ep->nextpid = USB_PID_IN;
+
+ switch (type) {
+ case PIPE_ISOCHRONOUS:
+ case PIPE_INTERRUPT:
+ if (urb->interval > PERIODIC_SIZE)
+ urb->interval = PERIODIC_SIZE;
+ ep->interval = urb->interval;
+ ep->branch = PERIODIC_SIZE;
+ ep->load = usb_calc_bus_time(udev->speed, !is_out,
+ (type == PIPE_ISOCHRONOUS),
+ usb_maxpacket(udev, pipe, is_out)) / 1000;
+ break;
+ }
+ hep->hcpriv = ep;
+ }
+ ep->num_req = isp1362_hcd->req_serial++;
+
+ /* maybe put endpoint into schedule */
+ switch (type) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ if (list_empty(&ep->schedule)) {
+ DBG(1, "%s: Adding ep %p req %d to async schedule\n",
+ __func__, ep, ep->num_req);
+ list_add_tail(&ep->schedule, &isp1362_hcd->async);
+ }
+ break;
+ case PIPE_ISOCHRONOUS:
+ case PIPE_INTERRUPT:
+ urb->interval = ep->interval;
+
+ /* URB submitted for an already existing EP */
+ if (ep->branch < PERIODIC_SIZE)
+ break;
+
+ retval = balance(isp1362_hcd, ep->interval, ep->load);
+ if (retval < 0) {
+ pr_err("%s: balance returned %d\n", __func__, retval);
+ goto fail;
+ }
+ ep->branch = retval;
+ retval = 0;
+ isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+ DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
+ __func__, isp1362_hcd->fmindex, ep->branch,
+ ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
+ ~(PERIODIC_SIZE - 1)) + ep->branch,
+ (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
+
+ if (list_empty(&ep->schedule)) {
+ if (type == PIPE_ISOCHRONOUS) {
+ u16 frame = isp1362_hcd->fmindex;
+
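+ /* start at least 8 frames (or one interval) in the
+ * future, aligned to the interval and offset by the
+ * assigned branch
+ */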
+ frame += max_t(u16, 8, ep->interval);
+ frame &= ~(ep->interval - 1);
+ frame |= ep->branch;
+ if (frame_before(frame, isp1362_hcd->fmindex))
+ frame += ep->interval;
+ urb->start_frame = frame;
+
+ DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
+ list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
+ } else {
+ DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
+ list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
+ }
+ } else
+ DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
+
+ DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
+ ep->load / ep->interval, isp1362_hcd->load[ep->branch],
+ isp1362_hcd->load[ep->branch] + ep->load);
+ isp1362_hcd->load[ep->branch] += ep->load;
+ }
+
+ urb->hcpriv = hep;
+ ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
+
+ switch (type) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ start_atl_transfers(isp1362_hcd);
+ break;
+ case PIPE_INTERRUPT:
+ start_intl_transfers(isp1362_hcd);
+ break;
+ case PIPE_ISOCHRONOUS:
+ start_iso_transfers(isp1362_hcd);
+ break;
+ default:
+ BUG();
+ }
+ fail:
+ if (retval)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+ fail_not_linked:
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (retval)
+ DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
+ return retval;
+}
+
+static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct usb_host_endpoint *hep;
+ unsigned long flags;
+ struct isp1362_ep *ep;
+ int retval = 0;
+
+ DBG(3, "%s: urb %p\n", __func__, urb);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
+ goto done;
+
+ hep = urb->hcpriv;
+
+ if (!hep) {
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return -EIDRM;
+ }
+
+ ep = hep->hcpriv;
+ if (ep) {
+ /* In front of queue? */
+ if (ep->hep->urb_list.next == &urb->urb_list) {
+ if (!list_empty(&ep->active)) {
+ DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
+ urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
+ /* disable processing and queue PTD for removal */
+ remove_ptd(isp1362_hcd, ep);
+ urb = NULL;
+ }
+ }
+ if (urb) {
+ DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
+ ep->num_req);
+ finish_request(isp1362_hcd, ep, urb, status);
+ } else
+ DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
+ } else {
+ pr_warning("%s: No EP in URB %p\n", __func__, urb);
+ retval = -EINVAL;
+ }
+done:
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ DBG(3, "%s: exit\n", __func__);
+
+ return retval;
+}
+
+static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ struct isp1362_ep *ep = hep->hcpriv;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+
+ DBG(1, "%s: ep %p\n", __func__, ep);
+ if (!ep)
+ return;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ if (!list_empty(&hep->urb_list)) {
+ if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
+ DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
+ ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
+ remove_ptd(isp1362_hcd, ep);
+ pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
+ }
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ /* Wait for interrupt to clear out active list */
+ while (!list_empty(&ep->active))
+ msleep(1);
+
+ DBG(1, "%s: Freeing EP %p\n", __func__, ep);
+
+ usb_put_dev(ep->udev);
+ kfree(ep);
+ hep->hcpriv = NULL;
+}
+
+static int isp1362_get_frame(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ u32 fmnum;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ return (int)fmnum;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Adapted from ohci-hub.c */
+static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ int ports, i, changed = 0;
+ unsigned long flags;
+
+ if (!HC_IS_RUNNING(hcd->state))
+ return -ESHUTDOWN;
+
+ /* Report no status change now if we are scheduled to be
+ called later */
+ if (timer_pending(&hcd->rh_timer))
+ return 0;
+
+ ports = isp1362_hcd->rhdesca & RH_A_NDP;
+ BUG_ON(ports > 2);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ /* init status */
+ if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
+ buf[0] = changed = 1;
+ else
+ buf[0] = 0;
+
+ for (i = 0; i < ports; i++) {
+ u32 status = isp1362_hcd->rhport[i];
+
+ if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
+ RH_PS_OCIC | RH_PS_PRSC)) {
+ changed = 1;
+ buf[0] |= 1 << (i + 1);
+ continue;
+ }
+
+ if (!(status & RH_PS_CCS))
+ continue;
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return changed;
+}
+
+static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
+ struct usb_hub_descriptor *desc)
+{
+ u32 reg = isp1362_hcd->rhdesca;
+
+ DBG(3, "%s: enter\n", __func__);
+
+ desc->bDescriptorType = 0x29;
+ desc->bDescLength = 9;
+ desc->bHubContrCurrent = 0;
+ desc->bNbrPorts = reg & 0x3;
+ /* Power switching, device type, overcurrent. */
+ desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
+ DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
+ desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
+ /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
+ desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
+ desc->bitmap[1] = ~0;
+
+ DBG(3, "%s: exit\n", __func__);
+}
+
+/* Adapted from ohci-hub.c */
+static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ int retval = 0;
+ unsigned long flags;
+ unsigned long t1;
+ int ports = isp1362_hcd->rhdesca & RH_A_NDP;
+ u32 tmp = 0;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ DBG(0, "ClearHubFeature: ");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ _DBG(0, "C_HUB_OVER_CURRENT\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
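+ /* fall through */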
+ case C_HUB_LOCAL_POWER:
+ _DBG(0, "C_HUB_LOCAL_POWER\n");
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetHubFeature:
+ DBG(0, "SetHubFeature: ");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case GetHubDescriptor:
+ DBG(0, "GetHubDescriptor\n");
+ isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+ DBG(0, "GetHubStatus\n");
+ put_unaligned(cpu_to_le32(0), (__le32 *) buf);
+ break;
+ case GetPortStatus:
+#ifndef VERBOSE
+ DBG(0, "GetPortStatus\n");
+#endif
+ if (!wIndex || wIndex > ports)
+ goto error;
+ tmp = isp1362_hcd->rhport[--wIndex];
+ put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
+ break;
+ case ClearPortFeature:
+ DBG(0, "ClearPortFeature: ");
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ _DBG(0, "USB_PORT_FEAT_ENABLE\n");
+ tmp = RH_PS_CCS;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
+ tmp = RH_PS_PESC;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ tmp = RH_PS_POCI;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
+ tmp = RH_PS_PSSC;
+ break;
+ case USB_PORT_FEAT_POWER:
+ _DBG(0, "USB_PORT_FEAT_POWER\n");
+ tmp = RH_PS_LSDA;
+
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
+ tmp = RH_PS_CSC;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ tmp = RH_PS_OCIC;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ _DBG(0, "USB_PORT_FEAT_C_RESET\n");
+ tmp = RH_PS_PRSC;
+ break;
+ default:
+ goto error;
+ }
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
+ isp1362_hcd->rhport[wIndex] =
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ case SetPortFeature:
+ DBG(0, "SetPortFeature: ");
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+#ifdef CONFIG_USB_OTG
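+ /* NOTE: 'ohci' is not defined in this driver; this block appears
+ * to be carried over from ohci-hub.c and presumably only builds
+ * with CONFIG_USB_OTG disabled.
+ */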
+ if (ohci->hcd.self.otg_port == (wIndex + 1) &&
+ ohci->hcd.self.b_hnp_enable) {
+ start_hnp(ohci);
+ break;
+ }
+#endif
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
+ isp1362_hcd->rhport[wIndex] =
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ case USB_PORT_FEAT_POWER:
+ _DBG(0, "USB_PORT_FEAT_POWER\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
+ isp1362_hcd->rhport[wIndex] =
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ case USB_PORT_FEAT_RESET:
+ _DBG(0, "USB_PORT_FEAT_RESET\n");
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
+ while (time_before(jiffies, t1)) {
+ /* spin until any current reset finishes */
+ for (;;) {
+ tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
+ if (!(tmp & RH_PS_PRS))
+ break;
+ udelay(500);
+ }
+ if (!(tmp & RH_PS_CCS))
+ break;
+ /* Reset lasts 10 ms (according to the datasheet) */
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ }
+
+ isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
+ HCRHPORT1 + wIndex);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ default:
+ error:
+ /* "protocol stall" on error */
+ _DBG(0, "PROTOCOL STALL\n");
+ retval = -EPIPE;
+ }
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int isp1362_bus_suspend(struct usb_hcd *hcd)
+{
+ int status = 0;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+
+ if (time_before(jiffies, isp1362_hcd->next_statechange))
+ msleep(5);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+ switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_RESUME:
+ DBG(0, "%s: resume/suspend?\n", __func__);
+ isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
+ isp1362_hcd->hc_control |= OHCI_USB_RESET;
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ /* FALL THROUGH */
+ case OHCI_USB_RESET:
+ status = -EBUSY;
+ pr_warning("%s: needs reinit!\n", __func__);
+ goto done;
+ case OHCI_USB_SUSPEND:
+ pr_warning("%s: already suspended?\n", __func__);
+ goto done;
+ }
+ DBG(0, "%s: suspend root hub\n", __func__);
+
+ /* First stop any processing */
+ hcd->state = HC_STATE_QUIESCING;
+ if (!list_empty(&isp1362_hcd->atl_queue.active) ||
+ !list_empty(&isp1362_hcd->intl_queue.active) ||
+ !list_empty(&isp1362_hcd->istl_queue[0].active) ||
+ !list_empty(&isp1362_hcd->istl_queue[1].active)) {
+ int limit;
+
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
+ isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+ isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
+
+ DBG(0, "%s: stopping schedules ...\n", __func__);
+ limit = 2000;
+ while (limit > 0) {
+ udelay(250);
+ limit -= 250;
+ if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
+ break;
+ }
+ mdelay(7);
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
+ }
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
+ u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
+ finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
+ }
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
+ finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
+ if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
+ finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
+ }
+ DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+ isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
+ isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+
+ /* Suspend hub */
+ isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+
+#if 1
+ isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+ if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
+ pr_err("%s: controller won't suspend %08x\n", __func__,
+ isp1362_hcd->hc_control);
+ status = -EBUSY;
+ } else
+#endif
+ {
+ /* no resumes until devices finish suspending */
+ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
+ }
+done:
+ if (status == 0) {
+ hcd->state = HC_STATE_SUSPENDED;
+ DBG(0, "%s: HCD suspended: %08x\n", __func__,
+ isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return status;
+}
+
+static int isp1362_bus_resume(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ u32 port;
+ unsigned long flags;
+ int status = -EINPROGRESS;
+
+ if (time_before(jiffies, isp1362_hcd->next_statechange))
+ msleep(5);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
+ pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
+ if (hcd->state == HC_STATE_RESUMING) {
+ pr_warning("%s: duplicate resume\n", __func__);
+ status = 0;
+ } else
+ switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_SUSPEND:
+ DBG(0, "%s: resume root hub\n", __func__);
+ isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
+ isp1362_hcd->hc_control |= OHCI_USB_RESUME;
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ break;
+ case OHCI_USB_RESUME:
+ /* HCFS changes sometime after INTR_RD */
+ DBG(0, "%s: remote wakeup\n", __func__);
+ break;
+ case OHCI_USB_OPER:
+ DBG(0, "%s: odd resume\n", __func__);
+ status = 0;
+ hcd->self.root_hub->dev.power.power_state = PMSG_ON;
+ break;
+ default: /* RESET, we lost power */
+ DBG(0, "%s: root hub hardware reset\n", __func__);
+ status = -EBUSY;
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (status == -EBUSY) {
+ DBG(0, "%s: Restarting HC\n", __func__);
+ isp1362_hc_stop(hcd);
+ return isp1362_hc_start(hcd);
+ }
+ if (status != -EINPROGRESS)
+ return status;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
+ while (port--) {
+ u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
+
+ /* force global, not selective, resume */
+ if (!(stat & RH_PS_PSS)) {
+ DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
+ continue;
+ }
+ DBG(0, "%s: Resuming RH port %d\n", __func__, port);
+ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
+ }
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ /* Some controllers (Lucent) need extra-long delays */
+ hcd->state = HC_STATE_RESUMING;
+ mdelay(20 /* usb 11.5.1.10 */ + 15);
+
+ isp1362_hcd->hc_control = OHCI_USB_OPER;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ /* TRSMRCY */
+ msleep(10);
+
+ /* keep it alive for ~5x suspend + resume costs */
+ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
+
+ hcd->self.root_hub->dev.power.power_state = PMSG_ON;
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+#else
+#define isp1362_bus_suspend NULL
+#define isp1362_bus_resume NULL
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef STUB_DEBUG_FILE
+
+static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+}
+static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+}
+
+#else
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+static void dump_irq(struct seq_file *s, char *label, u16 mask)
+{
+ seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
+ mask & HCuPINT_CLKRDY ? " clkrdy" : "",
+ mask & HCuPINT_SUSP ? " susp" : "",
+ mask & HCuPINT_OPR ? " opr" : "",
+ mask & HCuPINT_EOT ? " eot" : "",
+ mask & HCuPINT_ATL ? " atl" : "",
+ mask & HCuPINT_SOF ? " sof" : "");
+}
+
+static void dump_int(struct seq_file *s, char *label, u32 mask)
+{
+ seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
+ mask & OHCI_INTR_MIE ? " MIE" : "",
+ mask & OHCI_INTR_RHSC ? " rhsc" : "",
+ mask & OHCI_INTR_FNO ? " fno" : "",
+ mask & OHCI_INTR_UE ? " ue" : "",
+ mask & OHCI_INTR_RD ? " rd" : "",
+ mask & OHCI_INTR_SF ? " sof" : "",
+ mask & OHCI_INTR_SO ? " so" : "");
+}
+
+static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
+{
+ seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
+ mask & OHCI_CTRL_RWC ? " rwc" : "",
+ mask & OHCI_CTRL_RWE ? " rwe" : "",
+ ({
+ char *hcfs;
+ switch (mask & OHCI_CTRL_HCFS) {
+ case OHCI_USB_OPER:
+ hcfs = " oper";
+ break;
+ case OHCI_USB_RESET:
+ hcfs = " reset";
+ break;
+ case OHCI_USB_RESUME:
+ hcfs = " resume";
+ break;
+ case OHCI_USB_SUSPEND:
+ hcfs = " suspend";
+ break;
+ default:
+ hcfs = " ?";
+ }
+ hcfs;
+ }));
+}
+
+static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
+{
+ seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
+ isp1362_read_reg32(isp1362_hcd, HCREVISION));
+ seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
+ isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+ seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
+ isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
+ seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
+ isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+ seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
+ isp1362_read_reg32(isp1362_hcd, HCINTENB));
+ seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
+ isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
+ seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
+ isp1362_read_reg32(isp1362_hcd, HCFMREM));
+ seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
+ isp1362_read_reg32(isp1362_hcd, HCFMNUM));
+ seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
+ isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
+ seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
+ isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
+ seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
+ isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
+ seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
+ isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
+ seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
+ seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
+ isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
+ isp1362_read_reg16(isp1362_hcd, HCHWCFG));
+ seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
+ isp1362_read_reg16(isp1362_hcd, HCDMACFG));
+ seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
+ isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
+ seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
+ isp1362_read_reg16(isp1362_hcd, HCuPINT));
+ seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
+ isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
+ seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
+ isp1362_read_reg16(isp1362_hcd, HCCHIPID));
+ seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
+ isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
+ seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
+ isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
+ seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
+ isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
+#if 0
+ seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
+ isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
+#endif
+ seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
+ isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
+ seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
+ isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
+ isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
+ seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
+ isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
+ seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
+ isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
+ seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
+ isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
+ seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
+ isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
+ seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
+ isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
+ isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
+ seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
+ isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
+#if 0
+ seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
+ isp1362_read_reg32(isp1362_hcd, HCATLDONE));
+#endif
+ seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
+ isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
+ seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
+ isp1362_read_reg32(isp1362_hcd, HCATLLAST));
+ seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
+ isp1362_read_reg16(isp1362_hcd, HCATLCURR));
+ seq_printf(s, "\n");
+ seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
+ isp1362_read_reg16(isp1362_hcd, HCATLDTC));
+ seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
+ isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
+}
+
+static int proc_isp1362_show(struct seq_file *s, void *unused)
+{
+ struct isp1362_hcd *isp1362_hcd = s->private;
+ struct isp1362_ep *ep;
+ int i;
+
+ seq_printf(s, "%s\n%s version %s\n",
+ isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
+
+ /* collect statistics to help estimate potential win for
+ * DMA engines that care about alignment (PXA)
+ */
+ seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
+ isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
+ isp1362_hcd->stat2, isp1362_hcd->stat1);
+ seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
+ seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
+ seq_printf(s, "max # ptds in ISTL fifo: %d\n",
+ max(isp1362_hcd->istl_queue[0].stat_maxptds,
+ isp1362_hcd->istl_queue[1].stat_maxptds));
+
+ /* FIXME: don't show the following in suspended state */
+ spin_lock_irq(&isp1362_hcd->lock);
+
+ dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
+ dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
+ dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
+ dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
+ dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
+
+ for (i = 0; i < NUM_ISP1362_IRQS; i++)
+ if (isp1362_hcd->irq_stat[i])
+ seq_printf(s, "%-15s: %d\n",
+ ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
+
+ dump_regs(s, isp1362_hcd);
+ list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
+ struct urb *urb;
+
+ seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
+ ({
+ char *s;
+ switch (ep->nextpid) {
+ case USB_PID_IN:
+ s = "in";
+ break;
+ case USB_PID_OUT:
+ s = "out";
+ break;
+ case USB_PID_SETUP:
+ s = "setup";
+ break;
+ case USB_PID_ACK:
+ s = "status";
+ break;
+ default:
+ s = "?";
+ break;
+ }
+ s;}), ep->maxpacket);
+ list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
+ seq_printf(s, " urb%p, %d/%d\n", urb,
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ }
+ }
+ if (!list_empty(&isp1362_hcd->async))
+ seq_printf(s, "\n");
+ dump_ptd_queue(&isp1362_hcd->atl_queue);
+
+ seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
+
+ list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
+ seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
+ isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
+
+ seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
+ ep->interval, ep,
+ (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
+ ep->udev->devnum, ep->epnum,
+ (ep->epnum == 0) ? "" :
+ ((ep->nextpid == USB_PID_IN) ?
+ "in" : "out"), ep->maxpacket);
+ }
+ dump_ptd_queue(&isp1362_hcd->intl_queue);
+
+ seq_printf(s, "ISO:\n");
+
+ list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
+ seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
+ ep->interval, ep,
+ (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
+ ep->udev->devnum, ep->epnum,
+ (ep->epnum == 0) ? "" :
+ ((ep->nextpid == USB_PID_IN) ?
+ "in" : "out"), ep->maxpacket);
+ }
+
+ spin_unlock_irq(&isp1362_hcd->lock);
+ seq_printf(s, "\n");
+
+ return 0;
+}
+
+static int proc_isp1362_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_isp1362_show, PDE(inode)->data);
+}
+
+static const struct file_operations proc_ops = {
+ .open = proc_isp1362_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* expect just one isp1362_hcd per system */
+static const char proc_filename[] = "driver/isp1362";
+
+static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+ struct proc_dir_entry *pde;
+
+ pde = create_proc_entry(proc_filename, 0, NULL);
+ if (pde == NULL) {
+ pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
+ return;
+ }
+
+ pde->proc_fops = &proc_ops;
+ pde->data = isp1362_hcd;
+ isp1362_hcd->pde = pde;
+}
+
+static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
+{
+ if (isp1362_hcd->pde)
+ remove_proc_entry(proc_filename, 0);
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
+{
+ int tmp = 20;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
+ isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
+ while (--tmp) {
+ mdelay(1);
+ if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
+ break;
+ }
+ if (!tmp)
+ pr_err("Software reset timeout\n");
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+}
+
+static int isp1362_mem_config(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+ u32 total;
+ u16 istl_size = ISP1362_ISTL_BUFSIZE;
+ u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
+ u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
+ u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
+ u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
+ u16 atl_size;
+ int i;
+
+ WARN_ON(istl_size & 3);
+ WARN_ON(atl_blksize & 3);
+ WARN_ON(intl_blksize & 3);
+ WARN_ON(atl_blksize < PTD_HEADER_SIZE);
+ WARN_ON(intl_blksize < PTD_HEADER_SIZE);
+
+ BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
+ if (atl_buffers > 32)
+ atl_buffers = 32;
+ atl_size = atl_buffers * atl_blksize;
+ total = atl_size + intl_size + istl_size;
+ dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
+ dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
+ istl_size / 2, istl_size, 0, istl_size / 2);
+ dev_info(hcd->self.controller, " INTL: %4d * (%3u+8): %4d @ $%04x\n",
+ ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
+ intl_size, istl_size);
+ dev_info(hcd->self.controller, " ATL : %4d * (%3u+8): %4d @ $%04x\n",
+ atl_buffers, atl_blksize - PTD_HEADER_SIZE,
+ atl_size, istl_size + intl_size);
+ dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
+ ISP1362_BUF_SIZE - total);
+
+ if (total > ISP1362_BUF_SIZE) {
+ dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
+ __func__, total, ISP1362_BUF_SIZE);
+ return -ENOMEM;
+ }
+
+ total = istl_size + intl_size + atl_size;
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
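+ /* on-chip buffer memory is partitioned as [ISTL0|ISTL1][INTL][ATL] */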
+ for (i = 0; i < 2; i++) {
+ isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
+ isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
+ isp1362_hcd->istl_queue[i].blk_size = 4;
+ INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
+ snprintf(isp1362_hcd->istl_queue[i].name,
+ sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
+ DBG(3, "%s: %5s buf $%04x %d\n", __func__,
+ isp1362_hcd->istl_queue[i].name,
+ isp1362_hcd->istl_queue[i].buf_start,
+ isp1362_hcd->istl_queue[i].buf_size);
+ }
+ isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
+
+ isp1362_hcd->intl_queue.buf_start = istl_size;
+ isp1362_hcd->intl_queue.buf_size = intl_size;
+ isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
+ isp1362_hcd->intl_queue.blk_size = intl_blksize;
+ isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
+ isp1362_hcd->intl_queue.skip_map = ~0;
+ INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
+
+ isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
+ isp1362_hcd->intl_queue.buf_size);
+ isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
+ isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
+ isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
+ isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
+ 1 << (ISP1362_INTL_BUFFERS - 1));
+
+ isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
+ isp1362_hcd->atl_queue.buf_size = atl_size;
+ isp1362_hcd->atl_queue.buf_count = atl_buffers;
+ isp1362_hcd->atl_queue.blk_size = atl_blksize;
+ isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
+ isp1362_hcd->atl_queue.skip_map = ~0;
+ INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
+
+ isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
+ isp1362_hcd->atl_queue.buf_size);
+ isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
+ isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
+ isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
+ isp1362_write_reg32(isp1362_hcd, HCATLLAST,
+ 1 << (atl_buffers - 1));
+
+ snprintf(isp1362_hcd->atl_queue.name,
+ sizeof(isp1362_hcd->atl_queue.name), "ATL");
+ snprintf(isp1362_hcd->intl_queue.name,
+ sizeof(isp1362_hcd->intl_queue.name), "INTL");
+ DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
+ isp1362_hcd->intl_queue.name,
+ isp1362_hcd->intl_queue.buf_start,
+ ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
+ isp1362_hcd->intl_queue.buf_size);
+ DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
+ isp1362_hcd->atl_queue.name,
+ isp1362_hcd->atl_queue.buf_start,
+ atl_buffers, isp1362_hcd->atl_queue.blk_size,
+ isp1362_hcd->atl_queue.buf_size);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ return 0;
+}
+
+static int isp1362_hc_reset(struct usb_hcd *hcd)
+{
+ int ret = 0;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long t;
+ unsigned long timeout = 100;
+ unsigned long flags;
+ int clkrdy = 0;
+
+ pr_info("%s:\n", __func__);
+
+ if (isp1362_hcd->board && isp1362_hcd->board->reset) {
+ isp1362_hcd->board->reset(hcd->self.controller, 1);
+ msleep(20);
+ if (isp1362_hcd->board->clock)
+ isp1362_hcd->board->clock(hcd->self.controller, 1);
+ isp1362_hcd->board->reset(hcd->self.controller, 0);
+ } else
+ isp1362_sw_reset(isp1362_hcd);
+
+ /* chip has been reset. First we need to see a clock */
+ t = jiffies + msecs_to_jiffies(timeout);
+ while (!clkrdy && time_before_eq(jiffies, t)) {
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (!clkrdy)
+ msleep(4);
+ }
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (!clkrdy) {
+ pr_err("Clock not ready after %lums\n", timeout);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static void isp1362_hc_stop(struct usb_hcd *hcd)
+{
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+ u32 tmp;
+
+ pr_info("%s:\n", __func__);
+
+ del_timer_sync(&hcd->rh_timer);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+ /* Switch off power for all ports */
+ tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
+ tmp &= ~(RH_A_NPS | RH_A_PSM);
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
+
+ /* Reset the chip */
+ if (isp1362_hcd->board && isp1362_hcd->board->reset)
+ isp1362_hcd->board->reset(hcd->self.controller, 1);
+ else
+ isp1362_sw_reset(isp1362_hcd);
+
+ if (isp1362_hcd->board && isp1362_hcd->board->clock)
+ isp1362_hcd->board->clock(hcd->self.controller, 0);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+}
+
+#ifdef CHIP_BUFFER_TEST
+static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
+{
+ int ret = 0;
+ u16 *ref;
+ unsigned long flags;
+
+ ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
+ if (ref) {
+ int offset;
+ u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
+
+ for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
+ ref[offset] = ~offset;
+ tst[offset] = offset;
+ }
+
+ for (offset = 0; offset < 4; offset++) {
+ int j;
+
+ for (j = 0; j < 8; j++) {
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
+ isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ if (memcmp(ref, tst, j)) {
+ ret = -ENODEV;
+ pr_err("%s: memory check with %d byte offset %d failed\n",
+ __func__, j, offset);
+ dump_data((u8 *)ref + offset, j);
+ dump_data((u8 *)tst + offset, j);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
+ isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
+ ret = -ENODEV;
+ pr_err("%s: memory check failed\n", __func__);
+ dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
+ }
+
+ for (offset = 0; offset < 256; offset++) {
+ int test_size = 0;
+
+ yield();
+
+ memset(tst, 0, ISP1362_BUF_SIZE);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+ isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
+ ISP1362_BUF_SIZE / 2)) {
+ pr_err("%s: Failed to clear buffer\n", __func__);
+ dump_data((u8 *)tst, ISP1362_BUF_SIZE);
+ break;
+ }
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
+ isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
+ offset * 2 + PTD_HEADER_SIZE, test_size);
+ isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
+ PTD_HEADER_SIZE + test_size);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
+ dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
+ dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
+ PTD_HEADER_SIZE + test_size);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
+ ret = -ENODEV;
+ pr_err("%s: memory check with offset %02x failed\n",
+ __func__, offset);
+ break;
+ }
+ pr_warning("%s: memory check with offset %02x ok after second read\n",
+ __func__, offset);
+ }
+ }
+ kfree(ref);
+ }
+ return ret;
+}
+#endif
+
+static int isp1362_hc_start(struct usb_hcd *hcd)
+{
+ int ret;
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct isp1362_platform_data *board = isp1362_hcd->board;
+ u16 hwcfg;
+ u16 chipid;
+ unsigned long flags;
+
+ pr_info("%s:\n", __func__);
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
+ pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
+ return -ENODEV;
+ }
+
+#ifdef CHIP_BUFFER_TEST
+ ret = isp1362_chip_test(isp1362_hcd);
+ if (ret)
+ return -ENODEV;
+#endif
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ /* clear interrupt status and disable all interrupt sources */
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
+
+ /* HW conf */
+ hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
+ if (board->sel15Kres)
+ hwcfg |= HCHWCFG_PULLDOWN_DS2 |
+ ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
+ if (board->clknotstop)
+ hwcfg |= HCHWCFG_CLKNOTSTOP;
+ if (board->oc_enable)
+ hwcfg |= HCHWCFG_ANALOG_OC;
+ if (board->int_act_high)
+ hwcfg |= HCHWCFG_INT_POL;
+ if (board->int_edge_triggered)
+ hwcfg |= HCHWCFG_INT_TRIGGER;
+ if (board->dreq_act_high)
+ hwcfg |= HCHWCFG_DREQ_POL;
+ if (board->dack_act_high)
+ hwcfg |= HCHWCFG_DACK_POL;
+ isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
+ isp1362_show_reg(isp1362_hcd, HCHWCFG);
+ isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ ret = isp1362_mem_config(hcd);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+
+ /* Root hub conf */
+ isp1362_hcd->rhdesca = 0;
+ if (board->no_power_switching)
+ isp1362_hcd->rhdesca |= RH_A_NPS;
+ if (board->power_switching_mode)
+ isp1362_hcd->rhdesca |= RH_A_PSM;
+ if (board->potpg)
+ isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
+ else
+ isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
+
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
+ isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
+
+ isp1362_hcd->rhdescb = RH_B_PPCM;
+ isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
+ isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
+
+ isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
+ isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
+ isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ isp1362_hcd->hc_control = OHCI_USB_OPER;
+ hcd->state = HC_STATE_RUNNING;
+
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ /* Set up interrupts */
+ isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
+ isp1362_hcd->intenb |= OHCI_INTR_RD;
+ isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
+ isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
+ isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
+
+ /* Go operational */
+ isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
+ /* enable global power */
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
+
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct hc_driver isp1362_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "ISP1362 Host Controller",
+ .hcd_priv_size = sizeof(struct isp1362_hcd),
+
+ .irq = isp1362_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ .reset = isp1362_hc_reset,
+ .start = isp1362_hc_start,
+ .stop = isp1362_hc_stop,
+
+ .urb_enqueue = isp1362_urb_enqueue,
+ .urb_dequeue = isp1362_urb_dequeue,
+ .endpoint_disable = isp1362_endpoint_disable,
+
+ .get_frame_number = isp1362_get_frame,
+
+ .hub_status_data = isp1362_hub_status_data,
+ .hub_control = isp1362_hub_control,
+ .bus_suspend = isp1362_bus_suspend,
+ .bus_resume = isp1362_bus_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define resource_len(r) (((r)->end - (r)->start) + 1)
+
+static int __devexit isp1362_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ struct resource *res;
+
+ remove_debug_file(isp1362_hcd);
+ DBG(0, "%s: Removing HCD\n", __func__);
+ usb_remove_hcd(hcd);
+
+ DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__,
+ (u32)isp1362_hcd->data_reg);
+ iounmap(isp1362_hcd->data_reg);
+
+ DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__,
+ (u32)isp1362_hcd->addr_reg);
+ iounmap(isp1362_hcd->addr_reg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
+ release_mem_region(res->start, resource_len(res));
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res) {
+ DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
+ release_mem_region(res->start, resource_len(res));
+ }
+
+ DBG(0, "%s: put_hcd\n", __func__);
+ usb_put_hcd(hcd);
+ DBG(0, "%s: Done\n", __func__);
+
+ return 0;
+}
+
+static int __init isp1362_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct isp1362_hcd *isp1362_hcd;
+ struct resource *addr, *data;
+ void __iomem *addr_reg;
+ void __iomem *data_reg;
+ int irq;
+ int retval = 0;
+
+ /* basic sanity checks first. board-specific init logic should
+ * have initialized these three resources and probably board-specific
+ * platform_data. we don't probe for IRQs, and do only minimal
+ * sanity checking.
+ */
+ if (pdev->num_resources < 3) {
+ retval = -ENODEV;
+ goto err1;
+ }
+
+ data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ if (!addr || !data || irq < 0) {
+ retval = -ENODEV;
+ goto err1;
+ }
+
+#ifdef CONFIG_USB_HCD_DMA
+ if (pdev->dev.dma_mask) {
+ struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+
+ if (!dma_res) {
+ retval = -ENODEV;
+ goto err1;
+ }
+ isp1362_hcd->data_dma = dma_res->start;
+ isp1362_hcd->max_dma_size = resource_len(dma_res);
+ }
+#else
+ if (pdev->dev.dma_mask) {
+ DBG(1, "won't do DMA");
+ retval = -ENODEV;
+ goto err1;
+ }
+#endif
+
+ if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
+ retval = -EBUSY;
+ goto err1;
+ }
+ addr_reg = ioremap(addr->start, resource_len(addr));
+ if (addr_reg == NULL) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ if (!request_mem_region(data->start, resource_len(data), hcd_name)) {
+ retval = -EBUSY;
+ goto err3;
+ }
+ data_reg = ioremap(data->start, resource_len(data));
+ if (data_reg == NULL) {
+ retval = -ENOMEM;
+ goto err4;
+ }
+
+ /* allocate and initialize hcd */
+ hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ retval = -ENOMEM;
+ goto err5;
+ }
+ hcd->rsrc_start = data->start;
+ isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ isp1362_hcd->data_reg = data_reg;
+ isp1362_hcd->addr_reg = addr_reg;
+
+ isp1362_hcd->next_statechange = jiffies;
+ spin_lock_init(&isp1362_hcd->lock);
+ INIT_LIST_HEAD(&isp1362_hcd->async);
+ INIT_LIST_HEAD(&isp1362_hcd->periodic);
+ INIT_LIST_HEAD(&isp1362_hcd->isoc);
+ INIT_LIST_HEAD(&isp1362_hcd->remove_list);
+ isp1362_hcd->board = pdev->dev.platform_data;
+#if USE_PLATFORM_DELAY
+ if (!isp1362_hcd->board->delay) {
+ dev_err(hcd->self.controller, "No platform delay function given\n");
+ retval = -ENODEV;
+ goto err6;
+ }
+#endif
+
+#ifdef CONFIG_ARM
+ if (isp1362_hcd->board)
+ set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING);
+#endif
+
+ retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED);
+ if (retval != 0)
+ goto err6;
+ pr_info("%s, irq %d\n", hcd->product_desc, irq);
+
+ create_debug_file(isp1362_hcd);
+
+ return 0;
+
+ err6:
+ DBG(0, "%s: Freeing dev %08x\n", __func__, (u32)isp1362_hcd);
+ usb_put_hcd(hcd);
+ err5:
+ DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, (u32)data_reg);
+ iounmap(data_reg);
+ err4:
+ DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
+ release_mem_region(data->start, resource_len(data));
+ err3:
+ DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, (u32)addr_reg);
+ iounmap(addr_reg);
+ err2:
+ DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
+ release_mem_region(addr->start, resource_len(addr));
+ err1:
+ pr_err("%s: init error, %d\n", __func__, retval);
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+ int retval = 0;
+
+ DBG(0, "%s: Suspending device\n", __func__);
+
+ if (state.event == PM_EVENT_FREEZE) {
+ DBG(0, "%s: Suspending root hub\n", __func__);
+ retval = isp1362_bus_suspend(hcd);
+ } else {
+ DBG(0, "%s: Suspending RH ports\n", __func__);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ }
+ if (retval == 0)
+ pdev->dev.power.power_state = state;
+ return retval;
+}
+
+static int isp1362_resume(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
+ unsigned long flags;
+
+ DBG(0, "%s: Resuming\n", __func__);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ DBG(0, "%s: Resume RH ports\n", __func__);
+ spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
+ spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ return 0;
+ }
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
+}
+#else
+#define isp1362_suspend NULL
+#define isp1362_resume NULL
+#endif
+
+static struct platform_driver isp1362_driver = {
+ .probe = isp1362_probe,
+ .remove = __devexit_p(isp1362_remove),
+
+ .suspend = isp1362_suspend,
+ .resume = isp1362_resume,
+ .driver = {
+ .name = (char *)hcd_name,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init isp1362_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+ pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
+ return platform_driver_register(&isp1362_driver);
+}
+module_init(isp1362_init);
+
+static void __exit isp1362_cleanup(void)
+{
+ platform_driver_unregister(&isp1362_driver);
+}
+module_exit(isp1362_cleanup);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
new file mode 100644
index 00000000000..fe60f62a32f
--- /dev/null
+++ b/drivers/usb/host/isp1362.h
@@ -0,0 +1,1079 @@
+/*
+ * ISP1362 HCD (Host Controller Driver) for USB.
+ *
+ * COPYRIGHT (C) by L. Wassmann <LW@KARO-electronics.de>
+ */
+
+/* ------------------------------------------------------------------------- */
+/*
+ * Platform specific compile time options
+ */
+#if defined(CONFIG_ARCH_KARO)
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/karo.h>
+
+#define USE_32BIT 1
+
+
+/* These options are mutually exclusive */
+#define USE_PLATFORM_DELAY 1
+#define USE_NDELAY 0
+/*
+ * MAX_ROOT_PORTS: Number of downstream ports
+ *
+ * The chip has two USB ports, one of which can be configured as
+ * a USB device port, so the value of this constant is implementation
+ * specific.
+ */
+#define MAX_ROOT_PORTS 2
+#define DUMMY_DELAY_ACCESS do {} while (0)
+
+/* insert platform specific definitions for other machines here */
+#elif defined(CONFIG_BLACKFIN)
+
+#include <linux/io.h>
+#define USE_32BIT 0
+#define MAX_ROOT_PORTS 2
+#define USE_PLATFORM_DELAY 0
+#define USE_NDELAY 1
+
+#define DUMMY_DELAY_ACCESS \
+ do { \
+ bfin_read16(ASYNC_BANK0_BASE); \
+ bfin_read16(ASYNC_BANK0_BASE); \
+ bfin_read16(ASYNC_BANK0_BASE); \
+ } while (0)
+
+#undef insw
+#undef outsw
+
+#define insw delayed_insw
+#define outsw delayed_outsw
+
+static inline void delayed_outsw(unsigned int addr, void *buf, int len)
+{
+ unsigned short *bp = (unsigned short *)buf;
+ while (len--) {
+ DUMMY_DELAY_ACCESS;
+ outw(*bp++, addr);
+ }
+}
+
+static inline void delayed_insw(unsigned int addr, void *buf, int len)
+{
+ unsigned short *bp = (unsigned short *)buf;
+ while (len--) {
+ DUMMY_DELAY_ACCESS;
+ *bp++ = inw((void *)addr);
+ }
+}
+
+#else
+
+#define MAX_ROOT_PORTS 2
+
+#define USE_32BIT 0
+
+/* These options are mutually exclusive */
+#define USE_PLATFORM_DELAY 0
+#define USE_NDELAY 0
+
+#define DUMMY_DELAY_ACCESS do {} while (0)
+
+#endif
+
+
+/* ------------------------------------------------------------------------- */
+
+#define USB_RESET_WIDTH 50
+#define MAX_XFER_SIZE 1023
+
+/* Buffer sizes */
+#define ISP1362_BUF_SIZE 4096
+#define ISP1362_ISTL_BUFSIZE 512
+#define ISP1362_INTL_BLKSIZE 64
+#define ISP1362_INTL_BUFFERS 16
+#define ISP1362_ATL_BLKSIZE 64
+
+#define ISP1362_REG_WRITE_OFFSET 0x80
+
+#ifdef ISP1362_DEBUG
+typedef const unsigned int isp1362_reg_t;
+
+#define REG_WIDTH_16 0x000
+#define REG_WIDTH_32 0x100
+#define REG_WIDTH_MASK 0x100
+#define REG_NO_MASK 0x0ff
+
+#define REG_ACCESS_R 0x200
+#define REG_ACCESS_W 0x400
+#define REG_ACCESS_RW 0x600
+#define REG_ACCESS_MASK 0x600
+
+#define ISP1362_REG_NO(r) ((r) & REG_NO_MASK)
+
+#define _BUG_ON(x) BUG_ON(x)
+#define _WARN_ON(x) WARN_ON(x)
+
+#define ISP1362_REG(name, addr, width, rw) \
+static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
+
+#define REG_ACCESS_TEST(r) BUG_ON(((r) & ISP1362_REG_WRITE_OFFSET) && !((r) & REG_ACCESS_W))
+#define REG_WIDTH_TEST(r, w) BUG_ON(((r) & REG_WIDTH_MASK) != (w))
+#else
+typedef const unsigned char isp1362_reg_t;
+#define ISP1362_REG_NO(r) (r)
+#define _BUG_ON(x) do {} while (0)
+#define _WARN_ON(x) do {} while (0)
+
+#define ISP1362_REG(name, addr, width, rw) \
+static isp1362_reg_t ISP1362_REG_##name = addr
+
+#define REG_ACCESS_TEST(r) do {} while (0)
+#define REG_WIDTH_TEST(r, w) do {} while (0)
+#endif
+
+/* OHCI compatible registers */
+/*
+ * Note: Some of the ISP1362 'OHCI' registers implement only
+ * a subset of the bits defined in the OHCI spec.
+ *
+ * Bitmasks for the individual bits of these registers are defined in "ohci.h"
+ */
+ISP1362_REG(HCREVISION, 0x00, REG_WIDTH_32, REG_ACCESS_R);
+ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCCMDSTAT, 0x02, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTSTAT, 0x03, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTENB, 0x04, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTDIS, 0x05, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCFMINTVL, 0x0d, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCFMREM, 0x0e, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCFMNUM, 0x0f, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCLSTHRESH, 0x11, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHDESCA, 0x12, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHDESCB, 0x13, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHSTATUS, 0x14, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHPORT1, 0x15, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCRHPORT2, 0x16, REG_WIDTH_32, REG_ACCESS_RW);
+
+/* Philips ISP1362 specific registers */
+ISP1362_REG(HCHWCFG, 0x20, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCHWCFG_DISABLE_SUSPEND (1 << 15)
+#define HCHWCFG_GLOBAL_PWRDOWN (1 << 14)
+#define HCHWCFG_PULLDOWN_DS2 (1 << 13)
+#define HCHWCFG_PULLDOWN_DS1 (1 << 12)
+#define HCHWCFG_CLKNOTSTOP (1 << 11)
+#define HCHWCFG_ANALOG_OC (1 << 10)
+#define HCHWCFG_ONEINT (1 << 9)
+#define HCHWCFG_DACK_MODE (1 << 8)
+#define HCHWCFG_ONEDMA (1 << 7)
+#define HCHWCFG_DACK_POL (1 << 6)
+#define HCHWCFG_DREQ_POL (1 << 5)
+#define HCHWCFG_DBWIDTH_MASK (0x03 << 3)
+#define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK)
+#define HCHWCFG_INT_POL (1 << 2)
+#define HCHWCFG_INT_TRIGGER (1 << 1)
+#define HCHWCFG_INT_ENABLE (1 << 0)
+
+ISP1362_REG(HCDMACFG, 0x21, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCDMACFG_CTR_ENABLE (1 << 7)
+#define HCDMACFG_BURST_LEN_MASK (0x03 << 5)
+#define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK)
+#define HCDMACFG_BURST_LEN_1 HCDMACFG_BURST_LEN(0)
+#define HCDMACFG_BURST_LEN_4 HCDMACFG_BURST_LEN(1)
+#define HCDMACFG_BURST_LEN_8 HCDMACFG_BURST_LEN(2)
+#define HCDMACFG_DMA_ENABLE (1 << 4)
+#define HCDMACFG_BUF_TYPE_MASK (0x07 << 1)
+#define HCDMACFG_BUF_TYPE(n) (((n) << 1) & HCDMACFG_BUF_TYPE_MASK)
+#define HCDMACFG_BUF_ISTL0 HCDMACFG_BUF_TYPE(0)
+#define HCDMACFG_BUF_ISTL1 HCDMACFG_BUF_TYPE(1)
+#define HCDMACFG_BUF_INTL HCDMACFG_BUF_TYPE(2)
+#define HCDMACFG_BUF_ATL HCDMACFG_BUF_TYPE(3)
+#define HCDMACFG_BUF_DIRECT HCDMACFG_BUF_TYPE(4)
+#define HCDMACFG_DMA_RW_SELECT (1 << 0)
+
+ISP1362_REG(HCXFERCTR, 0x22, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCuPINT, 0x24, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCuPINT_SOF (1 << 0)
+#define HCuPINT_ISTL0 (1 << 1)
+#define HCuPINT_ISTL1 (1 << 2)
+#define HCuPINT_EOT (1 << 3)
+#define HCuPINT_OPR (1 << 4)
+#define HCuPINT_SUSP (1 << 5)
+#define HCuPINT_CLKRDY (1 << 6)
+#define HCuPINT_INTL (1 << 7)
+#define HCuPINT_ATL (1 << 8)
+#define HCuPINT_OTG (1 << 9)
+
+ISP1362_REG(HCuPINTENB, 0x25, REG_WIDTH_16, REG_ACCESS_RW);
+/* same bit definitions apply as for HCuPINT */
+
+ISP1362_REG(HCCHIPID, 0x27, REG_WIDTH_16, REG_ACCESS_R);
+#define HCCHIPID_MASK 0xff00
+#define HCCHIPID_MAGIC 0x3600
+
+ISP1362_REG(HCSCRATCH, 0x28, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCSWRES, 0x29, REG_WIDTH_16, REG_ACCESS_W);
+#define HCSWRES_MAGIC 0x00f6
+
+ISP1362_REG(HCBUFSTAT, 0x2c, REG_WIDTH_16, REG_ACCESS_RW);
+#define HCBUFSTAT_ISTL0_FULL (1 << 0)
+#define HCBUFSTAT_ISTL1_FULL (1 << 1)
+#define HCBUFSTAT_INTL_ACTIVE (1 << 2)
+#define HCBUFSTAT_ATL_ACTIVE (1 << 3)
+#define HCBUFSTAT_RESET_HWPP (1 << 4)
+#define HCBUFSTAT_ISTL0_ACTIVE (1 << 5)
+#define HCBUFSTAT_ISTL1_ACTIVE (1 << 6)
+#define HCBUFSTAT_ISTL0_DONE (1 << 8)
+#define HCBUFSTAT_ISTL1_DONE (1 << 9)
+#define HCBUFSTAT_PAIRED_PTDPP (1 << 10)
+
+ISP1362_REG(HCDIRADDR, 0x32, REG_WIDTH_32, REG_ACCESS_RW);
+#define HCDIRADDR_ADDR_MASK 0x0000ffff
+#define HCDIRADDR_ADDR(n) (((n) << 0) & HCDIRADDR_ADDR_MASK)
+#define HCDIRADDR_COUNT_MASK 0xffff0000
+#define HCDIRADDR_COUNT(n) (((n) << 16) & HCDIRADDR_COUNT_MASK)
+ISP1362_REG(HCDIRDATA, 0x45, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCISTLBUFSZ, 0x30, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCISTL0PORT, 0x40, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCISTL1PORT, 0x42, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCISTLRATE, 0x47, REG_WIDTH_16, REG_ACCESS_RW);
+
+ISP1362_REG(HCINTLBUFSZ, 0x33, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCINTLPORT, 0x43, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCINTLBLKSZ, 0x53, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCINTLDONE, 0x17, REG_WIDTH_32, REG_ACCESS_R);
+ISP1362_REG(HCINTLSKIP, 0x18, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTLLAST, 0x19, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCINTLCURR, 0x1a, REG_WIDTH_16, REG_ACCESS_R);
+
+ISP1362_REG(HCATLBUFSZ, 0x34, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLPORT, 0x44, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLBLKSZ, 0x54, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLDONE, 0x1b, REG_WIDTH_32, REG_ACCESS_R);
+ISP1362_REG(HCATLSKIP, 0x1c, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCATLLAST, 0x1d, REG_WIDTH_32, REG_ACCESS_RW);
+ISP1362_REG(HCATLCURR, 0x1e, REG_WIDTH_16, REG_ACCESS_R);
+
+ISP1362_REG(HCATLDTC, 0x51, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(HCATLDTCTO, 0x52, REG_WIDTH_16, REG_ACCESS_RW);
+
+
+ISP1362_REG(OTGCONTROL, 0x62, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGSTATUS, 0x67, REG_WIDTH_16, REG_ACCESS_R);
+ISP1362_REG(OTGINT, 0x68, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGINTENB, 0x69, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGTIMER, 0x6A, REG_WIDTH_16, REG_ACCESS_RW);
+ISP1362_REG(OTGALTTMR, 0x6C, REG_WIDTH_16, REG_ACCESS_RW);
+
+/* Philips transfer descriptor, cpu-endian */
+struct ptd {
+ u16 count;
+#define PTD_COUNT_MSK (0x3ff << 0)
+#define PTD_TOGGLE_MSK (1 << 10)
+#define PTD_ACTIVE_MSK (1 << 11)
+#define PTD_CC_MSK (0xf << 12)
+ u16 mps;
+#define PTD_MPS_MSK (0x3ff << 0)
+#define PTD_SPD_MSK (1 << 10)
+#define PTD_LAST_MSK (1 << 11)
+#define PTD_EP_MSK (0xf << 12)
+ u16 len;
+#define PTD_LEN_MSK (0x3ff << 0)
+#define PTD_DIR_MSK (3 << 10)
+#define PTD_DIR_SETUP (0)
+#define PTD_DIR_OUT (1)
+#define PTD_DIR_IN (2)
+ u16 faddr;
+#define PTD_FA_MSK (0x7f << 0)
+/* PTD Byte 7: [StartingFrame (if ISO PTD) | StartingFrame[0..4], PollingRate[0..2] (if INT PTD)] */
+#define PTD_SF_ISO_MSK (0xff << 8)
+#define PTD_SF_INT_MSK (0x1f << 8)
+#define PTD_PR_MSK (0x07 << 13)
+} __attribute__ ((packed, aligned(2)));
+#define PTD_HEADER_SIZE sizeof(struct ptd)
+
+/* ------------------------------------------------------------------------- */
+/* Copied from ohci.h: */
+/*
+ * Hardware transfer status codes -- CC from PTD
+ */
+#define PTD_CC_NOERROR 0x00
+#define PTD_CC_CRC 0x01
+#define PTD_CC_BITSTUFFING 0x02
+#define PTD_CC_DATATOGGLEM 0x03
+#define PTD_CC_STALL 0x04
+#define PTD_DEVNOTRESP 0x05
+#define PTD_PIDCHECKFAIL 0x06
+#define PTD_UNEXPECTEDPID 0x07
+#define PTD_DATAOVERRUN 0x08
+#define PTD_DATAUNDERRUN 0x09
+ /* 0x0A, 0x0B reserved for hardware */
+#define PTD_BUFFEROVERRUN 0x0C
+#define PTD_BUFFERUNDERRUN 0x0D
+ /* 0x0E, 0x0F reserved for HCD */
+#define PTD_NOTACCESSED 0x0F
+
+
+/* map OHCI TD status codes (CC) to errno values */
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIMEDOUT,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -EALREADY,
+ /* (for HCD) */ -EALREADY
+};
+
+
+/*
+ * HcControl (control) register masks
+ */
+#define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */
+#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
+#define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */
+
+/* pre-shifted values for HCFS */
+# define OHCI_USB_RESET (0 << 6)
+# define OHCI_USB_RESUME (1 << 6)
+# define OHCI_USB_OPER (2 << 6)
+# define OHCI_USB_SUSPEND (3 << 6)
+
+/*
+ * HcCommandStatus (cmdstatus) register masks
+ */
+#define OHCI_HCR (1 << 0) /* host controller reset */
+#define OHCI_SOC (3 << 16) /* scheduling overrun count */
+
+/*
+ * masks used with interrupt registers:
+ * HcInterruptStatus (intrstatus)
+ * HcInterruptEnable (intrenable)
+ * HcInterruptDisable (intrdisable)
+ */
+#define OHCI_INTR_SO (1 << 0) /* scheduling overrun */
+#define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */
+#define OHCI_INTR_SF (1 << 2) /* start frame */
+#define OHCI_INTR_RD (1 << 3) /* resume detect */
+#define OHCI_INTR_UE (1 << 4) /* unrecoverable error */
+#define OHCI_INTR_FNO (1 << 5) /* frame number overflow */
+#define OHCI_INTR_RHSC (1 << 6) /* root hub status change */
+#define OHCI_INTR_OC (1 << 30) /* ownership change */
+#define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */
+
+/* roothub.portstatus [i] bits */
+#define RH_PS_CCS 0x00000001 /* current connect status */
+#define RH_PS_PES 0x00000002 /* port enable status*/
+#define RH_PS_PSS 0x00000004 /* port suspend status */
+#define RH_PS_POCI 0x00000008 /* port over current indicator */
+#define RH_PS_PRS 0x00000010 /* port reset status */
+#define RH_PS_PPS 0x00000100 /* port power status */
+#define RH_PS_LSDA 0x00000200 /* low speed device attached */
+#define RH_PS_CSC 0x00010000 /* connect status change */
+#define RH_PS_PESC 0x00020000 /* port enable status change */
+#define RH_PS_PSSC 0x00040000 /* port suspend status change */
+#define RH_PS_OCIC 0x00080000 /* over current indicator change */
+#define RH_PS_PRSC 0x00100000 /* port reset status change */
+
+/* roothub.status bits */
+#define RH_HS_LPS 0x00000001 /* local power status */
+#define RH_HS_OCI 0x00000002 /* over current indicator */
+#define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */
+#define RH_HS_LPSC 0x00010000 /* local power status change */
+#define RH_HS_OCIC 0x00020000 /* over current indicator change */
+#define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */
+
+/* roothub.b masks */
+#define RH_B_DR 0x0000ffff /* device removable flags */
+#define RH_B_PPCM 0xffff0000 /* port power control mask */
+
+/* roothub.a masks */
+#define RH_A_NDP (0xff << 0) /* number of downstream ports */
+#define RH_A_PSM (1 << 8) /* power switching mode */
+#define RH_A_NPS (1 << 9) /* no power switching */
+#define RH_A_DT (1 << 10) /* device type (mbz) */
+#define RH_A_OCPM (1 << 11) /* over current protection mode */
+#define RH_A_NOCP (1 << 12) /* no over current protection */
+#define RH_A_POTPGT (0xff << 24) /* power on to power good time */
+
+#define FI 0x2edf /* 12000 bits per frame (-1) */
+#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
+#define LSTHRESH 0x628 /* lowspeed bit threshold */
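+/*
+ * With the default frame interval FI = 0x2edf (11999 bit times), FSMP(FI)
+ * works out to (6 * (11999 - 210)) / 7 = 10104 = 0x2778; isp1362_hc_start()
+ * programs this value into the upper half of HCFMINTVL together with FI.
+ */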
+
+/* ------------------------------------------------------------------------- */
+
+/* PTD accessor macros. */
+#define PTD_GET_COUNT(p) (((p)->count & PTD_COUNT_MSK) >> 0)
+#define PTD_COUNT(v) (((v) << 0) & PTD_COUNT_MSK)
+#define PTD_GET_TOGGLE(p) (((p)->count & PTD_TOGGLE_MSK) >> 10)
+#define PTD_TOGGLE(v) (((v) << 10) & PTD_TOGGLE_MSK)
+#define PTD_GET_ACTIVE(p) (((p)->count & PTD_ACTIVE_MSK) >> 11)
+#define PTD_ACTIVE(v) (((v) << 11) & PTD_ACTIVE_MSK)
+#define PTD_GET_CC(p) (((p)->count & PTD_CC_MSK) >> 12)
+#define PTD_CC(v) (((v) << 12) & PTD_CC_MSK)
+#define PTD_GET_MPS(p) (((p)->mps & PTD_MPS_MSK) >> 0)
+#define PTD_MPS(v) (((v) << 0) & PTD_MPS_MSK)
+#define PTD_GET_SPD(p) (((p)->mps & PTD_SPD_MSK) >> 10)
+#define PTD_SPD(v) (((v) << 10) & PTD_SPD_MSK)
+#define PTD_GET_LAST(p) (((p)->mps & PTD_LAST_MSK) >> 11)
+#define PTD_LAST(v) (((v) << 11) & PTD_LAST_MSK)
+#define PTD_GET_EP(p) (((p)->mps & PTD_EP_MSK) >> 12)
+#define PTD_EP(v) (((v) << 12) & PTD_EP_MSK)
+#define PTD_GET_LEN(p) (((p)->len & PTD_LEN_MSK) >> 0)
+#define PTD_LEN(v) (((v) << 0) & PTD_LEN_MSK)
+#define PTD_GET_DIR(p) (((p)->len & PTD_DIR_MSK) >> 10)
+#define PTD_DIR(v) (((v) << 10) & PTD_DIR_MSK)
+#define PTD_GET_FA(p) (((p)->faddr & PTD_FA_MSK) >> 0)
+#define PTD_FA(v) (((v) << 0) & PTD_FA_MSK)
+#define PTD_GET_SF_INT(p) (((p)->faddr & PTD_SF_INT_MSK) >> 8)
+#define PTD_SF_INT(v) (((v) << 8) & PTD_SF_INT_MSK)
+#define PTD_GET_SF_ISO(p) (((p)->faddr & PTD_SF_ISO_MSK) >> 8)
+#define PTD_SF_ISO(v) (((v) << 8) & PTD_SF_ISO_MSK)
+#define PTD_GET_PR(p) (((p)->faddr & PTD_PR_MSK) >> 13)
+#define PTD_PR(v) (((v) << 13) & PTD_PR_MSK)
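+/*
+ * Example use of the accessors above (illustrative only, not taken from the
+ * driver): the header of an 8 byte OUT transfer to endpoint 1 of the device
+ * at address 2 could be built as
+ *   ptd->count = PTD_CC(PTD_NOTACCESSED) | PTD_TOGGLE(0);
+ *   ptd->mps   = PTD_MPS(8) | PTD_EP(1);
+ *   ptd->len   = PTD_LEN(8) | PTD_DIR(PTD_DIR_OUT);
+ *   ptd->faddr = PTD_FA(2);
+ * and decoded again with the matching PTD_GET_*() macros.
+ */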
+
+#define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */
+#define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE)
+
+struct isp1362_ep {
+ struct usb_host_endpoint *hep;
+ struct usb_device *udev;
+
+ /* philips transfer descriptor */
+ struct ptd ptd;
+
+ u8 maxpacket;
+ u8 epnum;
+ u8 nextpid;
+ u16 error_count;
+ u16 length; /* of current packet */
+ s16 ptd_offset; /* buffer offset in ISP1362 where
+ PTD has been stored
+ (for access thru HCDIRDATA) */
+ int ptd_index;
+ int num_ptds;
+ void *data; /* to databuf */
+ /* queue of active EPs (the ones transmitted to the chip) */
+ struct list_head active;
+
+ /* periodic schedule */
+ u8 branch;
+ u16 interval;
+ u16 load;
+ u16 last_iso;
+
+ /* async schedule */
+ struct list_head schedule; /* list of all EPs that need processing */
+ struct list_head remove_list;
+ int num_req;
+};
+
+struct isp1362_ep_queue {
+ struct list_head active; /* list of PTDs currently processed by HC */
+ atomic_t finishing;
+ unsigned long buf_map;
+ unsigned long skip_map;
+ int free_ptd;
+ u16 buf_start;
+ u16 buf_size;
+ u16 blk_size; /* PTD buffer block size for ATL and INTL */
+ u8 buf_count;
+ u8 buf_avail;
+ char name[16];
+
+ /* for statistical tracking */
+ u8 stat_maxptds; /* Max # of ptds seen simultaneously in fifo */
+ u8 ptd_count; /* number of ptds submitted to this queue */
+};
+
+struct isp1362_hcd {
+ spinlock_t lock;
+ void __iomem *addr_reg;
+ void __iomem *data_reg;
+
+ struct isp1362_platform_data *board;
+
+ struct proc_dir_entry *pde;
+ unsigned long stat1, stat2, stat4, stat8, stat16;
+
+ /* HC registers */
+ u32 intenb; /* "OHCI" interrupts */
+ u16 irqenb; /* uP interrupts */
+
+ /* Root hub registers */
+ u32 rhdesca;
+ u32 rhdescb;
+ u32 rhstatus;
+ u32 rhport[MAX_ROOT_PORTS];
+ unsigned long next_statechange;
+
+ /* HC control reg shadow copy */
+ u32 hc_control;
+
+ /* async schedule: control, bulk */
+ struct list_head async;
+
+ /* periodic schedule: int */
+ u16 load[PERIODIC_SIZE];
+ struct list_head periodic;
+ u16 fmindex;
+
+ /* periodic schedule: isochronous */
+ struct list_head isoc;
+ int istl_flip:1;
+ int irq_active:1;
+
+ /* Schedules for the current frame */
+ struct isp1362_ep_queue atl_queue;
+ struct isp1362_ep_queue intl_queue;
+ struct isp1362_ep_queue istl_queue[2];
+
+ /* list of PTDs retrieved from HC */
+ struct list_head remove_list;
+ enum {
+ ISP1362_INT_SOF,
+ ISP1362_INT_ISTL0,
+ ISP1362_INT_ISTL1,
+ ISP1362_INT_EOT,
+ ISP1362_INT_OPR,
+ ISP1362_INT_SUSP,
+ ISP1362_INT_CLKRDY,
+ ISP1362_INT_INTL,
+ ISP1362_INT_ATL,
+ ISP1362_INT_OTG,
+ NUM_ISP1362_IRQS
+ } IRQ_NAMES;
+ unsigned int irq_stat[NUM_ISP1362_IRQS];
+ int req_serial;
+};
+
+static inline const char *ISP1362_INT_NAME(int n)
+{
+ switch (n) {
+ case ISP1362_INT_SOF: return "SOF";
+ case ISP1362_INT_ISTL0: return "ISTL0";
+ case ISP1362_INT_ISTL1: return "ISTL1";
+ case ISP1362_INT_EOT: return "EOT";
+ case ISP1362_INT_OPR: return "OPR";
+ case ISP1362_INT_SUSP: return "SUSP";
+ case ISP1362_INT_CLKRDY: return "CLKRDY";
+ case ISP1362_INT_INTL: return "INTL";
+ case ISP1362_INT_ATL: return "ATL";
+ case ISP1362_INT_OTG: return "OTG";
+ default: return "unknown";
+ }
+}
+
+static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr)
+{
+ unsigned p = (unsigned)ptr;
+ if (!(p & 0xf))
+ isp1362_hcd->stat16++;
+ else if (!(p & 0x7))
+ isp1362_hcd->stat8++;
+ else if (!(p & 0x3))
+ isp1362_hcd->stat4++;
+ else if (!(p & 0x1))
+ isp1362_hcd->stat2++;
+ else
+ isp1362_hcd->stat1++;
+}
+
+static inline struct isp1362_hcd *hcd_to_isp1362_hcd(struct usb_hcd *hcd)
+{
+ return (struct isp1362_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd)
+{
+ return container_of((void *)isp1362_hcd, struct usb_hcd, hcd_priv);
+}
+
+#define frame_before(f1, f2) ((s16)((u16)f1 - (u16)f2) < 0)
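+/*
+ * frame_before() compares 16 bit frame numbers with wrap-around handling,
+ * e.g. frame_before(0xfffe, 0x0001) is true since the signed 16 bit
+ * difference 0xfffe - 0x0001 is negative.
+ */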
+
+/*
+ * ISP1362 HW Interface
+ */
+
+#ifdef ISP1362_DEBUG
+#define DBG(level, fmt...) \
+ do { \
+ if (dbg_level > level) \
+ pr_debug(fmt); \
+ } while (0)
+#define _DBG(level, fmt...) \
+ do { \
+ if (dbg_level > level) \
+ printk(fmt); \
+ } while (0)
+#else
+#define DBG(fmt...) do {} while (0)
+#define _DBG DBG
+#endif
+
+#ifdef VERBOSE
+# define VDBG(fmt...) DBG(3, fmt)
+#else
+# define VDBG(fmt...) do {} while (0)
+#endif
+
+#ifdef REGISTERS
+# define RDBG(fmt...) DBG(1, fmt)
+#else
+# define RDBG(fmt...) do {} while (0)
+#endif
+
+#ifdef URB_TRACE
+#define URB_DBG(fmt...) DBG(0, fmt)
+#else
+#define URB_DBG(fmt...) do {} while (0)
+#endif
+
+
+#if USE_PLATFORM_DELAY
+#if USE_NDELAY
+#error USE_PLATFORM_DELAY and USE_NDELAY defined simultaneously.
+#endif
+#define isp1362_delay(h, d) (h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d)
+#elif USE_NDELAY
+#define isp1362_delay(h, d) ndelay(d)
+#else
+#define isp1362_delay(h, d) do {} while (0)
+#endif
+
+#define get_urb(ep) ({ \
+ BUG_ON(list_empty(&ep->hep->urb_list)); \
+ container_of(ep->hep->urb_list.next, struct urb, urb_list); \
+})
+
+/* basic access functions for ISP1362 chip registers */
+/* NOTE: The contents of the address pointer register cannot be read back! The driver must
+ * ensure that all register accesses are performed with interrupts disabled, since the interrupt
+ * handler has no way of restoring the previous state.
+ */
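+/*
+ * Callers therefore wrap every register access in the HCD spinlock with
+ * interrupts disabled, e.g. (as in isp1362_hc_start()):
+ *   spin_lock_irqsave(&isp1362_hcd->lock, flags);
+ *   chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
+ *   spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ */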
+static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg)
+{
+ /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/
+ REG_ACCESS_TEST(reg);
+ _BUG_ON(!irqs_disabled());
+ DUMMY_DELAY_ACCESS;
+ writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg);
+ DUMMY_DELAY_ACCESS;
+ isp1362_delay(isp1362_hcd, 1);
+}
+
+static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val)
+{
+ _BUG_ON(!irqs_disabled());
+ DUMMY_DELAY_ACCESS;
+ writew(val, isp1362_hcd->data_reg);
+}
+
+static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
+{
+ u16 val;
+
+ _BUG_ON(!irqs_disabled());
+ DUMMY_DELAY_ACCESS;
+ val = readw(isp1362_hcd->data_reg);
+
+ return val;
+}
+
+static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val)
+{
+ _BUG_ON(!irqs_disabled());
+#if USE_32BIT
+ DUMMY_DELAY_ACCESS;
+ writel(val, isp1362_hcd->data_reg);
+#else
+ DUMMY_DELAY_ACCESS;
+ writew((u16)val, isp1362_hcd->data_reg);
+ DUMMY_DELAY_ACCESS;
+ writew(val >> 16, isp1362_hcd->data_reg);
+#endif
+}
+
+static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd)
+{
+ u32 val;
+
+ _BUG_ON(!irqs_disabled());
+#if USE_32BIT
+ DUMMY_DELAY_ACCESS;
+ val = readl(isp1362_hcd->data_reg);
+#else
+ DUMMY_DELAY_ACCESS;
+ val = (u32)readw(isp1362_hcd->data_reg);
+ DUMMY_DELAY_ACCESS;
+ val |= (u32)readw(isp1362_hcd->data_reg) << 16;
+#endif
+ return val;
+}
+
+/* use readsw/writesw to access the fifo whenever possible */
+/* assume HCDIRDATA or XFERCTR & addr_reg have been set up */
+static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
+{
+ u8 *dp = buf;
+ u16 data;
+
+ if (!len)
+ return;
+
+ _BUG_ON(!irqs_disabled());
+
+ RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf);
+#if USE_32BIT
+ if (len >= 4) {
+ RDBG("%s: Using readsl for %d dwords\n", __func__, len >> 2);
+ readsl(isp1362_hcd->data_reg, dp, len >> 2);
+ dp += len & ~3;
+ len &= 3;
+ }
+#endif
+ if (len >= 2) {
+ RDBG("%s: Using readsw for %d words\n", __func__, len >> 1);
+ insw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
+ dp += len & ~1;
+ len &= 1;
+ }
+
+ BUG_ON(len & ~1);
+ if (len > 0) {
+ data = isp1362_read_data16(isp1362_hcd);
+ RDBG("%s: Reading trailing byte %02x to mem @ %08x\n", __func__,
+ (u8)data, (u32)dp);
+ *dp = (u8)data;
+ }
+}
+
+static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len)
+{
+ u8 *dp = buf;
+ u16 data;
+
+ if (!len)
+ return;
+
+ if ((unsigned)dp & 0x1) {
+ /* not aligned */
+ for (; len > 1; len -= 2) {
+ data = *dp++;
+ data |= *dp++ << 8;
+ isp1362_write_data16(isp1362_hcd, data);
+ }
+ if (len)
+ isp1362_write_data16(isp1362_hcd, *dp);
+ return;
+ }
+
+ _BUG_ON(!irqs_disabled());
+
+ RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf);
+#if USE_32BIT
+ if (len >= 4) {
+ RDBG("%s: Using writesl for %d dwords\n", __func__, len >> 2);
+ writesl(isp1362_hcd->data_reg, dp, len >> 2);
+ dp += len & ~3;
+ len &= 3;
+ }
+#endif
+ if (len >= 2) {
+ RDBG("%s: Using writesw for %d words\n", __func__, len >> 1);
+ outsw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1);
+ dp += len & ~1;
+ len &= 1;
+ }
+
+ BUG_ON(len & ~1);
+ if (len > 0) {
+ /* finally write any trailing byte; we don't need to care
+ * about the high byte of the last word written
+ */
+ data = (u16)*dp;
+ RDBG("%s: Sending trailing byte %02x from mem @ %08x\n", __func__,
+ data, (u32)dp);
+ isp1362_write_data16(isp1362_hcd, data);
+ }
+}
+
+#define isp1362_read_reg16(d, r) ({ \
+ u16 __v; \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \
+ isp1362_write_addr(d, ISP1362_REG_##r); \
+ __v = isp1362_read_data16(d); \
+ RDBG("%s: Read %04x from %s[%02x]\n", __func__, __v, #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+ __v; \
+})
+
+#define isp1362_read_reg32(d, r) ({ \
+ u32 __v; \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \
+ isp1362_write_addr(d, ISP1362_REG_##r); \
+ __v = isp1362_read_data32(d); \
+ RDBG("%s: Read %08x from %s[%02x]\n", __func__, __v, #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+ __v; \
+})
+
+#define isp1362_write_reg16(d, r, v) { \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \
+ isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \
+ isp1362_write_data16(d, (u16)(v)); \
+ RDBG("%s: Wrote %04x to %s[%02x]\n", __func__, (u16)(v), #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+}
+
+#define isp1362_write_reg32(d, r, v) { \
+ REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \
+ isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \
+ isp1362_write_data32(d, (u32)(v)); \
+ RDBG("%s: Wrote %08x to %s[%02x]\n", __func__, (u32)(v), #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r)); \
+}
+
+#define isp1362_set_mask16(d, r, m) { \
+ u16 __v; \
+ __v = isp1362_read_reg16(d, r); \
+ if ((__v | m) != __v) \
+ isp1362_write_reg16(d, r, __v | m); \
+}
+
+#define isp1362_clr_mask16(d, r, m) { \
+ u16 __v; \
+ __v = isp1362_read_reg16(d, r); \
+ if ((__v & ~m) != __v) \
+ isp1362_write_reg16(d, r, __v & ~m); \
+}
+
+#define isp1362_set_mask32(d, r, m) { \
+ u32 __v; \
+ __v = isp1362_read_reg32(d, r); \
+ if ((__v | m) != __v) \
+ isp1362_write_reg32(d, r, __v | m); \
+}
+
+#define isp1362_clr_mask32(d, r, m) { \
+ u32 __v; \
+ __v = isp1362_read_reg32(d, r); \
+ if ((__v & ~m) != __v) \
+ isp1362_write_reg32(d, r, __v & ~m); \
+}
+
+#ifdef ISP1362_DEBUG
+#define isp1362_show_reg(d, r) { \
+ if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \
+ DBG(0, "%-12s[%02x]: %08x\n", #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg32(d, r)); \
+ else \
+ DBG(0, "%-12s[%02x]: %04x\n", #r, \
+ ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \
+}
+#else
+#define isp1362_show_reg(d, r) do {} while (0)
+#endif
+
+static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd)
+{
+ isp1362_show_reg(isp1362_hcd, HCREVISION);
+ isp1362_show_reg(isp1362_hcd, HCCONTROL);
+ isp1362_show_reg(isp1362_hcd, HCCMDSTAT);
+ isp1362_show_reg(isp1362_hcd, HCINTSTAT);
+ isp1362_show_reg(isp1362_hcd, HCINTENB);
+ isp1362_show_reg(isp1362_hcd, HCFMINTVL);
+ isp1362_show_reg(isp1362_hcd, HCFMREM);
+ isp1362_show_reg(isp1362_hcd, HCFMNUM);
+ isp1362_show_reg(isp1362_hcd, HCLSTHRESH);
+ isp1362_show_reg(isp1362_hcd, HCRHDESCA);
+ isp1362_show_reg(isp1362_hcd, HCRHDESCB);
+ isp1362_show_reg(isp1362_hcd, HCRHSTATUS);
+ isp1362_show_reg(isp1362_hcd, HCRHPORT1);
+ isp1362_show_reg(isp1362_hcd, HCRHPORT2);
+
+ isp1362_show_reg(isp1362_hcd, HCHWCFG);
+ isp1362_show_reg(isp1362_hcd, HCDMACFG);
+ isp1362_show_reg(isp1362_hcd, HCXFERCTR);
+ isp1362_show_reg(isp1362_hcd, HCuPINT);
+
+ if (in_interrupt())
+ DBG(0, "%-12s[%02x]: %04x\n", "HCuPINTENB",
+ ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_hcd->irqenb);
+ else
+ isp1362_show_reg(isp1362_hcd, HCuPINTENB);
+ isp1362_show_reg(isp1362_hcd, HCCHIPID);
+ isp1362_show_reg(isp1362_hcd, HCSCRATCH);
+ isp1362_show_reg(isp1362_hcd, HCBUFSTAT);
+ isp1362_show_reg(isp1362_hcd, HCDIRADDR);
+ /* Access would advance fifo
+ * isp1362_show_reg(isp1362_hcd, HCDIRDATA);
+ */
+ isp1362_show_reg(isp1362_hcd, HCISTLBUFSZ);
+ isp1362_show_reg(isp1362_hcd, HCISTLRATE);
+ isp1362_show_reg(isp1362_hcd, HCINTLBUFSZ);
+ isp1362_show_reg(isp1362_hcd, HCINTLBLKSZ);
+ isp1362_show_reg(isp1362_hcd, HCINTLDONE);
+ isp1362_show_reg(isp1362_hcd, HCINTLSKIP);
+ isp1362_show_reg(isp1362_hcd, HCINTLLAST);
+ isp1362_show_reg(isp1362_hcd, HCINTLCURR);
+ isp1362_show_reg(isp1362_hcd, HCATLBUFSZ);
+ isp1362_show_reg(isp1362_hcd, HCATLBLKSZ);
+ /* only valid after ATL_DONE interrupt
+ * isp1362_show_reg(isp1362_hcd, HCATLDONE);
+ */
+ isp1362_show_reg(isp1362_hcd, HCATLSKIP);
+ isp1362_show_reg(isp1362_hcd, HCATLLAST);
+ isp1362_show_reg(isp1362_hcd, HCATLCURR);
+ isp1362_show_reg(isp1362_hcd, HCATLDTC);
+ isp1362_show_reg(isp1362_hcd, HCATLDTCTO);
+}
+
+static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len)
+{
+ _BUG_ON(offset & 1);
+ _BUG_ON(offset >= ISP1362_BUF_SIZE);
+ _BUG_ON(len > ISP1362_BUF_SIZE);
+ _BUG_ON(offset + len > ISP1362_BUF_SIZE);
+ len = (len + 1) & ~1;
+
+ isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);
+ isp1362_write_reg32(isp1362_hcd, HCDIRADDR,
+ HCDIRADDR_ADDR(offset) | HCDIRADDR_COUNT(len));
+}
+
+static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
+{
+ _BUG_ON(offset & 1);
+
+ isp1362_write_diraddr(isp1362_hcd, offset, len);
+
+ DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %08x\n", __func__,
+ len, offset, (u32)buf);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+ _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
+
+ isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA);
+
+ isp1362_read_fifo(isp1362_hcd, buf, len);
+ _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+ _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
+}
+
+static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
+{
+ _BUG_ON(offset & 1);
+
+ isp1362_write_diraddr(isp1362_hcd, offset, len);
+
+ DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %08x\n", __func__,
+ len, offset, (u32)buf);
+
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+ _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
+
+ isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET);
+ isp1362_write_fifo(isp1362_hcd, buf, len);
+
+ _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
+ isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
+ _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
+}
+
+static void __attribute__((unused)) dump_data(char *buf, int len)
+{
+ if (dbg_level > 0) {
+ int k;
+ int lf = 0;
+
+ for (k = 0; k < len; ++k) {
+ if (!lf)
+ DBG(0, "%04x:", k);
+ printk(" %02x", ((u8 *) buf)[k]);
+ lf = 1;
+ if (!k)
+ continue;
+ if (k % 16 == 15) {
+ printk("\n");
+ lf = 0;
+ continue;
+ }
+ if (k % 8 == 7)
+ printk(" ");
+ if (k % 4 == 3)
+ printk(" ");
+ }
+ if (lf)
+ printk("\n");
+ }
+}
+
+#if defined(ISP1362_DEBUG) && defined(PTD_TRACE)
+
+static void dump_ptd(struct ptd *ptd)
+{
+ DBG(0, "EP %p: CC=%x EP=%d DIR=%x CNT=%d LEN=%d MPS=%d TGL=%x ACT=%x FA=%d SPD=%x SF=%x PR=%x LST=%x\n",
+ container_of(ptd, struct isp1362_ep, ptd),
+ PTD_GET_CC(ptd), PTD_GET_EP(ptd), PTD_GET_DIR(ptd),
+ PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd),
+ PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd), PTD_GET_FA(ptd),
+ PTD_GET_SPD(ptd), PTD_GET_SF_INT(ptd), PTD_GET_PR(ptd), PTD_GET_LAST(ptd));
+ DBG(0, " %04x %04x %04x %04x\n", ptd->count, ptd->mps, ptd->len, ptd->faddr);
+}
+
+static void dump_ptd_out_data(struct ptd *ptd, u8 *buf)
+{
+ if (dbg_level > 0) {
+ if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) {
+ DBG(0, "--out->\n");
+ dump_data(buf, PTD_GET_LEN(ptd));
+ }
+ }
+}
+
+static void dump_ptd_in_data(struct ptd *ptd, u8 *buf)
+{
+ if (dbg_level > 0) {
+ if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) {
+ DBG(0, "<--in--\n");
+ dump_data(buf, PTD_GET_COUNT(ptd));
+ }
+ DBG(0, "-----\n");
+ }
+}
+
+static void dump_ptd_queue(struct isp1362_ep_queue *epq)
+{
+ struct isp1362_ep *ep;
+ int dbg = dbg_level;
+
+ dbg_level = 1;
+ list_for_each_entry(ep, &epq->active, active) {
+ dump_ptd(&ep->ptd);
+ dump_data(ep->data, ep->length);
+ }
+ dbg_level = dbg;
+}
+#else
+#define dump_ptd(ptd) do {} while (0)
+#define dump_ptd_in_data(ptd, buf) do {} while (0)
+#define dump_ptd_out_data(ptd, buf) do {} while (0)
+#define dump_ptd_data(ptd, buf) do {} while (0)
+#define dump_ptd_queue(epq) do {} while (0)
+#endif
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 15438469f21..9600a58299d 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -386,6 +386,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
hwmode |= HW_DACK_POL_HIGH;
if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
hwmode |= HW_DREQ_POL_HIGH;
+ if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH)
+ hwmode |= HW_INTR_HIGH_ACT;
+ if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
+ hwmode |= HW_INTR_EDGE_TRIG;
/*
* We have to set this first in case we're in 16-bit mode.
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 462f4943cb1..6931ef5c965 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -142,6 +142,8 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
#define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */
#define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */
#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
+#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
+#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
/* chip memory management */
struct memory_chunk {
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index d4feebfc63b..1c9f977a5c9 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -3,6 +3,7 @@
* Currently there is support for
* - OpenFirmware
* - PCI
+ * - PDEV (generic platform device centralized driver model)
*
* (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
*
@@ -11,6 +12,7 @@
#include <linux/usb.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/usb/isp1760.h>
#include "../core/hcd.h"
#include "isp1760-hcd.h"
@@ -308,6 +310,8 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
struct resource *mem_res;
struct resource *irq_res;
resource_size_t mem_size;
+ struct isp1760_platform_data *priv = pdev->dev.platform_data;
+ unsigned int devflags = 0;
unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -330,8 +334,23 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
}
irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
+ if (priv) {
+ if (priv->is_isp1761)
+ devflags |= ISP1760_FLAG_ISP1761;
+ if (priv->bus_width_16)
+ devflags |= ISP1760_FLAG_BUS_WIDTH_16;
+ if (priv->port1_otg)
+ devflags |= ISP1760_FLAG_OTG_EN;
+ if (priv->analog_oc)
+ devflags |= ISP1760_FLAG_ANALOG_OC;
+ if (priv->dack_polarity_high)
+ devflags |= ISP1760_FLAG_DACK_POL_HIGH;
+ if (priv->dreq_polarity_high)
+ devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
+ }
+
hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
- irqflags, &pdev->dev, dev_name(&pdev->dev), 0);
+ irqflags, &pdev->dev, dev_name(&pdev->dev), devflags);
if (IS_ERR(hcd)) {
pr_warning("isp1760: Failed to register the HCD device\n");
ret = -ENODEV;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index bb5e6f67157..7ccffcbe7b6 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -148,7 +148,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
at91_start_hc(pdev);
ohci_hcd_init(hcd_to_ohci(hcd));
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 2ac4e022a13..e4380082ebb 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -248,10 +248,9 @@ static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
- pm_message_t message)
+static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
unsigned long flags;
int rc;
@@ -274,10 +273,6 @@ static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
(void)ohci_readl(ohci, &ohci->regs->intrdisable);
- /* make sure snapshot being resumed re-enumerates everything */
- if (message.event == PM_EVENT_PRETHAW)
- ohci_usb_reset(ohci);
-
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
au1xxx_stop_ohc();
@@ -287,9 +282,9 @@ bail:
return rc;
}
-static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
+static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
au1xxx_start_ohc();
@@ -298,20 +293,26 @@ static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
return 0;
}
+
+static struct dev_pm_ops au1xxx_ohci_pmops = {
+ .suspend = ohci_hcd_au1xxx_drv_suspend,
+ .resume = ohci_hcd_au1xxx_drv_resume,
+};
+
+#define AU1XXX_OHCI_PMOPS &au1xxx_ohci_pmops
+
#else
-#define ohci_hcd_au1xxx_drv_suspend NULL
-#define ohci_hcd_au1xxx_drv_resume NULL
+#define AU1XXX_OHCI_PMOPS NULL
#endif
static struct platform_driver ohci_hcd_au1xxx_driver = {
.probe = ohci_hcd_au1xxx_drv_probe,
.remove = ohci_hcd_au1xxx_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
- .suspend = ohci_hcd_au1xxx_drv_suspend,
- .resume = ohci_hcd_au1xxx_drv_resume,
.driver = {
.name = "au1xxx-ohci",
.owner = THIS_MODULE,
+ .pm = AU1XXX_OHCI_PMOPS,
},
};
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index b0dbf4157d2..4e681613e7a 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -188,7 +188,6 @@ static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int status;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 58151687d35..78bb7710f36 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -34,7 +34,6 @@
#include <linux/usb/otg.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-#include <linux/reboot.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e44dc2cbca2..f1c06202fdf 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -177,9 +177,13 @@ static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
if (inf->flags & NO_OC_PROTECTION)
uhcrhda |= UHCRHDA_NOCP;
+ else
+ uhcrhda &= ~UHCRHDA_NOCP;
if (inf->flags & OC_MODE_PERPORT)
uhcrhda |= UHCRHDA_OCPM;
+ else
+ uhcrhda &= ~UHCRHDA_OCPM;
if (inf->power_on_delay) {
uhcrhda &= ~UHCRHDA_POTPGT(0xff);
@@ -477,38 +481,47 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ohci_hcd_pxa27x_drv_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM
+static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
if (time_before(jiffies, ohci->ohci.next_statechange))
msleep(5);
ohci->ohci.next_statechange = jiffies;
- pxa27x_stop_hc(ohci, &pdev->dev);
+ pxa27x_stop_hc(ohci, dev);
hcd->state = HC_STATE_SUSPENDED;
return 0;
}
-static int ohci_hcd_pxa27x_drv_resume(struct platform_device *pdev)
+static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
+ struct pxaohci_platform_data *inf = dev->platform_data;
int status;
if (time_before(jiffies, ohci->ohci.next_statechange))
msleep(5);
ohci->ohci.next_statechange = jiffies;
- if ((status = pxa27x_start_hc(ohci, &pdev->dev)) < 0)
+ if ((status = pxa27x_start_hc(ohci, dev)) < 0)
return status;
+ /* Select Power Management Mode */
+ pxa27x_ohci_select_pmm(ohci, inf->port_mode);
+
ohci_finish_controller_resume(hcd);
return 0;
}
+
+static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
+ .suspend = ohci_hcd_pxa27x_drv_suspend,
+ .resume = ohci_hcd_pxa27x_drv_resume,
+};
#endif
/* work with hotplug and coldplug */
@@ -518,13 +531,12 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
.probe = ohci_hcd_pxa27x_drv_probe,
.remove = ohci_hcd_pxa27x_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
-#ifdef CONFIG_PM
- .suspend = ohci_hcd_pxa27x_drv_suspend,
- .resume = ohci_hcd_pxa27x_drv_resume,
-#endif
.driver = {
.name = "pxa27x-ohci",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &ohci_hcd_pxa27x_pm_ops,
+#endif
},
};
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index c2d80f80448..16fecb8ecc3 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -418,7 +418,7 @@ static struct ed *ed_get (
is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);
/* FIXME usbcore changes dev->devnum before SET_ADDRESS
- * suceeds ... otherwise we wouldn't need "pipe".
+ * succeeds ... otherwise we wouldn't need "pipe".
*/
info = usb_pipedevice (pipe);
ed->type = usb_pipetype(pipe);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 5ac489ee3da..50f57f46883 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -33,7 +33,6 @@
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
-#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 83b5f9cea85..23cf3bde476 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
quirk_usb_handoff_xhci(pdev);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
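
Switching the declaration from DECLARE_PCI_FIXUP_FINAL to DECLARE_PCI_FIXUP_HEADER moves the USB early-handoff quirk to the header fixup pass, which runs as each device's config header is parsed during enumeration instead of in the later final pass, so the BIOS hand-off happens before other code can touch the controller. A hedged sketch of the declaration pattern itself, with an invented hook name:

    #include <linux/pci.h>

    /* Illustrative fixup hook - the name and the message are made up. */
    static void example_usb_handoff(struct pci_dev *pdev)
    {
        dev_info(&pdev->dev, "example quirk ran at header-fixup time\n");
    }
    /* Runs per matching device during enumeration: */
    DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, example_usb_handoff);
    /* The old declaration would have deferred it to the final fixup pass:
     * DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, example_usb_handoff);
     */
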
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index a949259f18b..5b22a4d1c9e 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -719,8 +719,12 @@ retry:
/* port status seems weird until after reset, so
* force the reset and make khubd clean up later.
*/
- sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
- | (1 << USB_PORT_FEAT_CONNECTION);
+ if (sl811->stat_insrmv & 1)
+ sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
+ else
+ sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
+
+ sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
} else if (irqstat & SL11H_INTMASK_RD) {
if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) {
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 64e57bfe236..acd582c0280 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1422,7 +1422,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
goto err_submit_failed;
/* Add this URB to the QH */
- urbp->qh = qh;
list_add_tail(&urbp->node, &qh->queue);
/* If the new URB is the first and only one on this QH then either
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index c2050785a81..c632437c764 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -227,11 +227,21 @@ void scan_async_work(struct work_struct *work)
/*
* Now that the ASL is updated, complete the removal of any
* removed qsets.
+ *
+ * If the qset was to be reset, do so and reinsert it into the
+ * ASL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
qset_remove_complete(whc, qset);
+ if (qset->reset) {
+ qset_reset(whc, qset);
+ if (!list_empty(&qset->stds)) {
+ asl_qset_insert_begin(whc, qset);
+ queue_work(whc->workqueue, &whc->async_work);
+ }
+ }
}
spin_unlock_irq(&whc->lock);
@@ -267,7 +277,7 @@ int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
- if (!qset->in_sw_list)
+ if (!qset->in_sw_list && !qset->remove)
asl_qset_insert_begin(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index e019a5058ab..687b622a161 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -192,19 +192,23 @@ static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
struct whc_qset *qset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
qset = ep->hcpriv;
if (qset) {
qset->remove = 1;
+ qset->reset = 1;
if (usb_endpoint_xfer_bulk(&ep->desc)
|| usb_endpoint_xfer_control(&ep->desc))
queue_work(whc->workqueue, &whc->async_work);
else
queue_work(whc->workqueue, &whc->periodic_work);
-
- qset_reset(whc, qset);
}
+
+ spin_unlock_irqrestore(&whc->lock, flags);
}
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index ff4ef9e910d..a9e05bac664 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -255,11 +255,21 @@ void scan_periodic_work(struct work_struct *work)
/*
* Now that the PZL is updated, complete the removal of any
* removed qsets.
+ *
+ * If the qset was to be reset, do so and reinsert it into the
+ * PZL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
qset_remove_complete(whc, qset);
+ if (qset->reset) {
+ qset_reset(whc, qset);
+ if (!list_empty(&qset->stds)) {
+ qset_insert_in_sw_list(whc, qset);
+ queue_work(whc->workqueue, &whc->periodic_work);
+ }
+ }
}
spin_unlock_irq(&whc->lock);
@@ -295,7 +305,7 @@ int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
- if (!qset->in_sw_list)
+ if (!qset->in_sw_list && !qset->remove)
qset_insert_in_sw_list(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 640b38fbd05..1b9dc157157 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -103,7 +103,6 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->remove = 0;
qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
@@ -125,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
*/
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
- wait_for_completion(&qset->remove_complete);
+ qset->reset = 0;
qset->qh.status &= ~QH_STATUS_SEQ_MASK;
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
@@ -156,6 +155,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
+ qset->remove = 0;
list_del_init(&qset->list_node);
complete(&qset->remove_complete);
}
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index 794dba0d0f0..e8d0001605b 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -264,6 +264,7 @@ struct whc_qset {
unsigned in_sw_list:1;
unsigned in_hw_list:1;
unsigned remove:1;
+ unsigned reset:1;
struct urb *pause_after_urb;
struct completion remove_complete;
int max_burst;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 705e3432415..33128d52f21 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -413,7 +413,8 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
int i;
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
- dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
+ dma_addr_t dma = ctx->dma +
+ ((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
xhci_dbg(xhci, "Slot Context:\n");
@@ -459,7 +460,7 @@ void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
for (i = 0; i < last_ep_ctx; ++i) {
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
dma_addr_t dma = ctx->dma +
- ((unsigned long)ep_ctx - (unsigned long)ctx);
+ ((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 816c39caca1..99911e727e0 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -22,12 +22,18 @@
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
+static int link_quirk;
+module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* handshake - spin reading hc until handshake completes or fails
@@ -214,6 +220,12 @@ int xhci_init(struct usb_hcd *hcd)
xhci_dbg(xhci, "xhci_init\n");
spin_lock_init(&xhci->lock);
+ if (link_quirk) {
+ xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
+ xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+ } else {
+ xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+ }
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg(xhci, "Finished xhci_init\n");
@@ -339,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg)
xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
for (i = 0; i < MAX_HC_SLOTS; ++i) {
- if (xhci->devs[i]) {
- for (j = 0; j < 31; ++j) {
- if (xhci->devs[i]->ep_rings[j]) {
- xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
- xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
- }
- }
+ if (!xhci->devs[i])
+ continue;
+ for (j = 0; j < 31; ++j) {
+ struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
+ if (!ring)
+ continue;
+ xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+ xhci_debug_segment(xhci, ring->deq_seg);
}
}
@@ -555,13 +568,22 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
return 1 << (xhci_get_endpoint_index(desc) + 1);
}
+/* Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
+{
+ return 1 << (ep_index + 1);
+}
+
/* Compute the last valid endpoint context index. Basically, this is the
* endpoint index plus one. For slot contexts with more than one valid endpoint,
* we find the most significant bit set in the added contexts flags.
* e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
* fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
*/
-static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
return fls(added_ctxs) - 1;
}
@@ -589,6 +611,71 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
return 1;
}
+static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ struct usb_device *udev, struct xhci_command *command,
+ bool ctx_change, bool must_succeed);
+
+/*
+ * Full speed devices may have a max packet size greater than 8 bytes, but the
+ * USB core doesn't know that until it reads the first 8 bytes of the
+ * descriptor. If the usb_device's max packet size changes after that point,
+ * we need to issue an evaluate context command and wait on it.
+ */
+static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, struct urb *urb)
+{
+ struct xhci_container_ctx *in_ctx;
+ struct xhci_container_ctx *out_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_ep_ctx *ep_ctx;
+ int max_packet_size;
+ int hw_max_packet_size;
+ int ret = 0;
+
+ out_ctx = xhci->devs[slot_id]->out_ctx;
+ ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+ hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
+ max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
+ if (hw_max_packet_size != max_packet_size) {
+ xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
+ xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
+ max_packet_size);
+ xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
+ hw_max_packet_size);
+ xhci_dbg(xhci, "Issuing evaluate context command.\n");
+
+ /* Set up the modified control endpoint 0 */
+ xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+ xhci->devs[slot_id]->out_ctx, ep_index);
+ in_ctx = xhci->devs[slot_id]->in_ctx;
+ ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+ ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
+ ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
+
+ /* Set up the input context flags for the command */
+ /* FIXME: This won't work if a non-default control endpoint
+ * changes max packet sizes.
+ */
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx->add_flags = EP0_FLAG;
+ ctrl_ctx->drop_flags = 0;
+
+ xhci_dbg(xhci, "Slot %d input context\n", slot_id);
+ xhci_dbg_ctx(xhci, in_ctx, ep_index);
+ xhci_dbg(xhci, "Slot %d output context\n", slot_id);
+ xhci_dbg_ctx(xhci, out_ctx, ep_index);
+
+ ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
+ true, false);
+
+ /* Clean up the input context for later use by bandwidth
+ * functions.
+ */
+ ctrl_ctx->add_flags = SLOT_FLAG;
+ }
+ return ret;
+}
+
/*
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
@@ -600,13 +687,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
int ret = 0;
unsigned int slot_id, ep_index;
+
if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
return -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- spin_lock_irqsave(&xhci->lock, flags);
if (!xhci->devs || !xhci->devs[slot_id]) {
if (!in_interrupt())
dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
@@ -619,19 +706,38 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = -ESHUTDOWN;
goto exit;
}
- if (usb_endpoint_xfer_control(&urb->ep->desc))
+ if (usb_endpoint_xfer_control(&urb->ep->desc)) {
+ /* Check to see if the max packet size for the default control
+ * endpoint changed during FS device enumeration
+ */
+ if (urb->dev->speed == USB_SPEED_FULL) {
+ ret = xhci_check_maxpacket(xhci, slot_id,
+ ep_index, urb);
+ if (ret < 0)
+ return ret;
+ }
+
/* We have a spinlock and interrupts disabled, so we must pass
* atomic context to this function, which may allocate memory.
*/
+ spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
- else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
+ spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
- else
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
+ slot_id, ep_index);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ } else {
ret = -EINVAL;
+ }
exit:
- spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
@@ -674,6 +780,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct xhci_td *td;
unsigned int ep_index;
struct xhci_ring *ep_ring;
+ struct xhci_virt_ep *ep;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
@@ -686,17 +793,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+ ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+ ep_ring = ep->ring;
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
td = (struct xhci_td *) urb->hcpriv;
- ep_ring->cancels_pending++;
- list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+ ep->cancels_pending++;
+ list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
*/
- if (ep_ring->cancels_pending == 1) {
+ if (ep->cancels_pending == 1) {
xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
xhci_ring_cmd_db(xhci);
}
@@ -930,6 +1038,141 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
}
}
+static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
+ struct usb_device *udev, int *cmd_status)
+{
+ int ret;
+
+ switch (*cmd_status) {
+ case COMP_ENOMEM:
+ dev_warn(&udev->dev, "Not enough host controller resources "
+ "for new device state.\n");
+ ret = -ENOMEM;
+ /* FIXME: can we allocate more resources for the HC? */
+ break;
+ case COMP_BW_ERR:
+ dev_warn(&udev->dev, "Not enough bandwidth "
+ "for new device state.\n");
+ ret = -ENOSPC;
+ /* FIXME: can we go back to the old state? */
+ break;
+ case COMP_TRB_ERR:
+ /* the HCD set up something wrong */
+ dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
+ "add flag = 1, "
+ "and endpoint is not disabled.\n");
+ ret = -EINVAL;
+ break;
+ case COMP_SUCCESS:
+ dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+ ret = 0;
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", *cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
+ struct usb_device *udev, int *cmd_status)
+{
+ int ret;
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+
+ switch (*cmd_status) {
+ case COMP_EINVAL:
+ dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
+ "context command.\n");
+ ret = -EINVAL;
+ break;
+ case COMP_EBADSLT:
+ dev_warn(&udev->dev, "WARN: slot not enabled for"
+ "evaluate context command.\n");
+ case COMP_CTX_STATE:
+ dev_warn(&udev->dev, "WARN: invalid context state for "
+ "evaluate context command.\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
+ ret = -EINVAL;
+ break;
+ case COMP_SUCCESS:
+ dev_dbg(&udev->dev, "Successful evaluate context command\n");
+ ret = 0;
+ break;
+ default:
+ xhci_err(xhci, "ERROR: unexpected command completion "
+ "code 0x%x.\n", *cmd_status);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/* Issue a configure endpoint command or evaluate context command
+ * and wait for it to finish.
+ */
+static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct xhci_command *command,
+ bool ctx_change, bool must_succeed)
+{
+ int ret;
+ int timeleft;
+ unsigned long flags;
+ struct xhci_container_ctx *in_ctx;
+ struct completion *cmd_completion;
+ int *cmd_status;
+ struct xhci_virt_device *virt_dev;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev = xhci->devs[udev->slot_id];
+ if (command) {
+ in_ctx = command->in_ctx;
+ cmd_completion = command->completion;
+ cmd_status = &command->status;
+ command->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
+ } else {
+ in_ctx = virt_dev->in_ctx;
+ cmd_completion = &virt_dev->cmd_completion;
+ cmd_status = &virt_dev->cmd_status;
+ }
+
+ if (!ctx_change)
+ ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+ udev->slot_id, must_succeed);
+ else
+ ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
+ udev->slot_id);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+ return -ENOMEM;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ cmd_completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for %s command\n",
+ timeleft == 0 ? "Timeout" : "Signal",
+ ctx_change == 0 ?
+ "configure endpoint" :
+ "evaluate context");
+ /* FIXME cancel the configure endpoint command */
+ return -ETIME;
+ }
+
+ if (!ctx_change)
+ return xhci_configure_endpoint_result(xhci, udev, cmd_status);
+ return xhci_evaluate_context_result(xhci, udev, cmd_status);
+}
+
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
@@ -944,8 +1187,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
- int timeleft;
- unsigned long flags;
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -975,56 +1216,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg_ctx(xhci, virt_dev->in_ctx,
LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
- spin_lock_irqsave(&xhci->lock, flags);
- ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
- udev->slot_id);
- if (ret < 0) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
- return -ENOMEM;
- }
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
-
- /* Wait for the configure endpoint command to complete */
- timeleft = wait_for_completion_interruptible_timeout(
- &virt_dev->cmd_completion,
- USB_CTRL_SET_TIMEOUT);
- if (timeleft <= 0) {
- xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
- timeleft == 0 ? "Timeout" : "Signal");
- /* FIXME cancel the configure endpoint command */
- return -ETIME;
- }
-
- switch (virt_dev->cmd_status) {
- case COMP_ENOMEM:
- dev_warn(&udev->dev, "Not enough host controller resources "
- "for new device state.\n");
- ret = -ENOMEM;
- /* FIXME: can we allocate more resources for the HC? */
- break;
- case COMP_BW_ERR:
- dev_warn(&udev->dev, "Not enough bandwidth "
- "for new device state.\n");
- ret = -ENOSPC;
- /* FIXME: can we go back to the old state? */
- break;
- case COMP_TRB_ERR:
- /* the HCD set up something wrong */
- dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
- "and endpoint is not disabled.\n");
- ret = -EINVAL;
- break;
- case COMP_SUCCESS:
- dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
- break;
- default:
- xhci_err(xhci, "ERROR: unexpected command completion "
- "code 0x%x.\n", virt_dev->cmd_status);
- ret = -EINVAL;
- break;
- }
+ ret = xhci_configure_endpoint(xhci, udev, NULL,
+ false, false);
if (ret) {
/* Callee should call reset_bandwidth() */
return ret;
@@ -1037,10 +1230,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_zero_in_ctx(xhci, virt_dev);
/* Free any old rings */
for (i = 1; i < 31; ++i) {
- if (virt_dev->new_ep_rings[i]) {
- xhci_ring_free(xhci, virt_dev->ep_rings[i]);
- virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
- virt_dev->new_ep_rings[i] = NULL;
+ if (virt_dev->eps[i].new_ring) {
+ xhci_ring_free(xhci, virt_dev->eps[i].ring);
+ virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+ virt_dev->eps[i].new_ring = NULL;
}
}
@@ -1067,14 +1260,93 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; ++i) {
- if (virt_dev->new_ep_rings[i]) {
- xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
- virt_dev->new_ep_rings[i] = NULL;
+ if (virt_dev->eps[i].new_ring) {
+ xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
+ virt_dev->eps[i].new_ring = NULL;
}
}
xhci_zero_in_ctx(xhci, virt_dev);
}
+static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx,
+ u32 add_flags, u32 drop_flags)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ ctrl_ctx->add_flags = add_flags;
+ ctrl_ctx->drop_flags = drop_flags;
+ xhci_slot_copy(xhci, in_ctx, out_ctx);
+ ctrl_ctx->add_flags |= SLOT_FLAG;
+
+ xhci_dbg(xhci, "Input Context:\n");
+ xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
+}
+
+void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state)
+{
+ struct xhci_container_ctx *in_ctx;
+ struct xhci_ep_ctx *ep_ctx;
+ u32 added_ctxs;
+ dma_addr_t addr;
+
+ xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
+ xhci->devs[slot_id]->out_ctx, ep_index);
+ in_ctx = xhci->devs[slot_id]->in_ctx;
+ ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+ addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
+ deq_state->new_deq_ptr);
+ if (addr == 0) {
+ xhci_warn(xhci, "WARN Cannot submit config ep after "
+ "reset ep command\n");
+ xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
+ deq_state->new_deq_seg,
+ deq_state->new_deq_ptr);
+ return;
+ }
+ ep_ctx->deq = addr | deq_state->new_cycle_state;
+
+ added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
+ xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
+ xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
+}
+
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ struct usb_device *udev, unsigned int ep_index)
+{
+ struct xhci_dequeue_state deq_state;
+ struct xhci_virt_ep *ep;
+
+ xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+ ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+ /* We need to move the HW's dequeue pointer past this TD,
+ * or it will attempt to resend it on the next doorbell ring.
+ */
+ xhci_find_new_dequeue_state(xhci, udev->slot_id,
+ ep_index, ep->stopped_td,
+ &deq_state);
+
+ /* HW with the reset endpoint quirk will use the saved dequeue state to
+ * issue a configure endpoint command later.
+ */
+ if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
+ xhci_dbg(xhci, "Queueing new dequeue state\n");
+ xhci_queue_new_dequeue_state(xhci, udev->slot_id,
+ ep_index, &deq_state);
+ } else {
+ /* Better hope no one uses the input context between now and the
+ * reset endpoint completion!
+ */
+ xhci_dbg(xhci, "Setting up input context for "
+ "configure endpoint command\n");
+ xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
+ ep_index, &deq_state);
+ }
+}
+
/* Deal with stalled endpoints. The core should have sent the control message
* to clear the halt condition. However, we need to make the xHCI hardware
* reset its sequence number, since a device will expect a sequence number of
@@ -1089,8 +1361,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
int ret;
- struct xhci_dequeue_state deq_state;
- struct xhci_ring *ep_ring;
+ struct xhci_virt_ep *virt_ep;
xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *) ep->hcpriv;
@@ -1100,12 +1371,16 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
if (!ep->hcpriv)
return;
ep_index = xhci_get_endpoint_index(&ep->desc);
- ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
- if (!ep_ring->stopped_td) {
+ virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
+ if (!virt_ep->stopped_td) {
xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
ep->desc.bEndpointAddress);
return;
}
+ if (usb_endpoint_xfer_control(&ep->desc)) {
+ xhci_dbg(xhci, "Control endpoint stall already handled.\n");
+ return;
+ }
xhci_dbg(xhci, "Queueing reset endpoint command\n");
spin_lock_irqsave(&xhci->lock, flags);
@@ -1116,17 +1391,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
* command. Better hope that last command worked!
*/
if (!ret) {
- xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
- /* We need to move the HW's dequeue pointer past this TD,
- * or it will attempt to resend it on the next doorbell ring.
- */
- xhci_find_new_dequeue_state(xhci, udev->slot_id,
- ep_index, ep_ring->stopped_td, &deq_state);
- xhci_dbg(xhci, "Queueing new dequeue state\n");
- xhci_queue_new_dequeue_state(xhci, ep_ring,
- udev->slot_id,
- ep_index, &deq_state);
- kfree(ep_ring->stopped_td);
+ xhci_cleanup_stalled_ring(xhci, udev, ep_index);
+ kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci);
}
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1328,6 +1594,88 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
return 0;
}
+/* Once a hub descriptor is fetched for a device, we need to update the xHC's
+ * internal data structures for the device.
+ */
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *vdev;
+ struct xhci_command *config_cmd;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_slot_ctx *slot_ctx;
+ unsigned long flags;
+ unsigned think_time;
+ int ret;
+
+ /* Ignore root hubs */
+ if (!hdev->parent)
+ return 0;
+
+ vdev = xhci->devs[hdev->slot_id];
+ if (!vdev) {
+ xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
+ return -EINVAL;
+ }
+ config_cmd = xhci_alloc_command(xhci, true, mem_flags);
+ if (!config_cmd) {
+ xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
+ ctrl_ctx->add_flags |= SLOT_FLAG;
+ slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
+ slot_ctx->dev_info |= DEV_HUB;
+ if (tt->multi)
+ slot_ctx->dev_info |= DEV_MTT;
+ if (xhci->hci_version > 0x95) {
+ xhci_dbg(xhci, "xHCI version %x needs hub "
+ "TT think time and number of ports\n",
+ (unsigned int) xhci->hci_version);
+ slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
+ /* Set TT think time - convert from ns to FS bit times.
+ * 0 = 8 FS bit times, 1 = 16 FS bit times,
+ * 2 = 24 FS bit times, 3 = 32 FS bit times.
+ */
+ think_time = tt->think_time;
+ if (think_time != 0)
+ think_time = (think_time / 666) - 1;
+ slot_ctx->tt_info |= TT_THINK_TIME(think_time);
+ } else {
+ xhci_dbg(xhci, "xHCI version %x doesn't need hub "
+ "TT think time or number of ports\n",
+ (unsigned int) xhci->hci_version);
+ }
+ slot_ctx->dev_state = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ xhci_dbg(xhci, "Set up %s for hub device.\n",
+ (xhci->hci_version > 0x95) ?
+ "configure endpoint" : "evaluate context");
+ xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
+ xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
+
+ /* Issue and wait for the configure endpoint or
+ * evaluate context command.
+ */
+ if (xhci->hci_version > 0x95)
+ ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
+ false, false);
+ else
+ ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
+ true, false);
+
+ xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
+ xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
+
+ xhci_free_command(xhci, config_cmd);
+ return ret;
+}
+
int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
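
Much of this file's churn funnels command submission through the new xhci_configure_endpoint() helper: queue either a Configure Endpoint or an Evaluate Context command, ring the doorbell, then sleep on a completion via wait_for_completion_interruptible_timeout(). Callers that can share the per-device input context pass a NULL command (xhci_check_bandwidth(), xhci_check_maxpacket()); callers that must not race with it allocate a private struct xhci_command, as xhci_update_hub_device() does. A hedged sketch of that second style - only the xhci_* helpers are from the patch, the wrapper itself is hypothetical:

    /* Assumes the driver's internal definitions ("xhci.h"). Illustrative only:
     * issue an Evaluate Context command with a private command structure so
     * the wait does not collide with other users of the shared input context. */
    static int example_eval_context(struct xhci_hcd *xhci, struct usb_device *udev)
    {
        struct xhci_command *cmd;
        int ret;

        cmd = xhci_alloc_command(xhci, true /* allocate a completion */, GFP_KERNEL);
        if (!cmd)
            return -ENOMEM;

        /* ... seed cmd->in_ctx from the output context (xhci_slot_copy(),
         * xhci_endpoint_copy()) and set the input control context's
         * add_flags/drop_flags before issuing the command ... */

        /* ctx_change=true selects Evaluate Context rather than Configure
         * Endpoint; must_succeed=false lets failures be reported normally. */
        ret = xhci_configure_endpoint(xhci, udev, cmd, true, false);

        xhci_free_command(xhci, cmd);
        return ret;
    }

The link_quirk module parameter added at the top of this file is declared writable (S_IRUGO | S_IWUSR), so besides being set at module load time it is also visible for runtime inspection under /sys/module/.../parameters/.
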
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index e6b9a1c6002..1db4fea8c17 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -94,6 +94,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
+ /* Always set the chain bit with 0.95 hardware */
+ if (xhci_link_trb_quirk(xhci))
+ val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
}
xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
@@ -141,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return 0;
INIT_LIST_HEAD(&ring->td_list);
- INIT_LIST_HEAD(&ring->cancelled_td_list);
if (num_segs == 0)
return ring;
@@ -262,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
return;
for (i = 0; i < 31; ++i)
- if (dev->ep_rings[i])
- xhci_ring_free(xhci, dev->ep_rings[i]);
+ if (dev->eps[i].ring)
+ xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
@@ -278,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
struct xhci_virt_device *dev;
+ int i;
/* Slot ID 0 is reserved */
if (slot_id == 0 || xhci->devs[slot_id]) {
@@ -306,12 +309,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
+ /* Initialize the cancellation list for each endpoint */
+ for (i = 0; i < 31; i++)
+ INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+
/* Allocate endpoint 0 ring */
- dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
- if (!dev->ep_rings[0])
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+ if (!dev->eps[0].ring)
goto fail;
init_completion(&dev->cmd_completion);
+ INIT_LIST_HEAD(&dev->cmd_list);
/* Point to output device context in dcbaa. */
xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
@@ -352,9 +360,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= LAST_CTX(1);
+ slot_ctx->dev_info |= (u32) udev->route;
switch (udev->speed) {
case USB_SPEED_SUPER:
- slot_ctx->dev_info |= (u32) udev->route;
slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
break;
case USB_SPEED_HIGH:
@@ -382,14 +390,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
/* Is this a LS/FS device under a HS hub? */
- /*
- * FIXME: I don't think this is right, where does the TT info for the
- * roothub or parent hub come from?
- */
if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
udev->tt) {
slot_ctx->tt_info = udev->tt->hub->slot_id;
slot_ctx->tt_info |= udev->ttport << 8;
+ if (udev->tt->multi)
+ slot_ctx->dev_info |= DEV_MTT;
}
xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -398,22 +404,35 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* Step 5 */
ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
/*
- * See section 4.3 bullet 6:
- * The default Max Packet size for ep0 is "8 bytes for a USB2
- * LS/FS/HS device or 512 bytes for a USB3 SS device"
* XXX: Not sure about wireless USB devices.
*/
- if (udev->speed == USB_SPEED_SUPER)
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
ep0_ctx->ep_info2 |= MAX_PACKET(512);
- else
+ break;
+ case USB_SPEED_HIGH:
+ /* USB core guesses at a 64-byte max packet first for FS devices */
+ case USB_SPEED_FULL:
+ ep0_ctx->ep_info2 |= MAX_PACKET(64);
+ break;
+ case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= MAX_PACKET(8);
+ break;
+ case USB_SPEED_VARIABLE:
+ xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+ return -EINVAL;
+ break;
+ default:
+ /* New speed? */
+ BUG();
+ }
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |= MAX_BURST(0);
ep0_ctx->ep_info2 |= ERROR_COUNT(3);
ep0_ctx->deq =
- dev->ep_rings[0]->first_seg->dma;
- ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
+ dev->eps[0].ring->first_seg->dma;
+ ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
@@ -523,10 +542,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
/* Set up the endpoint ring */
- virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
- if (!virt_dev->new_ep_rings[ep_index])
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
+ if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
- ep_ring = virt_dev->new_ep_rings[ep_index];
+ ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
@@ -598,6 +618,48 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
*/
}
+/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
+ * Useful when you want to change one particular aspect of the endpoint and then
+ * issue a configure endpoint command.
+ */
+void xhci_endpoint_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx,
+ unsigned int ep_index)
+{
+ struct xhci_ep_ctx *out_ep_ctx;
+ struct xhci_ep_ctx *in_ep_ctx;
+
+ out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+ in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+
+ in_ep_ctx->ep_info = out_ep_ctx->ep_info;
+ in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
+ in_ep_ctx->deq = out_ep_ctx->deq;
+ in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+}
+
+/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
+ * Useful when you want to change one particular aspect of the endpoint and then
+ * issue a configure endpoint command. Only the context entries field matters,
+ * but we'll copy the whole thing anyway.
+ */
+void xhci_slot_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx)
+{
+ struct xhci_slot_ctx *in_slot_ctx;
+ struct xhci_slot_ctx *out_slot_ctx;
+
+ in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
+ out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
+
+ in_slot_ctx->dev_info = out_slot_ctx->dev_info;
+ in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
+ in_slot_ctx->tt_info = out_slot_ctx->tt_info;
+ in_slot_ctx->dev_state = out_slot_ctx->dev_state;
+}
+
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
@@ -695,6 +757,44 @@ static void scratchpad_free(struct xhci_hcd *xhci)
xhci->scratchpad = NULL;
}
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+ bool allocate_completion, gfp_t mem_flags)
+{
+ struct xhci_command *command;
+
+ command = kzalloc(sizeof(*command), mem_flags);
+ if (!command)
+ return NULL;
+
+ command->in_ctx =
+ xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
+ if (!command->in_ctx)
+ return NULL;
+
+ if (allocate_completion) {
+ command->completion =
+ kzalloc(sizeof(struct completion), mem_flags);
+ if (!command->completion) {
+ xhci_free_container_ctx(xhci, command->in_ctx);
+ return NULL;
+ }
+ init_completion(command->completion);
+ }
+
+ command->status = 0;
+ INIT_LIST_HEAD(&command->cmd_list);
+ return command;
+}
+
+void xhci_free_command(struct xhci_hcd *xhci,
+ struct xhci_command *command)
+{
+ xhci_free_container_ctx(xhci,
+ command->in_ctx);
+ kfree(command->completion);
+ kfree(command);
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
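
xhci_endpoint_copy() and xhci_slot_copy() exist so callers can seed the input context from the hardware-owned output context, change only the fields they care about, and then flag the touched contexts in the input control context - exactly what xhci_check_maxpacket() and xhci_setup_input_ctx_for_quirk() do above. A hedged sketch of that flow for one endpoint; the wrapper function and the particular tweak are illustrative:

    /* Assumes the driver's internal definitions ("xhci.h"). Illustrative only. */
    static void example_prepare_one_ep(struct xhci_hcd *xhci,
            struct xhci_virt_device *vdev, unsigned int ep_index)
    {
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;

        /* Start from the current (output) state of the slot and endpoint. */
        xhci_slot_copy(xhci, vdev->in_ctx, vdev->out_ctx);
        xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, ep_index);

        /* Example tweak: set the endpoint's max packet size to 512. */
        ep_ctx = xhci_get_ep_ctx(xhci, vdev->in_ctx, ep_index);
        ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
        ep_ctx->ep_info2 |= MAX_PACKET(512);

        /* Tell the xHC which contexts changed: this endpoint plus the slot. */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, vdev->in_ctx);
        ctrl_ctx->add_flags = xhci_get_endpoint_flag_from_index(ep_index) | SLOT_FLAG;
        ctrl_ctx->drop_flags = 0;
    }
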
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 592fe7e623f..06595ec27bb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -24,6 +24,10 @@
#include "xhci.h"
+/* Device for a quirk */
+#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
+#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+
static const char hcd_name[] = "xhci_hcd";
/* called after powerup, by probe or system-pm "wakeup" */
@@ -59,9 +63,20 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+ xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ xhci->hci_version = HC_VERSION(xhci->hcc_params);
xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
xhci_print_registers(xhci);
+ /* Look for vendor-specific quirks */
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+ pdev->revision == 0x0) {
+ xhci->quirks |= XHCI_RESET_EP_QUIRK;
+ xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint\n");
+ }
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
@@ -121,6 +136,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
+ .update_hub_device = xhci_update_hub_device,
/*
* scheduling support
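
The PCI glue now reads the capability register through hc_capbase first to extract the interface version (HC_VERSION) before the real HCC_PARAMS read, and keys hardware quirks off the PCI vendor, device, and revision IDs. A hedged sketch of how a further quirk entry would slot into the same pattern; the IDs and the choice of quirk bit are invented, not real hardware:

    /* Illustrative only - invented vendor/device IDs. */
    static void example_apply_vendor_quirks(struct xhci_hcd *xhci,
            struct pci_dev *pdev)
    {
        if (pdev->vendor == 0x1234 && pdev->device == 0x5678 &&
                pdev->revision < 0x10) {
            xhci->quirks |= XHCI_LINK_TRB_QUIRK;
            xhci_dbg(xhci, "QUIRK: example 0.95-class xHC, keeping the "
                    "chain bit set on link TRBs\n");
        }
    }
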
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index aa88a067148..173c39c7648 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -172,8 +172,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
* Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set.
- * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ * set, but other sections talk about dealing with the chain bit set. This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
@@ -191,8 +192,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (!consumer) {
if (ring != xhci->event_ring) {
- next->link.control &= ~TRB_CHAIN;
- next->link.control |= chain;
+ /* If we're not dealing with 0.95 hardware,
+ * carry over the chain bit of the previous TRB
+ * (which may mean the chain bit is cleared).
+ */
+ if (!xhci_link_trb_quirk(xhci)) {
+ next->link.control &= ~TRB_CHAIN;
+ next->link.control |= chain;
+ }
/* Give this link TRB to the hardware */
wmb();
if (next->link.control & TRB_CYCLE)
@@ -289,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
- struct xhci_ring *ep_ring;
+ struct xhci_virt_ep *ep;
+ unsigned int ep_state;
u32 field;
__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
- ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
*/
- if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
- && !(ep_ring->state & EP_HALTED)) {
+ if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+ && !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
/* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -354,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_td *cur_td, struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
- struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+ struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
@@ -362,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_cycle_state = 0;
xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
- ep_ring->stopped_trb,
+ dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
if (!state->new_deq_seg)
BUG();
@@ -442,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
- struct xhci_ring *ep_ring, unsigned int slot_id,
- unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state)
{
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+
xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
deq_state->new_deq_seg,
@@ -461,8 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
* if the ring is running, and ringing the doorbell starts the
* ring running.
*/
- ep_ring->state |= SET_DEQ_PENDING;
- xhci_ring_cmd_db(xhci);
+ ep->ep_state |= SET_DEQ_PENDING;
}
/*
@@ -481,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id;
unsigned int ep_index;
struct xhci_ring *ep_ring;
+ struct xhci_virt_ep *ep;
struct list_head *entry;
struct xhci_td *cur_td = 0;
struct xhci_td *last_unlinked_td;
@@ -493,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
memset(&deq_state, 0, sizeof(deq_state));
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
- ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep_ring = ep->ring;
- if (list_empty(&ep_ring->cancelled_td_list))
+ if (list_empty(&ep->cancelled_td_list))
return;
/* Fix up the ep ring first, so HW stops executing cancelled TDs.
@@ -503,7 +515,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* it. We're also in the event handler, so we can't get re-interrupted
* if another Stop Endpoint command completes
*/
- list_for_each(entry, &ep_ring->cancelled_td_list) {
+ list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
cur_td->first_trb,
@@ -512,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* If we stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
- if (cur_td == ep_ring->stopped_td)
+ if (cur_td == ep->stopped_td)
xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
&deq_state);
else
@@ -523,14 +535,15 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* the cancelled TD list for URB completion later.
*/
list_del(&cur_td->td_list);
- ep_ring->cancels_pending--;
+ ep->cancels_pending--;
}
last_unlinked_td = cur_td;
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
- xhci_queue_new_dequeue_state(xhci, ep_ring,
+ xhci_queue_new_dequeue_state(xhci,
slot_id, ep_index, &deq_state);
+ xhci_ring_cmd_db(xhci);
} else {
/* Otherwise just ring the doorbell to restart the ring */
ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -543,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* So stop when we've completed the URB for the last TD we unlinked.
*/
do {
- cur_td = list_entry(ep_ring->cancelled_td_list.next,
+ cur_td = list_entry(ep->cancelled_td_list.next,
struct xhci_td, cancelled_td_list);
list_del(&cur_td->cancelled_td_list);
@@ -590,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
dev = xhci->devs[slot_id];
- ep_ring = dev->ep_rings[ep_index];
+ ep_ring = dev->eps[ep_index].ring;
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
@@ -634,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ep_ctx->deq);
}
- ep_ring->state &= ~SET_DEQ_PENDING;
+ dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
ring_ep_doorbell(xhci, slot_id, ep_index);
}
@@ -644,18 +657,60 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
{
int slot_id;
unsigned int ep_index;
+ struct xhci_ring *ep_ring;
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
(unsigned int) GET_COMP_CODE(event->status));
- /* Clear our internal halted state and restart the ring */
- xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ /* HW with the reset endpoint quirk needs to have a configure endpoint
+ * command complete before the endpoint can be used. Queue that here
+ * because the HW can't handle two commands being queued in a row.
+ */
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+ xhci_dbg(xhci, "Queueing configure endpoint command\n");
+ xhci_queue_configure_endpoint(xhci,
+ xhci->devs[slot_id]->in_ctx->dma, slot_id,
+ false);
+ xhci_ring_cmd_db(xhci);
+ } else {
+ /* Clear our internal halted state and restart the ring */
+ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+ }
+}
+
+/* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1. Return 0 if the
+ * completed command isn't at the head of the command list.
+ */
+static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ struct xhci_event_cmd *event)
+{
+ struct xhci_command *command;
+
+ if (list_empty(&virt_dev->cmd_list))
+ return 0;
+
+ command = list_entry(virt_dev->cmd_list.next,
+ struct xhci_command, cmd_list);
+ if (xhci->cmd_ring->dequeue != command->command_trb)
+ return 0;
+
+ command->status =
+ GET_COMP_CODE(event->status);
+ list_del(&command->cmd_list);
+ if (command->completion)
+ complete(command->completion);
+ else
+ xhci_free_command(xhci, command);
+ return 1;
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
@@ -664,6 +719,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
int slot_id = TRB_TO_SLOT_ID(event->flags);
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_virt_device *virt_dev;
+ unsigned int ep_index;
+ struct xhci_ring *ep_ring;
+ unsigned int ep_state;
cmd_dma = event->cmd_trb;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
@@ -691,6 +751,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_free_virt_device(xhci, slot_id);
break;
case TRB_TYPE(TRB_CONFIG_EP):
+ virt_dev = xhci->devs[slot_id];
+ if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+ break;
+ /*
+ * Configure endpoint commands can come from the USB core
+ * configuration or alt setting changes, or because the HW
+ * needed an extra configure endpoint command after a reset
+ * endpoint command. In the latter case, the xHCI driver is
+ * not waiting on the configure endpoint command.
+ */
+ ctrl_ctx = xhci_get_input_control_ctx(xhci,
+ virt_dev->in_ctx);
+ /* Input ctx add_flags are the endpoint index plus one */
+ ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ if (!ep_ring) {
+ /* This must have been an initial configure endpoint */
+ xhci->devs[slot_id]->cmd_status =
+ GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
+ break;
+ }
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
+ "state = %d\n", ep_index, ep_state);
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+ ep_state & EP_HALTED) {
+ /* Clear our internal halted state and restart ring */
+ xhci->devs[slot_id]->eps[ep_index].ep_state &=
+ ~EP_HALTED;
+ ring_ep_doorbell(xhci, slot_id, ep_index);
+ } else {
+ xhci->devs[slot_id]->cmd_status =
+ GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
+ }
+ break;
+ case TRB_TYPE(TRB_EVAL_CONTEXT):
+ virt_dev = xhci->devs[slot_id];
+ if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+ break;
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);
break;
@@ -805,7 +906,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_transfer_event *event)
{
struct xhci_virt_device *xdev;
+ struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
+ unsigned int slot_id;
int ep_index;
struct xhci_td *td = 0;
dma_addr_t event_dma;
@@ -814,9 +917,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
struct urb *urb = 0;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
+ u32 trb_comp_code;
xhci_dbg(xhci, "In %s\n", __func__);
- xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+ slot_id = TRB_TO_SLOT_ID(event->flags);
+ xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
return -ENODEV;
@@ -825,7 +930,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(event->flags) - 1;
xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
- ep_ring = xdev->ep_rings[ep_index];
+ ep = &xdev->eps[ep_index];
+ ep_ring = ep->ring;
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
@@ -870,7 +976,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
(unsigned int) event->flags);
/* Look for common error cases */
- switch (GET_COMP_CODE(event->transfer_len)) {
+ trb_comp_code = GET_COMP_CODE(event->transfer_len);
+ switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
*/
@@ -885,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_STALL:
xhci_warn(xhci, "WARN: Stalled endpoint\n");
- ep_ring->state |= EP_HALTED;
+ ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
case COMP_TRB_ERR:
@@ -913,7 +1020,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Was this a control transfer? */
if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
xhci_debug_trb(xhci, xhci->event_ring->dequeue);
- switch (GET_COMP_CODE(event->transfer_len)) {
+ switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
@@ -928,8 +1035,37 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_SHORT_TX:
xhci_warn(xhci, "WARN: short transfer on control ep\n");
- status = -EREMOTEIO;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
break;
+ case COMP_BABBLE:
+ /* The 0.95 spec says a babbling control endpoint
+ * is not halted. The 0.96 spec says it is. Some HW
+ * claims to be 0.95 compliant, but it halts the control
+ * endpoint anyway. Check if a babble halted the
+ * endpoint.
+ */
+ if (ep_ctx->ep_info != EP_STATE_HALTED)
+ break;
+ /* else fall through */
+ case COMP_STALL:
+ /* Did we transfer part of the data (middle) phase? */
+ if (event_trb != ep_ring->dequeue &&
+ event_trb != td->last_trb)
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length
+ - TRB_LEN(event->transfer_len);
+ else
+ td->urb->actual_length = 0;
+
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+ xhci_ring_cmd_db(xhci);
+ goto td_cleanup;
default:
/* Others already handled above */
break;
@@ -943,7 +1079,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (event_trb == td->last_trb) {
if (td->urb->actual_length != 0) {
/* Don't overwrite a previously set error code */
- if (status == -EINPROGRESS || status == 0)
+ if ((status == -EINPROGRESS ||
+ status == 0) &&
+ (td->urb->transfer_flags
+ & URB_SHORT_NOT_OK))
/* Did we already see a short data stage? */
status = -EREMOTEIO;
} else {
@@ -952,7 +1091,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
} else {
/* Maybe the event was for the data stage? */
- if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
+ if (trb_comp_code != COMP_STOP_INVAL) {
/* We didn't stop on a link TRB in the middle */
td->urb->actual_length =
td->urb->transfer_buffer_length -
@@ -964,7 +1103,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
}
} else {
- switch (GET_COMP_CODE(event->transfer_len)) {
+ switch (trb_comp_code) {
case COMP_SUCCESS:
/* Double check that the HW transferred everything. */
if (event_trb != td->last_trb) {
@@ -975,7 +1114,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
else
status = 0;
} else {
- xhci_dbg(xhci, "Successful bulk transfer!\n");
+ if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+ xhci_dbg(xhci, "Successful bulk "
+ "transfer!\n");
+ else
+ xhci_dbg(xhci, "Successful interrupt "
+ "transfer!\n");
status = 0;
}
break;
@@ -1001,11 +1145,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td->urb->actual_length =
td->urb->transfer_buffer_length -
TRB_LEN(event->transfer_len);
- if (td->urb->actual_length < 0) {
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
xhci_warn(xhci, "HC gave bad length "
"of %d bytes left\n",
TRB_LEN(event->transfer_len));
td->urb->actual_length = 0;
+ if (td->urb->transfer_flags &
+ URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
}
/* Don't overwrite a previously set error code */
if (status == -EINPROGRESS) {
@@ -1041,30 +1191,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* If the ring didn't stop on a Link or No-op TRB, add
* in the actual bytes transferred from the Normal TRB
*/
- if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+ if (trb_comp_code != COMP_STOP_INVAL)
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]) -
TRB_LEN(event->transfer_len);
}
}
- if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
- GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+ if (trb_comp_code == COMP_STOP_INVAL ||
+ trb_comp_code == COMP_STOP) {
/* The Endpoint Stop Command completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
- ep_ring->stopped_td = td;
- ep_ring->stopped_trb = event_trb;
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
} else {
- if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+ if (trb_comp_code == COMP_STALL ||
+ trb_comp_code == COMP_BABBLE) {
/* The transfer is completed from the driver's
* perspective, but we need to issue a set dequeue
* command for this stalled endpoint to move the dequeue
* pointer past the TD. We can't do that here because
* the halt condition must be cleared first.
*/
- ep_ring->stopped_td = td;
- ep_ring->stopped_trb = event_trb;
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1072,16 +1223,41 @@ static int handle_tx_event(struct xhci_hcd *xhci,
inc_deq(xhci, ep_ring, false);
}
+td_cleanup:
/* Clean up the endpoint's TD list */
urb = td->urb;
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than
+ * the buffer length, urb->actual_length will be a very big
+ * number (since it's unsigned). Play it safe and say we didn't
+ * transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB transfer length is wrong, "
+ "xHC issue? req. len = %u, "
+ "act. len = %u\n",
+ urb->transfer_buffer_length,
+ urb->actual_length);
+ urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ status = -EREMOTEIO;
+ else
+ status = 0;
+ }
list_del(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list)) {
list_del(&td->cancelled_td_list);
- ep_ring->cancels_pending--;
+ ep->cancels_pending--;
}
- /* Leave the TD around for the reset endpoint function to use */
- if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+ /* Leave the TD around for the reset endpoint function to use
+ * (but only if it's not a control endpoint, since we already
+ * queued the Set TR dequeue pointer command for stalled
+ * control endpoints).
+ */
+ if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE)) {
kfree(td);
}
urb->hcpriv = NULL;
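As an aside on the td_cleanup length check added above: urb->actual_length is unsigned, so if the controller reports more residue than the request size, the subtraction wraps to a huge value; the guard treats that as "nothing transferred". A minimal standalone sketch of that arithmetic (plain user-space C, not driver code; the helper name is hypothetical):

    #include <stdio.h>

    /* Mirrors the driver's guard: an impossible length becomes zero. */
    static unsigned int sanitize_actual_length(unsigned int buf_len,
                                               unsigned int trb_bytes_left)
    {
            /* wraps when the HC reports more residue than the buffer holds */
            unsigned int actual = buf_len - trb_bytes_left;

            if (actual > buf_len)   /* wrapped: report nothing transferred */
                    return 0;
            return actual;
    }

    int main(void)
    {
            /* 512-byte request, bogus 600-byte residue from the controller */
            printf("%u\n", sanitize_actual_length(512, 600)); /* prints 0 */
            printf("%u\n", sanitize_actual_length(512, 12));  /* prints 500 */
            return 0;
    }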
@@ -1094,7 +1270,7 @@ cleanup:
if (urb) {
usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
- urb, td->urb->actual_length, status);
+ urb, urb->actual_length, status);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
spin_lock(&xhci->lock);
@@ -1235,7 +1411,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
{
int ret;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+ ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
ep_ctx->ep_info & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
@@ -1255,9 +1431,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
(*td)->urb = urb;
urb->hcpriv = (void *) (*td);
/* Add this TD to the tail of the endpoint ring's TD list */
- list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
- (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
- (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+ list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
+ (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
+ (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
return 0;
}
@@ -1335,6 +1511,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
ring_ep_doorbell(xhci, slot_id, ep_index);
}
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
+ xhci->devs[slot_id]->out_ctx, ep_index);
+ int xhci_interval;
+ int ep_interval;
+
+ xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+ ep_interval = urb->interval;
+ /* Convert to microframes */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ ep_interval *= 8;
+ /* FIXME change this to a warning and a suggestion to use the new API
+ * to set the polling interval (once the API is added).
+ */
+ if (xhci_interval != ep_interval) {
+ if (printk_ratelimit())
+ dev_dbg(&urb->dev->dev, "Driver uses different interval"
+ " (%d microframe%s) than xHCI "
+ "(%d microframe%s)\n",
+ ep_interval,
+ ep_interval == 1 ? "" : "s",
+ xhci_interval,
+ xhci_interval == 1 ? "" : "s");
+ urb->interval = xhci_interval;
+ /* Convert back to frames for LS/FS devices */
+ if (urb->dev->speed == USB_SPEED_LOW ||
+ urb->dev->speed == USB_SPEED_FULL)
+ urb->interval /= 8;
+ }
+ return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
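A sketch of the interval arithmetic used in xhci_queue_intr_tx() above: the endpoint context stores the interval as a power-of-two exponent in microframes (125 us units), while low/full-speed URBs express urb->interval in frames, hence the *8 and /8 conversions. Illustrative standalone C only; the numbers are made up:

    #include <stdio.h>

    /* Endpoint-context interval field is an exponent: 2^n microframes. */
    static int ctx_interval_to_uframes(int exponent)
    {
            return 1 << exponent;
    }

    int main(void)
    {
            int urb_interval_frames = 2;               /* driver asked: every 2 frames */
            int ep_interval = urb_interval_frames * 8; /* 16 microframes */
            int xhci_interval = ctx_interval_to_uframes(5); /* context says 2^5 = 32 */

            if (xhci_interval != ep_interval)
                    urb_interval_frames = xhci_interval / 8; /* adopt the xHC's value */

            printf("polling every %d frame(s)\n", urb_interval_frames); /* 4 */
            return 0;
    }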
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
@@ -1350,7 +1567,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_generic_trb *start_trb;
int start_cycle;
- ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_sgs;
@@ -1483,7 +1700,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->sg)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
- ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
@@ -1594,7 +1811,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 field, length_field;
struct xhci_td *td;
- ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
/*
* Need to copy setup packet into setup TRB, so we can't use the setup
@@ -1677,12 +1894,27 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/**** Command Ring Operations ****/
-/* Generic function for queueing a command TRB on the command ring */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+/* Generic function for queueing a command TRB on the command ring.
+ * Check to make sure there's room on the command ring for one command TRB.
+ * Also check that there's room reserved for commands that must not fail.
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE,
+ * then only check for the number of reserved spots.
+ * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
+ * because the command event handler may want to resubmit a failed command.
+ */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
+ u32 field3, u32 field4, bool command_must_succeed)
{
- if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+ int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+ if (!command_must_succeed)
+ reserved_trbs++;
+
+ if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
if (!in_interrupt())
xhci_err(xhci, "ERR: No room for command on command ring\n");
+ if (command_must_succeed)
+ xhci_err(xhci, "ERR: Reserved TRB counting for "
+ "unfailable commands failed.\n");
return -ENOMEM;
}
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
@@ -1693,7 +1925,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 fiel
/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
- return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+ return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
}
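A toy model of the reserved-TRB room check described in the comment above queue_command(): ordinary commands must leave the reserved slots untouched, while must-succeed commands are allowed to use them. This is a standalone sketch with a single-segment ring, not the driver's room_on_ring() implementation:

    #include <stdbool.h>
    #include <stdio.h>

    #define TRBS_PER_SEGMENT 64

    static bool room_on_ring(unsigned int used, unsigned int needed)
    {
            /* toy single-segment ring: one slot is lost to the link TRB */
            return (TRBS_PER_SEGMENT - 1 - used) >= needed;
    }

    static bool can_queue(unsigned int used, unsigned int reserved,
                          bool must_succeed)
    {
            unsigned int needed = reserved;

            if (!must_succeed)
                    needed++;  /* ordinary commands may not eat into the reserve */
            return room_on_ring(used, needed);
    }

    int main(void)
    {
            printf("%d\n", can_queue(61, 2, false)); /* 0: would eat the reserve */
            printf("%d\n", can_queue(61, 2, true));  /* 1: the reserve is for us */
            return 0;
    }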
/*
@@ -1712,7 +1944,7 @@ void *xhci_setup_one_noop(struct xhci_hcd *xhci)
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
return queue_command(xhci, 0, 0, 0,
- TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+ TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
@@ -1721,16 +1953,28 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
- TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+ TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
+ false);
}
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, bool command_must_succeed)
+{
+ return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
+ command_must_succeed);
+}
+
+/* Queue an evaluate context command TRB */
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
- TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+ TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
+ false);
}
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
@@ -1741,7 +1985,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
u32 type = TRB_TYPE(TRB_STOP_RING);
return queue_command(xhci, 0, 0, 0,
- trb_slot_id | trb_ep_index | type);
+ trb_slot_id | trb_ep_index | type, false);
}
/* Set Transfer Ring Dequeue Pointer command.
@@ -1765,7 +2009,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
upper_32_bits(addr), 0,
- trb_slot_id | trb_ep_index | type);
+ trb_slot_id | trb_ep_index | type, false);
}
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -1775,5 +2019,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
- return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+ return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
+ false);
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d31d32206ba..4b254b6fa24 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -509,6 +509,8 @@ struct xhci_slot_ctx {
#define MAX_EXIT (0xffff)
/* Root hub port number that is needed to access the USB device */
#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
+/* Maximum number of ports under a hub device */
+#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24)
/* tt_info bitmasks */
/*
@@ -522,6 +524,7 @@ struct xhci_slot_ctx {
* '0' if the device is not low or full speed.
*/
#define TT_PORT (0xff << 8)
+#define TT_THINK_TIME(p) (((p) & 0x3) << 16)
/* dev_state bitmasks */
/* USB device address - assigned by the HC */
@@ -581,6 +584,7 @@ struct xhci_ep_ctx {
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) ((p & 0xff) << 16)
+#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
/* ep_info2 bitmasks */
/*
@@ -589,6 +593,7 @@ struct xhci_ep_ctx {
*/
#define FORCE_EVENT (0x1)
#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
#define EP_TYPE(p) ((p) << 3)
#define ISOC_OUT_EP 1
#define BULK_OUT_EP 2
@@ -601,6 +606,8 @@ struct xhci_ep_ctx {
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p) (((p)&0xff) << 8)
#define MAX_PACKET(p) (((p)&0xffff) << 16)
+#define MAX_PACKET_MASK (0xffff << 16)
+#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
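A tiny standalone check of the encode/decode pair added above; the macro bodies are copied from this header, everything else is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PACKET(p)         (((p) & 0xffff) << 16)
    #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)

    int main(void)
    {
            uint32_t ep_info2 = MAX_PACKET(512);  /* encode into bits 16..31 */
            printf("%u\n", (unsigned int)MAX_PACKET_DECODED(ep_info2)); /* 512 */
            return 0;
    }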
/**
@@ -616,11 +623,44 @@ struct xhci_input_control_ctx {
u32 rsvd2[6];
};
+/* Represents everything that is needed to issue a command on the command ring.
+ * It's useful to pre-allocate these for commands that must not fail due to
+ * out-of-memory errors, like freeing streams.
+ */
+struct xhci_command {
+ /* Input context for changing device state */
+ struct xhci_container_ctx *in_ctx;
+ u32 status;
+ /* If completion is null, no one is waiting on this command
+ * and the structure can be freed after the command completes.
+ */
+ struct completion *completion;
+ union xhci_trb *command_trb;
+ struct list_head cmd_list;
+};
+
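A toy model of the ownership rule spelled out in the struct xhci_command comment above: a NULL completion means nobody is waiting, so whoever handles the command's completion event frees it; otherwise the waiter does. Standalone, illustrative C only — the names are made up and this is not the driver's code path:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_command {
            int status;
            bool *completion;  /* NULL: nobody waits, the handler frees it */
    };

    static void command_done(struct toy_command *cmd, int status)
    {
            cmd->status = status;
            if (cmd->completion)
                    *cmd->completion = true;  /* a waiter exists; it frees cmd */
            else
                    free(cmd);                /* fire-and-forget command */
    }

    int main(void)
    {
            bool done = false;
            struct toy_command *cmd = calloc(1, sizeof(*cmd));

            if (!cmd)
                    return 1;
            cmd->completion = &done;
            command_done(cmd, 0);             /* pretend the event handler ran */
            if (done) {
                    printf("waiter sees status %d and frees the command\n",
                           cmd->status);
                    free(cmd);
            }
            return 0;
    }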
/* drop context bitmasks */
#define DROP_EP(x) (0x1 << x)
/* add context bitmasks */
#define ADD_EP(x) (0x1 << x)
+struct xhci_virt_ep {
+ struct xhci_ring *ring;
+ /* Temporary storage in case the configure endpoint command fails and we
+ * have to restore the device state to the previous state
+ */
+ struct xhci_ring *new_ring;
+ unsigned int ep_state;
+#define SET_DEQ_PENDING (1 << 0)
+#define EP_HALTED (1 << 1)
+ /* ---- Related to URB cancellation ---- */
+ struct list_head cancelled_td_list;
+ unsigned int cancels_pending;
+ /* The TRB that was last reported in a stopped endpoint ring */
+ union xhci_trb *stopped_trb;
+ struct xhci_td *stopped_td;
+};
+
struct xhci_virt_device {
/*
* Commands to the hardware are passed an "input context" that
@@ -633,16 +673,11 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
-
- /* FIXME when stream support is added */
- struct xhci_ring *ep_rings[31];
- /* Temporary storage in case the configure endpoint command fails and we
- * have to restore the device state to the previous state
- */
- struct xhci_ring *new_ep_rings[31];
+ struct xhci_virt_ep eps[31];
struct completion cmd_completion;
/* Status of the last command issued for this device */
u32 cmd_status;
+ struct list_head cmd_list;
};
@@ -905,6 +940,8 @@ union xhci_trb {
* It must also be greater than 16.
*/
#define TRBS_PER_SEGMENT 64
+/* Allow two commands + a link TRB, along with any reserved command TRBs */
+#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
@@ -926,6 +963,12 @@ struct xhci_td {
union xhci_trb *last_trb;
};
+struct xhci_dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+ int new_cycle_state;
+};
+
struct xhci_ring {
struct xhci_segment *first_seg;
union xhci_trb *enqueue;
@@ -935,15 +978,6 @@ struct xhci_ring {
struct xhci_segment *deq_seg;
unsigned int deq_updates;
struct list_head td_list;
- /* ---- Related to URB cancellation ---- */
- struct list_head cancelled_td_list;
- unsigned int cancels_pending;
- unsigned int state;
-#define SET_DEQ_PENDING (1 << 0)
-#define EP_HALTED (1 << 1)
- /* The TRB that was last reported in a stopped endpoint ring */
- union xhci_trb *stopped_trb;
- struct xhci_td *stopped_td;
/*
* Write the cycle state into the TRB cycle field to give ownership of
* the TRB to the host controller (if we are the producer), or to check
@@ -952,12 +986,6 @@ struct xhci_ring {
u32 cycle_state;
};
-struct xhci_dequeue_state {
- struct xhci_segment *new_deq_seg;
- union xhci_trb *new_deq_ptr;
- int new_cycle_state;
-};
-
struct xhci_erst_entry {
/* 64-bit event ring segment address */
u64 seg_addr;
@@ -1034,6 +1062,7 @@ struct xhci_hcd {
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
+ unsigned int cmd_ring_reserved_trbs;
struct xhci_ring *event_ring;
struct xhci_erst erst;
/* Scratchpad */
@@ -1058,6 +1087,9 @@ struct xhci_hcd {
int noops_submitted;
int noops_handled;
int error_bitmask;
+ unsigned int quirks;
+#define XHCI_LINK_TRB_QUIRK (1 << 0)
+#define XHCI_RESET_EP_QUIRK (1 << 1)
};
/* For testing purposes */
@@ -1136,6 +1168,13 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
writel(val_hi, ptr + 1);
}
+static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
+{
+ u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+ return ((HC_VERSION(temp) == 0x95) &&
+ (xhci->quirks & XHCI_LINK_TRB_QUIRK));
+}
+
/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
@@ -1150,7 +1189,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
-/* xHCI memory managment */
+/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
@@ -1158,11 +1197,24 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
+unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+void xhci_endpoint_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx,
+ unsigned int ep_index);
+void xhci_slot_copy(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx,
+ struct xhci_container_ctx *out_ctx);
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
+ bool allocate_completion, gfp_t mem_flags);
+void xhci_free_command(struct xhci_hcd *xhci,
+ struct xhci_command *command);
#ifdef CONFIG_PCI
/* xHCI PCI glue */
@@ -1182,6 +1234,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
@@ -1205,7 +1259,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+ int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+ u32 slot_id, bool command_must_succeed);
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index);
@@ -1213,8 +1271,13 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_td *cur_td, struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
- struct xhci_ring *ep_ring, unsigned int slot_id,
- unsigned int ep_index, struct xhci_dequeue_state *deq_state);
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state);
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ struct usb_device *udev, unsigned int ep_index);
+void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state);
/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 4541dfcea88..459a7287fe0 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -653,33 +653,6 @@ static struct scsi_host_template mts_scsi_host_template = {
.max_sectors= 256, /* 128 K */
};
-struct vendor_product
-{
- char* name;
- enum
- {
- mts_sup_unknown=0,
- mts_sup_alpha,
- mts_sup_full
- }
- support_status;
-} ;
-
-
-/* These are taken from the msmUSB.inf file on the Windows driver CD */
-static const struct vendor_product mts_supported_products[] =
-{
- { "Phantom 336CX", mts_sup_unknown},
- { "Phantom 336CX", mts_sup_unknown},
- { "Scanmaker X6", mts_sup_alpha},
- { "Phantom C6", mts_sup_unknown},
- { "Phantom 336CX", mts_sup_unknown},
- { "ScanMaker V6USL", mts_sup_unknown},
- { "ScanMaker V6USL", mts_sup_unknown},
- { "Scanmaker V6UL", mts_sup_unknown},
- { "Scanmaker V6UPL", mts_sup_alpha},
-};
-
/* The entries of microtek_table must correspond, line-by-line to
the entries of mts_supported_products[]. */
@@ -711,7 +684,6 @@ static int mts_usb_probe(struct usb_interface *intf,
int err_retval = -ENOMEM;
struct mts_desc * new_desc;
- struct vendor_product const* p;
struct usb_device *dev = interface_to_usbdev (intf);
/* the current altsetting on the interface we're probing */
@@ -726,15 +698,6 @@ static int mts_usb_probe(struct usb_interface *intf,
MTS_DEBUG_GOT_HERE();
- p = &mts_supported_products[id - mts_usb_ids];
-
- MTS_DEBUG_GOT_HERE();
-
- MTS_DEBUG( "found model %s\n", p->name );
- if ( p->support_status != mts_sup_full )
- MTS_MESSAGE( "model %s is not known to be fully supported, reports welcome!\n",
- p->name );
-
/* the current altsetting on the interface we're probing */
altsetting = intf->cur_altsetting;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 6da8887538c..1337a9ce80b 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -96,6 +96,8 @@ static int idmouse_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void idmouse_disconnect(struct usb_interface *interface);
+static int idmouse_suspend(struct usb_interface *intf, pm_message_t message);
+static int idmouse_resume(struct usb_interface *intf);
/* file operation pointers */
static const struct file_operations idmouse_fops = {
@@ -117,7 +119,11 @@ static struct usb_driver idmouse_driver = {
.name = DRIVER_SHORT,
.probe = idmouse_probe,
.disconnect = idmouse_disconnect,
+ .suspend = idmouse_suspend,
+ .resume = idmouse_resume,
+ .reset_resume = idmouse_resume,
.id_table = idmouse_table,
+ .supports_autosuspend = 1,
};
static int idmouse_create_image(struct usb_idmouse *dev)
@@ -197,6 +203,17 @@ reset:
return result;
}
+/* PM operations are nops as this driver does IO only during open() */
+static int idmouse_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ return 0;
+}
+
+static int idmouse_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
static inline void idmouse_delete(struct usb_idmouse *dev)
{
kfree(dev->bulk_in_buffer);
@@ -235,9 +252,13 @@ static int idmouse_open(struct inode *inode, struct file *file)
} else {
/* create a new image and check for success */
+ result = usb_autopm_get_interface(interface);
+ if (result)
+ goto error;
result = idmouse_create_image (dev);
if (result)
goto error;
+ usb_autopm_put_interface(interface);
/* increment our usage count for the driver */
++dev->open;
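The open() change above follows the usual runtime-PM pattern: take a usage reference before the one-off I/O, drop it when done, and let the nop suspend/resume callbacks plus supports_autosuspend handle the rest. A rough standalone model of that reference counting (all names here are hypothetical stand-ins for usb_autopm_get_interface()/usb_autopm_put_interface()):

    #include <stdio.h>

    static int pm_usage;  /* models the interface's PM usage counter */

    static int toy_autopm_get(void)
    {
            if (pm_usage++ == 0)
                    printf("resume device\n");  /* first user wakes the device */
            return 0;
    }

    static void toy_autopm_put(void)
    {
            if (--pm_usage == 0)
                    printf("device may autosuspend\n");  /* last user gone */
    }

    static int toy_open(void)
    {
            if (toy_autopm_get())
                    return -1;
            printf("do one-off I/O (create image)\n");
            toy_autopm_put();  /* no further I/O until the next open() */
            return 0;
    }

    int main(void)
    {
            return toy_open();
    }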
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index ad4fb15b5dc..90f130126c1 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -412,6 +412,9 @@ static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
dev = file->private_data;
+ if (!dev->intf)
+ return POLLERR | POLLHUP;
+
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
@@ -767,6 +770,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
ld_usb_delete(dev);
} else {
dev->intf = NULL;
+ /* wake up pollers */
+ wake_up_interruptible_all(&dev->read_wait);
+ wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->mutex);
}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 97efeaec4d5..faa6d623de7 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -552,6 +552,9 @@ static unsigned int tower_poll (struct file *file, poll_table *wait)
dev = file->private_data;
+ if (!dev->udev)
+ return POLLERR | POLLHUP;
+
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
@@ -1025,6 +1028,9 @@ static void tower_disconnect (struct usb_interface *interface)
tower_delete (dev);
} else {
dev->udev = NULL;
+ /* wake up pollers */
+ wake_up_interruptible_all(&dev->read_wait);
+ wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->lock);
}
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index b4ec716de7d..0025847743f 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -79,14 +79,12 @@ sisusb_free_buffers(struct sisusb_usb_data *sisusb)
for (i = 0; i < NUMOBUFS; i++) {
if (sisusb->obuf[i]) {
- usb_buffer_free(sisusb->sisusb_dev, sisusb->obufsize,
- sisusb->obuf[i], sisusb->transfer_dma_out[i]);
+ kfree(sisusb->obuf[i]);
sisusb->obuf[i] = NULL;
}
}
if (sisusb->ibuf) {
- usb_buffer_free(sisusb->sisusb_dev, sisusb->ibufsize,
- sisusb->ibuf, sisusb->transfer_dma_in);
+ kfree(sisusb->ibuf);
sisusb->ibuf = NULL;
}
}
@@ -230,8 +228,7 @@ sisusb_bulk_completeout(struct urb *urb)
static int
sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe, void *data,
- int len, int *actual_length, int timeout, unsigned int tflags,
- dma_addr_t transfer_dma)
+ int len, int *actual_length, int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbout[index];
int retval, byteswritten = 0;
@@ -245,9 +242,6 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
urb->transfer_flags |= tflags;
urb->actual_length = 0;
- if ((urb->transfer_dma = transfer_dma))
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
/* Set up context */
sisusb->urbout_context[index].actual_length = (timeout) ?
NULL : actual_length;
@@ -297,8 +291,8 @@ sisusb_bulk_completein(struct urb *urb)
}
static int
-sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data, int len,
- int *actual_length, int timeout, unsigned int tflags, dma_addr_t transfer_dma)
+sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
+ int len, int *actual_length, int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbin;
int retval, readbytes = 0;
@@ -311,9 +305,6 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
urb->transfer_flags |= tflags;
urb->actual_length = 0;
- if ((urb->transfer_dma = transfer_dma))
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
sisusb->completein = 0;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval == 0) {
@@ -422,8 +413,7 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
thispass,
&transferred_len,
async ? 0 : 5 * HZ,
- tflags,
- sisusb->transfer_dma_out[index]);
+ tflags);
if (result == -ETIMEDOUT) {
@@ -432,29 +422,16 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
return -ETIME;
continue;
+ }
- } else if ((result == 0) && !async && transferred_len) {
+ if ((result == 0) && !async && transferred_len) {
thispass -= transferred_len;
- if (thispass) {
- if (sisusb->transfer_dma_out) {
- /* If DMA, copy remaining
- * to beginning of buffer
- */
- memcpy(buffer,
- buffer + transferred_len,
- thispass);
- } else {
- /* If not DMA, simply increase
- * the pointer
- */
- buffer += transferred_len;
- }
- }
+ buffer += transferred_len;
} else
break;
- };
+ }
if (result)
return result;
@@ -530,8 +507,7 @@ static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
thispass,
&transferred_len,
5 * HZ,
- tflags,
- sisusb->transfer_dma_in);
+ tflags);
if (transferred_len)
thispass = transferred_len;
@@ -3132,8 +3108,7 @@ static int sisusb_probe(struct usb_interface *intf,
/* Allocate buffers */
sisusb->ibufsize = SISUSB_IBUF_SIZE;
- if (!(sisusb->ibuf = usb_buffer_alloc(dev, SISUSB_IBUF_SIZE,
- GFP_KERNEL, &sisusb->transfer_dma_in))) {
+ if (!(sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL))) {
dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for input buffer");
retval = -ENOMEM;
goto error_2;
@@ -3142,9 +3117,7 @@ static int sisusb_probe(struct usb_interface *intf,
sisusb->numobufs = 0;
sisusb->obufsize = SISUSB_OBUF_SIZE;
for (i = 0; i < NUMOBUFS; i++) {
- if (!(sisusb->obuf[i] = usb_buffer_alloc(dev, SISUSB_OBUF_SIZE,
- GFP_KERNEL,
- &sisusb->transfer_dma_out[i]))) {
+ if (!(sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL))) {
if (i == 0) {
dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for output buffer\n");
retval = -ENOMEM;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index cf0b4a5883f..55492a5930b 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -123,8 +123,6 @@ struct sisusb_usb_data {
int numobufs; /* number of obufs = number of out urbs */
char *obuf[NUMOBUFS], *ibuf; /* transfer buffers */
int obufsize, ibufsize;
- dma_addr_t transfer_dma_out[NUMOBUFS];
- dma_addr_t transfer_dma_in;
struct urb *sisurbout[NUMOBUFS];
struct urb *sisurbin;
unsigned char urbstatus[NUMOBUFS];
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 28a6a3a0953..3db255537e7 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -38,6 +38,7 @@ static char *display_textmodes[] = {"raw", "hex", "ascii", NULL};
struct usb_sevsegdev {
struct usb_device *udev;
+ struct usb_interface *intf;
u8 powered;
u8 mode_msb;
@@ -46,6 +47,8 @@ struct usb_sevsegdev {
u8 textmode;
u8 text[MAXLEN];
u16 textlength;
+
+ u8 shadow_power; /* for PM */
};
/* sysfs_streq can't replace this completely
@@ -65,6 +68,12 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
{
int rc;
+ if (!mydev->shadow_power && mydev->powered) {
+ rc = usb_autopm_get_interface(mydev->intf);
+ if (rc < 0)
+ return;
+ }
+
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
@@ -76,12 +85,18 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
2000);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
+
+ if (mydev->shadow_power && !mydev->powered)
+ usb_autopm_put_interface(mydev->intf);
}
static void update_display_mode(struct usb_sevsegdev *mydev)
{
int rc;
+ if(mydev->shadow_power != 1)
+ return;
+
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
@@ -96,14 +111,17 @@ static void update_display_mode(struct usb_sevsegdev *mydev)
dev_dbg(&mydev->udev->dev, "mode retval = %d\n", rc);
}
-static void update_display_visual(struct usb_sevsegdev *mydev)
+static void update_display_visual(struct usb_sevsegdev *mydev, gfp_t mf)
{
int rc;
int i;
unsigned char *buffer;
u8 decimals = 0;
- buffer = kzalloc(MAXLEN, GFP_KERNEL);
+ if(mydev->shadow_power != 1)
+ return;
+
+ buffer = kzalloc(MAXLEN, mf);
if (!buffer) {
dev_err(&mydev->udev->dev, "out of memory\n");
return;
@@ -163,7 +181,7 @@ static ssize_t set_attr_##name(struct device *dev, \
struct usb_sevsegdev *mydev = usb_get_intfdata(intf); \
\
mydev->name = simple_strtoul(buf, NULL, 10); \
- update_fcn(mydev); \
+ update_fcn(mydev); \
\
return count; \
} \
@@ -194,7 +212,7 @@ static ssize_t set_attr_text(struct device *dev,
if (end > 0)
memcpy(mydev->text, buf, end);
- update_display_visual(mydev);
+ update_display_visual(mydev, GFP_KERNEL);
return count;
}
@@ -242,7 +260,7 @@ static ssize_t set_attr_decimals(struct device *dev,
if (buf[i] == '1')
mydev->decimals[end-1-i] = 1;
- update_display_visual(mydev);
+ update_display_visual(mydev, GFP_KERNEL);
return count;
}
@@ -286,7 +304,7 @@ static ssize_t set_attr_textmode(struct device *dev,
for (i = 0; display_textmodes[i]; i++) {
if (sysfs_streq(display_textmodes[i], buf)) {
mydev->textmode = i;
- update_display_visual(mydev);
+ update_display_visual(mydev, GFP_KERNEL);
return count;
}
}
@@ -330,6 +348,7 @@ static int sevseg_probe(struct usb_interface *interface,
}
mydev->udev = usb_get_dev(udev);
+ mydev->intf = interface;
usb_set_intfdata(interface, mydev);
/*set defaults */
@@ -364,11 +383,49 @@ static void sevseg_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "USB 7 Segment now disconnected\n");
}
+static int sevseg_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usb_sevsegdev *mydev;
+
+ mydev = usb_get_intfdata(intf);
+ mydev->shadow_power = 0;
+
+ return 0;
+}
+
+static int sevseg_resume(struct usb_interface *intf)
+{
+ struct usb_sevsegdev *mydev;
+
+ mydev = usb_get_intfdata(intf);
+ mydev->shadow_power = 1;
+ update_display_mode(mydev);
+ update_display_visual(mydev, GFP_NOIO);
+
+ return 0;
+}
+
+static int sevseg_reset_resume(struct usb_interface *intf)
+{
+ struct usb_sevsegdev *mydev;
+
+ mydev = usb_get_intfdata(intf);
+ mydev->shadow_power = 1;
+ update_display_mode(mydev);
+ update_display_visual(mydev, GFP_NOIO);
+
+ return 0;
+}
+
static struct usb_driver sevseg_driver = {
.name = "usbsevseg",
.probe = sevseg_probe,
.disconnect = sevseg_disconnect,
+ .suspend = sevseg_suspend,
+ .resume = sevseg_resume,
+ .reset_resume = sevseg_reset_resume,
.id_table = id_table,
+ .supports_autosuspend = 1,
};
static int __init usb_sevseg_init(void)
diff --git a/drivers/usb/mon/Kconfig b/drivers/usb/mon/Kconfig
index f28f350cd96..635745f57fb 100644
--- a/drivers/usb/mon/Kconfig
+++ b/drivers/usb/mon/Kconfig
@@ -5,11 +5,9 @@
config USB_MON
tristate "USB Monitor"
depends on USB
- default y if USB=y
- default m if USB=m
help
If you select this option, a component which captures the USB traffic
between peripheral-specific drivers and HC drivers will be built.
For more information, see <file:Documentation/usb/usbmon.txt>.
- If unsure, say Y (if allowed), otherwise M.
+ If unsure, say Y, if allowed, otherwise M.
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index c6516b56673..384b198faa7 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -2,6 +2,6 @@
# Makefile for USB monitor
#
-usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
+usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o
obj-$(CONFIG_USB_MON) += usbmon.o
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 0f7a30b7d2d..dfdc43e2e00 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -220,9 +220,8 @@ static void mon_free_buff(struct mon_pgmap *map, int npages);
/*
* This is a "chunked memcpy". It does not manipulate any counters.
- * But it returns the new offset for repeated application.
*/
-unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
+static void mon_copy_to_buff(const struct mon_reader_bin *this,
unsigned int off, const unsigned char *from, unsigned int length)
{
unsigned int step_len;
@@ -247,7 +246,6 @@ unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
from += step_len;
length -= step_len;
}
- return off;
}
/*
@@ -400,15 +398,8 @@ static char mon_bin_get_data(const struct mon_reader_bin *rp,
unsigned int offset, struct urb *urb, unsigned int length)
{
- if (urb->dev->bus->uses_dma &&
- (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
- return 0;
- }
-
if (urb->transfer_buffer == NULL)
return 'Z';
-
mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
return 0;
}
@@ -635,7 +626,6 @@ static int mon_bin_open(struct inode *inode, struct file *file)
spin_lock_init(&rp->b_lock);
init_waitqueue_head(&rp->b_wait);
mutex_init(&rp->fetch_lock);
-
rp->b_size = BUFF_DFL;
size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
diff --git a/drivers/usb/mon/mon_dma.c b/drivers/usb/mon/mon_dma.c
deleted file mode 100644
index 140cc80bd2b..00000000000
--- a/drivers/usb/mon/mon_dma.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * The USB Monitor, inspired by Dave Harding's USBMon.
- *
- * mon_dma.c: Library which snoops on DMA areas.
- *
- * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
- */
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <asm/page.h>
-
-#include <linux/usb.h> /* Only needed for declarations in usb_mon.h */
-#include "usb_mon.h"
-
-/*
- * PC-compatibles, are, fortunately, sufficiently cache-coherent for this.
- */
-#if defined(__i386__) || defined(__x86_64__) /* CONFIG_ARCH_I386 doesn't exit */
-#define MON_HAS_UNMAP 1
-
-#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT)
-
-char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
-{
- struct page *pg;
- unsigned long flags;
- unsigned char *map;
- unsigned char *ptr;
-
- /*
- * On i386, a DMA handle is the "physical" address of a page.
- * In other words, the bus address is equal to physical address.
- * There is no IOMMU.
- */
- pg = phys_to_page(dma_addr);
-
- /*
- * We are called from hardware IRQs in case of callbacks.
- * But we can be called from softirq or process context in case
- * of submissions. In such case, we need to protect KM_IRQ0.
- */
- local_irq_save(flags);
- map = kmap_atomic(pg, KM_IRQ0);
- ptr = map + (dma_addr & (PAGE_SIZE-1));
- memcpy(dst, ptr, len);
- kunmap_atomic(map, KM_IRQ0);
- local_irq_restore(flags);
- return 0;
-}
-
-void mon_dmapeek_vec(const struct mon_reader_bin *rp,
- unsigned int offset, dma_addr_t dma_addr, unsigned int length)
-{
- unsigned long flags;
- unsigned int step_len;
- struct page *pg;
- unsigned char *map;
- unsigned long page_off, page_len;
-
- local_irq_save(flags);
- while (length) {
- /* compute number of bytes we are going to copy in this page */
- step_len = length;
- page_off = dma_addr & (PAGE_SIZE-1);
- page_len = PAGE_SIZE - page_off;
- if (page_len < step_len)
- step_len = page_len;
-
- /* copy data and advance pointers */
- pg = phys_to_page(dma_addr);
- map = kmap_atomic(pg, KM_IRQ0);
- offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
- kunmap_atomic(map, KM_IRQ0);
- dma_addr += step_len;
- length -= step_len;
- }
- local_irq_restore(flags);
-}
-
-#endif /* __i386__ */
-
-#ifndef MON_HAS_UNMAP
-char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
-{
- return 'D';
-}
-
-void mon_dmapeek_vec(const struct mon_reader_bin *rp,
- unsigned int offset, dma_addr_t dma_addr, unsigned int length)
-{
- ;
-}
-
-#endif /* MON_HAS_UNMAP */
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 5e0ab4201c0..e0c2db3b767 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -361,7 +361,6 @@ static int __init mon_init(void)
}
// MOD_INC_USE_COUNT(which_module?);
-
mutex_lock(&usb_bus_list_lock);
list_for_each_entry (ubus, &usb_bus_list, bus_list) {
mon_bus_init(ubus);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index a7eb4c99342..9f1a9227ebe 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -150,20 +150,6 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
return '>';
}
- /*
- * The check to see if it's safe to poke at data has an enormous
- * number of corner cases, but it seems that the following is
- * more or less safe.
- *
- * We do not even try to look at transfer_buffer, because it can
- * contain non-NULL garbage in case the upper level promised to
- * set DMA for the HCD.
- */
- if (urb->dev->bus->uses_dma &&
- (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- return mon_dmapeek(ep->data, urb->transfer_dma, len);
- }
-
if (urb->transfer_buffer == NULL)
return 'Z'; /* '0' would be not as pretty. */
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index f5d84ff8c10..df9a4df342c 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -65,20 +65,6 @@ int __init mon_bin_init(void);
void mon_bin_exit(void);
/*
- * DMA interface.
- *
- * XXX The vectored side needs a serious re-thinking. Abstracting vectors,
- * like in Paolo's original patch, produces a double pkmap. We need an idea.
-*/
-extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len);
-
-struct mon_reader_bin;
-extern void mon_dmapeek_vec(const struct mon_reader_bin *rp,
- unsigned int offset, dma_addr_t dma_addr, unsigned int len);
-extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp,
- unsigned int offset, const unsigned char *from, unsigned int len);
-
-/*
*/
extern struct mutex mon_lock;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 1d26beddf2c..3a61ddb62bd 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1850,6 +1850,10 @@ static void musb_free(struct musb *musb)
dma_controller_destroy(c);
}
+#ifdef CONFIG_USB_MUSB_OTG
+ put_device(musb->xceiv->dev);
+#endif
+
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_platform_exit(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
@@ -1859,10 +1863,6 @@ static void musb_free(struct musb *musb)
clk_put(musb->clock);
}
-#ifdef CONFIG_USB_MUSB_OTG
- put_device(musb->xceiv->dev);
-#endif
-
#ifdef CONFIG_USB_MUSB_HDRC_HCD
usb_put_hcd(musb_to_hcd(musb));
#else
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index e0d56ef2bcb..77a5f418899 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -117,24 +117,7 @@ static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
pr_debug(" VBUS %d mA error %d\n", mA, status);
}
-static void enable_vbus_source(struct isp1301 *isp)
-{
- /* this board won't supply more than 8mA vbus power.
- * some boards can switch a 100ma "unit load" (or more).
- */
-}
-
-
-/* products will deliver OTG messages with LEDs, GUI, etc */
-static inline void notresponding(struct isp1301 *isp)
-{
- printk(KERN_NOTICE "OTG device not responding.\n");
-}
-
-
-#endif
-
-#if defined(CONFIG_MACH_OMAP_H4)
+#else
static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
{
@@ -144,6 +127,8 @@ static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
*/
}
+#endif
+
static void enable_vbus_source(struct isp1301 *isp)
{
/* this board won't supply more than 8mA vbus power.
@@ -159,8 +144,6 @@ static inline void notresponding(struct isp1301 *isp)
}
-#endif
-
/*-------------------------------------------------------------------------*/
static struct i2c_driver isp1301_driver;
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5d25d3e52bf..131e61adaaf 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -31,10 +31,20 @@ static int debug;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x6547, 0x0232) },
+ { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
+static int is_irda(struct usb_serial *serial)
+{
+ struct usb_device *dev = serial->dev;
+ if (le16_to_cpu(dev->descriptor.idVendor) == 0x18ec &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x3118)
+ return 1;
+ return 0;
+}
+
static inline void ARK3116_SND(struct usb_serial *serial, int seq,
__u8 request, __u8 requesttype,
__u16 value, __u16 index)
@@ -84,11 +94,21 @@ static int ark3116_attach(struct usb_serial *serial)
return -ENOMEM;
}
+ if (is_irda(serial))
+ dbg("IrDA mode");
+
/* 3 */
ARK3116_SND(serial, 3, 0xFE, 0x40, 0x0008, 0x0002);
ARK3116_SND(serial, 4, 0xFE, 0x40, 0x0008, 0x0001);
ARK3116_SND(serial, 5, 0xFE, 0x40, 0x0000, 0x0008);
- ARK3116_SND(serial, 6, 0xFE, 0x40, 0x0000, 0x000B);
+ ARK3116_SND(serial, 6, 0xFE, 0x40, is_irda(serial) ? 0x0001 : 0x0000,
+ 0x000B);
+
+ if (is_irda(serial)) {
+ ARK3116_SND(serial, 1001, 0xFE, 0x40, 0x0000, 0x000C);
+ ARK3116_SND(serial, 1002, 0xFE, 0x40, 0x0041, 0x000D);
+ ARK3116_SND(serial, 1003, 0xFE, 0x40, 0x0001, 0x000A);
+ }
/* <-- seq7 */
ARK3116_RCV(serial, 7, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
@@ -125,6 +145,8 @@ static int ark3116_attach(struct usb_serial *serial)
ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
ARK3116_SND(serial, 148, 0xFE, 0x40, 0x0038, 0x0000);
ARK3116_SND(serial, 149, 0xFE, 0x40, 0x0001, 0x0001);
+ if (is_irda(serial))
+ ARK3116_SND(serial, 1004, 0xFE, 0x40, 0x0000, 0x0009);
ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 8c894a7d5dc..59eff721fcc 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -56,6 +56,18 @@
#define CH341_BAUDBASE_FACTOR 1532620800
#define CH341_BAUDBASE_DIVMAX 3
+/* Break support - the information used to implement this was gleaned from
+ * the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
+ */
+
+#define CH341_REQ_WRITE_REG 0x9A
+#define CH341_REQ_READ_REG 0x95
+#define CH341_REG_BREAK1 0x05
+#define CH341_REG_BREAK2 0x18
+#define CH341_NBREAK_BITS_REG1 0x01
+#define CH341_NBREAK_BITS_REG2 0x40
+
+
static int debug;
static struct usb_device_id id_table [] = {
@@ -373,6 +385,45 @@ static void ch341_set_termios(struct tty_struct *tty,
*/
}
+static void ch341_break_ctl(struct tty_struct *tty, int break_state)
+{
+ const uint16_t ch341_break_reg =
+ CH341_REG_BREAK1 | ((uint16_t) CH341_REG_BREAK2 << 8);
+ struct usb_serial_port *port = tty->driver_data;
+ int r;
+ uint16_t reg_contents;
+ uint8_t break_reg[2];
+
+ dbg("%s()", __func__);
+
+ r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
+ ch341_break_reg, 0, break_reg, sizeof(break_reg));
+ if (r < 0) {
+ printk(KERN_WARNING "%s: USB control read error whilst getting"
+ " break register contents.\n", __FILE__);
+ return;
+ }
+ dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x",
+ __func__, break_reg[0], break_reg[1]);
+ if (break_state != 0) {
+ dbg("%s - Enter break state requested", __func__);
+ break_reg[0] &= ~CH341_NBREAK_BITS_REG1;
+ break_reg[1] &= ~CH341_NBREAK_BITS_REG2;
+ } else {
+ dbg("%s - Leave break state requested", __func__);
+ break_reg[0] |= CH341_NBREAK_BITS_REG1;
+ break_reg[1] |= CH341_NBREAK_BITS_REG2;
+ }
+ dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
+ __func__, break_reg[0], break_reg[1]);
+ reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8);
+ r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
+ ch341_break_reg, reg_contents);
+ if (r < 0)
+ printk(KERN_WARNING "%s: USB control write error whilst setting"
+ " break register contents.\n", __FILE__);
+}
+
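For reference, the bit juggling in ch341_break_ctl() above amounts to packing two 8-bit register addresses (and their contents) into single 16-bit control-transfer arguments and clearing the two "no break" bits to enter break. A standalone sketch of just that arithmetic, with the USB transfer itself omitted; values mirror the defines above:

    #include <stdint.h>
    #include <stdio.h>

    #define REG_BREAK1        0x05
    #define REG_BREAK2        0x18
    #define NBREAK_BITS_REG1  0x01
    #define NBREAK_BITS_REG2  0x40

    int main(void)
    {
            /* both register addresses travel in one 16-bit word */
            uint16_t break_reg = REG_BREAK1 | ((uint16_t)REG_BREAK2 << 8);
            /* pretend read-back with break currently de-asserted */
            uint8_t regs[2] = { NBREAK_BITS_REG1, NBREAK_BITS_REG2 };

            /* entering break clears the "no break" bit in each register */
            regs[0] &= ~NBREAK_BITS_REG1;
            regs[1] &= ~NBREAK_BITS_REG2;

            uint16_t reg_contents = regs[0] | ((uint16_t)regs[1] << 8);
            printf("reg addr word 0x%04x, new contents 0x%04x\n",
                   (unsigned int)break_reg, (unsigned int)reg_contents);
            return 0;
    }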
static int ch341_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear)
{
@@ -576,6 +627,7 @@ static struct usb_serial_driver ch341_device = {
.close = ch341_close,
.ioctl = ch341_ioctl,
.set_termios = ch341_set_termios,
+ .break_ctl = ch341_break_ctl,
.tiocmget = ch341_tiocmget,
.tiocmset = ch341_tiocmset,
.read_int_callback = ch341_read_int_callback,
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index e772b01ac3a..1fd360e0406 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -57,7 +57,7 @@
#define UART_RI 0x10 /* ring indicator - modem - device to host */
#define UART_CD 0x40 /* carrier detect - modem - device to host */
#define CYP_ERROR 0x08 /* received from input report - device to host */
-/* Note - the below has nothing to to with the "feature report" reset */
+/* Note - the below has nothing to do with the "feature report" reset */
#define CONTROL_RESET 0x08 /* sent with output report - host to device */
/* End of RS-232 protocol definitions */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 76a17f915ee..4f883b1773d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -176,6 +176,9 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -694,6 +697,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(DE_VID, WHT_PID) },
{ USB_DEVICE(ADI_VID, ADI_GNICE_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -702,6 +707,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 8c92b88166a..6f31e0d7189 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -81,6 +81,9 @@
/* OpenDCC (www.opendcc.de) product id */
#define FTDI_OPENDCC_PID 0xBFD8
+#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
+#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
+#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
@@ -930,6 +933,7 @@
*/
#define ADI_VID 0x0456
#define ADI_GNICE_PID 0xF000
+#define ADI_GNICEPLUS_PID 0xF001
/*
* JETI SPECTROMETER SPECBOS 1201
@@ -968,6 +972,12 @@
#define MARVELL_OPENRD_PID 0x9e90
/*
+ * Hameg HO820 and HO870 interface (using VID 0x0403)
+ */
+#define HAMEG_HO820_PID 0xed74
+#define HAMEG_HO870_PID 0xed71
+
+/*
* BmRequestType: 1100 0000b
* bRequest: FTDI_E2_READ
* wValue: 0
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index d9398e9f30c..deba08c7a01 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -19,7 +19,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-
+#include <linux/kfifo.h>
static int debug;
@@ -166,24 +166,6 @@ static void generic_cleanup(struct usb_serial_port *port)
}
}
-int usb_serial_generic_resume(struct usb_serial *serial)
-{
- struct usb_serial_port *port;
- int i, c = 0, r;
-
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- if (port->port.count && port->read_urb) {
- r = usb_submit_urb(port->read_urb, GFP_NOIO);
- if (r < 0)
- c++;
- }
- }
-
- return c ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
-
void usb_serial_generic_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
@@ -272,12 +254,81 @@ error_no_buffer:
return bwrite;
}
+/**
+ * usb_serial_generic_write_start - kick off an URB write
+ * @port: Pointer to the &struct usb_serial_port data
+ *
+ * Returns the number of bytes queued on success. This will be zero if there
+ * was nothing to send. Otherwise, it returns a negative errno value
+ */
+static int usb_serial_generic_write_start(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
+ unsigned char *data;
+ int result;
+ int count;
+ unsigned long flags;
+ bool start_io;
+
+ /* Atomically determine whether we can and need to start a USB
+ * operation. */
+ spin_lock_irqsave(&port->lock, flags);
+ if (port->write_urb_busy)
+ start_io = false;
+ else {
+ start_io = (__kfifo_len(port->write_fifo) != 0);
+ port->write_urb_busy = start_io;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (!start_io)
+ return 0;
+
+ data = port->write_urb->transfer_buffer;
+ count = kfifo_get(port->write_fifo, data, port->bulk_out_size);
+ usb_serial_debug_data(debug, &port->dev, __func__, count, data);
+
+ /* set up our urb */
+ usb_fill_bulk_urb(port->write_urb, serial->dev,
+ usb_sndbulkpipe(serial->dev,
+ port->bulk_out_endpointAddress),
+ port->write_urb->transfer_buffer, count,
+ ((serial->type->write_bulk_callback) ?
+ serial->type->write_bulk_callback :
+ usb_serial_generic_write_bulk_callback),
+ port);
+
+ /* send the data out the bulk port */
+ result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ if (result) {
+ dev_err(&port->dev,
+ "%s - failed submitting write urb, error %d\n",
+ __func__, result);
+ /* don't have to grab the lock here, as we will
+ retry if != 0 */
+ port->write_urb_busy = 0;
+ } else
+ result = count;
+
+ return result;
+}
+
+/**
+ * usb_serial_generic_write - generic write function for serial USB devices
+ * @tty: Pointer to &struct tty_struct for the device
+ * @port: Pointer to the &usb_serial_port structure for the device
+ * @buf: Pointer to the data to write
+ * @count: Number of bytes to write
+ *
+ * Returns the number of characters actually written, which may be anything
+ * from zero to @count. If an error occurs, it returns the negative errno
+ * value.
+ */
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
struct usb_serial *serial = port->serial;
int result;
- unsigned char *data;
dbg("%s - port %d", __func__, port->number);
@@ -287,57 +338,20 @@ int usb_serial_generic_write(struct tty_struct *tty,
}
/* only do something if we have a bulk out endpoint */
- if (serial->num_bulk_out) {
- unsigned long flags;
-
- if (serial->type->max_in_flight_urbs)
- return usb_serial_multi_urb_write(tty, port,
- buf, count);
-
- spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy) {
- spin_unlock_irqrestore(&port->lock, flags);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_irqrestore(&port->lock, flags);
-
- count = (count > port->bulk_out_size) ?
- port->bulk_out_size : count;
-
- memcpy(port->write_urb->transfer_buffer, buf, count);
- data = port->write_urb->transfer_buffer;
- usb_serial_debug_data(debug, &port->dev, __func__, count, data);
+ if (!serial->num_bulk_out)
+ return 0;
- /* set up our urb */
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- ((serial->type->write_bulk_callback) ?
- serial->type->write_bulk_callback :
- usb_serial_generic_write_bulk_callback),
- port);
+ if (serial->type->max_in_flight_urbs)
+ return usb_serial_multi_urb_write(tty, port,
+ buf, count);
- /* send the data out the bulk port */
- port->write_urb_busy = 1;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- /* don't have to grab the lock here, as we will
- retry if != 0 */
- port->write_urb_busy = 0;
- } else
- result = count;
+ count = kfifo_put(port->write_fifo, buf, count);
+ result = usb_serial_generic_write_start(port);
- return result;
- }
+ if (result >= 0)
+ result = count;
- /* no bulk out, so return 0 bytes written */
- return 0;
+ return result;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write);
@@ -355,9 +369,8 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
room = port->bulk_out_size *
(serial->type->max_in_flight_urbs -
port->urbs_in_flight);
- } else if (serial->num_bulk_out && !(port->write_urb_busy)) {
- room = port->bulk_out_size;
- }
+ } else if (serial->num_bulk_out)
+ room = port->write_fifo->size - __kfifo_len(port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -377,11 +390,8 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
spin_lock_irqsave(&port->lock, flags);
chars = port->tx_bytes_flight;
spin_unlock_irqrestore(&port->lock, flags);
- } else if (serial->num_bulk_out) {
- /* FIXME: Locking */
- if (port->write_urb_busy)
- chars = port->write_urb->transfer_buffer_length;
- }
+ } else if (serial->num_bulk_out)
+ chars = kfifo_len(port->write_fifo);
dbg("%s - returns %d", __func__, chars);
return chars;
@@ -485,16 +495,23 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
if (port->urbs_in_flight < 0)
port->urbs_in_flight = 0;
spin_unlock_irqrestore(&port->lock, flags);
+
+ if (status) {
+ dbg("%s - nonzero multi-urb write bulk status "
+ "received: %d", __func__, status);
+ return;
+ }
} else {
- /* Handle the case for single urb mode */
port->write_urb_busy = 0;
- }
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- return;
+ if (status) {
+ dbg("%s - nonzero write bulk status "
+ "received: %d", __func__, status);
+ kfifo_reset(port->write_fifo);
+ } else
+ usb_serial_generic_write_start(port);
}
+
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
@@ -559,6 +576,33 @@ int usb_serial_handle_break(struct usb_serial_port *port)
}
EXPORT_SYMBOL_GPL(usb_serial_handle_break);
+int usb_serial_generic_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port;
+ int i, c = 0, r;
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (!port->port.count)
+ continue;
+
+ if (port->read_urb) {
+ r = usb_submit_urb(port->read_urb, GFP_NOIO);
+ if (r < 0)
+ c++;
+ }
+
+ if (port->write_urb) {
+ r = usb_serial_generic_write_start(port);
+ if (r < 0)
+ c++;
+ }
+ }
+
+ return c ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
+
void usb_serial_generic_disconnect(struct usb_serial *serial)
{
int i;
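
The rework above turns the generic driver into a simple producer/consumer: write() only feeds the kfifo, usb_serial_generic_write_start() claims the single write URB and drains one chunk, and the completion callback clears the busy flag and drains again. Below is a minimal userspace sketch of that pattern; the fifo helpers and the synchronous "completion" are stand-ins for the kfifo and URB machinery, not kernel APIs.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define FIFO_SIZE	64	/* stands in for the kfifo size */
#define URB_SIZE	16	/* stands in for port->bulk_out_size */

static char fifo[FIFO_SIZE];
static size_t fifo_len;
static bool urb_busy;		/* models port->write_urb_busy */

/* append as much as fits; models kfifo_put() */
static size_t fifo_put(const char *buf, size_t count)
{
	size_t room = FIFO_SIZE - fifo_len;
	size_t n = count < room ? count : room;

	memcpy(fifo + fifo_len, buf, n);
	fifo_len += n;
	return n;
}

/* remove up to count bytes; models kfifo_get() */
static size_t fifo_get(char *buf, size_t count)
{
	size_t n = count < fifo_len ? count : fifo_len;

	memcpy(buf, fifo, n);
	memmove(fifo, fifo + n, fifo_len - n);
	fifo_len -= n;
	return n;
}

static void write_complete(void);

/* analogue of usb_serial_generic_write_start() */
static int write_start(void)
{
	char urb_buf[URB_SIZE];
	size_t n;

	/* only one path at a time may own the "URB"; others just queue data */
	if (urb_busy || !fifo_len)
		return 0;
	urb_busy = true;

	n = fifo_get(urb_buf, sizeof(urb_buf));
	printf("submitting %zu bytes: %.*s\n", n, (int)n, urb_buf);
	write_complete();	/* completion is synchronous in this model */
	return (int)n;
}

/* analogue of the write bulk completion callback */
static void write_complete(void)
{
	urb_busy = false;
	write_start();		/* keep draining until the fifo is empty */
}

/* analogue of usb_serial_generic_write(): queue data and kick the engine */
static int generic_write(const char *buf, size_t count)
{
	size_t queued = fifo_put(buf, count);

	write_start();
	return (int)queued;
}

int main(void)
{
	const char *msg = "hello from the fifo-backed write path";

	generic_write(msg, strlen(msg));
	return 0;
}

The busy-flag check done under the port lock in the real driver is what keeps this safe: whichever caller wins the flag drains the fifo, everybody else simply leaves their bytes queued.
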
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index dc0f832657e..b97960ac92f 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2540,7 +2540,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor)
/*****************************************************************************
* send_cmd_write_uart_register
- * this function builds up a uart register message and sends to to the device.
+ * this function builds up a uart register message and sends it to the device.
*****************************************************************************/
static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
__u8 regNum, __u8 regValue)
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 6138c1cda35..e6e02b178d2 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -40,7 +40,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.10"
+#define DRIVER_VERSION "v0.11"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
static struct usb_device_id id_table[] = {
@@ -64,6 +64,7 @@ static int cdmode = 1;
static int iuu_cardin;
static int iuu_cardout;
static int xmas;
+static int vcc_default = 5;
static void read_rxcmd_callback(struct urb *urb);
@@ -79,6 +80,7 @@ struct iuu_private {
u8 *buf; /* used for initialize speed */
u8 *dbgbuf; /* debug buffer */
u8 len;
+ int vcc; /* vcc (either 3 or 5 V) */
};
@@ -114,6 +116,7 @@ static int iuu_startup(struct usb_serial *serial)
kfree(priv);
return -ENOMEM;
}
+ priv->vcc = vcc_default;
spin_lock_init(&priv->lock);
init_waitqueue_head(&priv->delta_msr_wait);
usb_set_serial_port_data(serial->port[0], priv);
@@ -1009,11 +1012,7 @@ static void iuu_close(struct usb_serial_port *port)
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->interrupt_in_urb);
- msleep(1000);
- /* wait one second to free all buffers */
iuu_led(port, 0, 0, 0xF000, 0xFF);
- msleep(1000);
- usb_reset_device(port->serial->dev);
}
}
@@ -1182,6 +1181,95 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
return result;
}
+/* change the card VCC (3.3 V or 5 V) */
+static int iuu_vcc_set(struct usb_serial_port *port, unsigned int vcc)
+{
+ int status;
+ u8 *buf;
+
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dbg("%s - enter", __func__);
+
+ buf[0] = IUU_SET_VCC;
+ buf[1] = vcc & 0xFF;
+ buf[2] = (vcc >> 8) & 0xFF;
+ buf[3] = (vcc >> 16) & 0xFF;
+ buf[4] = (vcc >> 24) & 0xFF;
+
+ status = bulk_immediate(port, buf, 5);
+ kfree(buf);
+
+ if (status != IUU_OPERATION_OK)
+ dbg("%s - vcc error status = %2x", __func__, status);
+ else
+ dbg("%s - vcc OK !", __func__);
+
+ return status;
+}
+
+/*
+ * Sysfs Attributes
+ */
+
+static ssize_t show_vcc_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_serial_port *port = to_usb_serial_port(dev);
+ struct iuu_private *priv = usb_get_serial_port_data(port);
+
+ return sprintf(buf, "%d\n", priv->vcc);
+}
+
+static ssize_t store_vcc_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_serial_port *port = to_usb_serial_port(dev);
+ struct iuu_private *priv = usb_get_serial_port_data(port);
+ unsigned long v;
+
+ if (strict_strtoul(buf, 10, &v)) {
+ dev_err(dev, "%s - vcc_mode: %s is not an unsigned long\n",
+ __func__, buf);
+ goto fail_store_vcc_mode;
+ }
+
+ dbg("%s: setting vcc_mode = %ld", __func__, v);
+
+ if ((v != 3) && (v != 5)) {
+ dev_err(dev, "%s - vcc_mode %ld is invalid\n", __func__, v);
+ } else {
+ iuu_vcc_set(port, v);
+ priv->vcc = v;
+ }
+fail_store_vcc_mode:
+ return count;
+}
+
+static DEVICE_ATTR(vcc_mode, S_IRUSR | S_IWUSR, show_vcc_mode,
+ store_vcc_mode);
+
+static int iuu_create_sysfs_attrs(struct usb_serial_port *port)
+{
+ dbg("%s", __func__);
+
+ return device_create_file(&port->dev, &dev_attr_vcc_mode);
+}
+
+static int iuu_remove_sysfs_attrs(struct usb_serial_port *port)
+{
+ dbg("%s", __func__);
+
+ device_remove_file(&port->dev, &dev_attr_vcc_mode);
+ return 0;
+}
+
+/*
+ * End Sysfs Attributes
+ */
+
static struct usb_serial_driver iuu_device = {
.driver = {
.owner = THIS_MODULE,
@@ -1189,6 +1277,8 @@ static struct usb_serial_driver iuu_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .port_probe = iuu_create_sysfs_attrs,
+ .port_remove = iuu_remove_sysfs_attrs,
.open = iuu_open,
.close = iuu_close,
.write = iuu_uart_write,
@@ -1238,14 +1328,19 @@ module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
module_param(xmas, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(xmas, "xmas color enabled or not");
+MODULE_PARM_DESC(xmas, "Xmas colors enabled or not");
module_param(boost, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(boost, "overclock boost percent 100 to 500");
+MODULE_PARM_DESC(boost, "Card overclock boost (in percent 100-500)");
module_param(clockmode, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(clockmode, "1=3Mhz579,2=3Mhz680,3=6Mhz");
+MODULE_PARM_DESC(clockmode, "Card clock mode (1=3.579 MHz, 2=3.680 MHz, "
+ "3=6 MHz)");
module_param(cdmode, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(cdmode, "Card detect mode 0=none, 1=CD, 2=!CD, 3=DSR, "
- "4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING");
+MODULE_PARM_DESC(cdmode, "Card detect mode (0=none, 1=CD, 2=!CD, 3=DSR, "
+ "4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING)");
+
+module_param(vcc_default, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(vcc_default, "Set default VCC (either 3 for 3.3V or 5 "
+ "for 5V). Defaults to 5.");
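
For reference, the new vcc_mode attribute can be driven from userspace like any other sysfs file. A minimal sketch follows; the VCC_ATTR path is an assumption (it depends on which ttyUSB port the adapter enumerates as), so adjust it to the actual device.

#include <stdio.h>
#include <stdlib.h>

#define VCC_ATTR "/sys/bus/usb-serial/devices/ttyUSB0/vcc_mode"

int main(void)
{
	FILE *f;
	int vcc = -1;

	/* read back the current setting (3 or 5) */
	f = fopen(VCC_ATTR, "r");
	if (!f) {
		perror(VCC_ATTR);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &vcc) == 1)
		printf("current VCC: %d V\n", vcc);
	fclose(f);

	/* switch the card supply to 3.3 V; the driver accepts only 3 or 5 */
	f = fopen(VCC_ATTR, "w");
	if (!f) {
		perror(VCC_ATTR);
		return EXIT_FAILURE;
	}
	fprintf(f, "3\n");
	fclose(f);

	return EXIT_SUCCESS;
}
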
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index a61673133d7..f7373371b13 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -38,7 +38,7 @@
* 0.3a - implemented pools of write URBs
* 0.3 - alpha version for public testing
* 0.2 - TIOCMGET works, so autopilot(1) can be used!
- * 0.1 - can be used to to pilot-xfer -p /dev/ttyUSB0 -l
+ * 0.1 - can be used to do pilot-xfer -p /dev/ttyUSB0 -l
*
* The driver skeleton is mainly based on mct_u232.c and various other
* pieces of code shamelessly copied from the drivers/usb/serial/ directory.
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index b66b71ccd12..99bd00f5188 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*
* {sigh}
- * Mororola should be using the CDC ACM USB spec, but instead
+ * Motorola should be using the CDC ACM USB spec, but instead
* they try to just "do their own thing"... This driver should handle a
* few phones in which a basic "dumb serial connection" is needed to be
* able to get a connection through to them.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fe47051dbef..f66e3988321 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -291,6 +291,7 @@ static int option_resume(struct usb_serial *serial);
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
+#define TELIT_PRODUCT_UC864G 0x1004
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -299,6 +300,7 @@ static int option_resume(struct usb_serial *serial);
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710 0xfff1
+#define ZTE_PRODUCT_AC2726 0xfff5
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -502,6 +504,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
@@ -571,6 +574,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
@@ -592,6 +596,7 @@ static struct usb_driver option_driver = {
#ifdef CONFIG_PM
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
+ .supports_autosuspend = 1,
#endif
.id_table = option_ids,
.no_dynamic_id = 1,
@@ -639,6 +644,12 @@ static int debug;
#define IN_BUFLEN 4096
#define OUT_BUFLEN 4096
+struct option_intf_private {
+ spinlock_t susp_lock;
+ unsigned int suspended:1;
+ int in_flight;
+};
+
struct option_port_private {
/* Input endpoints and buffer for this port */
struct urb *in_urbs[N_IN_URB];
@@ -647,6 +658,8 @@ struct option_port_private {
struct urb *out_urbs[N_OUT_URB];
u8 *out_buffer[N_OUT_URB];
unsigned long out_busy; /* Bit vector of URBs in use */
+ int opened;
+ struct usb_anchor delayed;
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
@@ -693,12 +706,17 @@ module_exit(option_exit);
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
+ struct option_intf_private *data;
/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
return -ENODEV;
+ data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ spin_lock_init(&data->susp_lock);
return 0;
}
@@ -755,12 +773,15 @@ static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct option_port_private *portdata;
+ struct option_intf_private *intfdata;
int i;
int left, todo;
struct urb *this_urb = NULL; /* spurious */
int err;
+ unsigned long flags;
portdata = usb_get_serial_port_data(port);
+ intfdata = port->serial->private;
dbg("%s: write (%d chars)", __func__, count);
@@ -782,17 +803,33 @@ static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
dbg("%s: endpoint %d buf %d", __func__,
usb_pipeendpoint(this_urb->pipe), i);
+ err = usb_autopm_get_interface_async(port->serial->interface);
+ if (err < 0)
+ break;
+
/* send the data */
memcpy(this_urb->transfer_buffer, buf, todo);
this_urb->transfer_buffer_length = todo;
- err = usb_submit_urb(this_urb, GFP_ATOMIC);
- if (err) {
- dbg("usb_submit_urb %p (write bulk) failed "
- "(%d)", this_urb, err);
- clear_bit(i, &portdata->out_busy);
- continue;
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+ if (intfdata->suspended) {
+ usb_anchor_urb(this_urb, &portdata->delayed);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ } else {
+ intfdata->in_flight++;
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ err = usb_submit_urb(this_urb, GFP_ATOMIC);
+ if (err) {
+ dbg("usb_submit_urb %p (write bulk) failed "
+ "(%d)", this_urb, err);
+ clear_bit(i, &portdata->out_busy);
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+ intfdata->in_flight--;
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ continue;
+ }
}
+
portdata->tx_start_time[i] = jiffies;
buf += todo;
left -= todo;
@@ -836,7 +873,10 @@ static void option_indat_callback(struct urb *urb)
if (err)
printk(KERN_ERR "%s: resubmit read urb failed. "
"(%d)", __func__, err);
+ else
+ usb_mark_last_busy(port->serial->dev);
}
+
}
return;
}
@@ -845,15 +885,21 @@ static void option_outdat_callback(struct urb *urb)
{
struct usb_serial_port *port;
struct option_port_private *portdata;
+ struct option_intf_private *intfdata;
int i;
dbg("%s", __func__);
port = urb->context;
+ intfdata = port->serial->private;
usb_serial_port_softint(port);
-
+ usb_autopm_put_interface_async(port->serial->interface);
portdata = usb_get_serial_port_data(port);
+ spin_lock(&intfdata->susp_lock);
+ intfdata->in_flight--;
+ spin_unlock(&intfdata->susp_lock);
+
for (i = 0; i < N_OUT_URB; ++i) {
if (portdata->out_urbs[i] == urb) {
smp_mb__before_clear_bit();
@@ -963,10 +1009,13 @@ static int option_chars_in_buffer(struct tty_struct *tty)
static int option_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct option_port_private *portdata;
+ struct option_intf_private *intfdata;
+ struct usb_serial *serial = port->serial;
int i, err;
struct urb *urb;
portdata = usb_get_serial_port_data(port);
+ intfdata = serial->private;
dbg("%s", __func__);
@@ -985,6 +1034,12 @@ static int option_open(struct tty_struct *tty, struct usb_serial_port *port)
option_send_setup(port);
+ serial->interface->needs_remote_wakeup = 1;
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ usb_autopm_put_interface(serial->interface);
+
return 0;
}
@@ -1009,16 +1064,23 @@ static void option_close(struct usb_serial_port *port)
int i;
struct usb_serial *serial = port->serial;
struct option_port_private *portdata;
+ struct option_intf_private *intfdata = port->serial->private;
dbg("%s", __func__);
portdata = usb_get_serial_port_data(port);
if (serial->dev) {
/* Stop reading/writing urbs */
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
for (i = 0; i < N_IN_URB; i++)
usb_kill_urb(portdata->in_urbs[i]);
for (i = 0; i < N_OUT_URB; i++)
usb_kill_urb(portdata->out_urbs[i]);
+ usb_autopm_get_interface(serial->interface);
+ serial->interface->needs_remote_wakeup = 0;
}
}
@@ -1123,6 +1185,7 @@ static int option_startup(struct usb_serial *serial)
__func__, i);
return 1;
}
+ init_usb_anchor(&portdata->delayed);
for (j = 0; j < N_IN_URB; j++) {
buffer = (u8 *)__get_free_page(GFP_KERNEL);
@@ -1225,18 +1288,52 @@ static void option_release(struct usb_serial *serial)
#ifdef CONFIG_PM
static int option_suspend(struct usb_serial *serial, pm_message_t message)
{
+ struct option_intf_private *intfdata = serial->private;
+ int b;
+
dbg("%s entered", __func__);
+
+ if (serial->dev->auto_pm) {
+ spin_lock_irq(&intfdata->susp_lock);
+ b = intfdata->in_flight;
+ spin_unlock_irq(&intfdata->susp_lock);
+
+ if (b)
+ return -EBUSY;
+ }
+
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
stop_read_write_urbs(serial);
return 0;
}
+static void play_delayed(struct usb_serial_port *port)
+{
+ struct option_intf_private *data;
+ struct option_port_private *portdata;
+ struct urb *urb;
+ int err;
+
+ portdata = usb_get_serial_port_data(port);
+ data = port->serial->private;
+ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ data->in_flight++;
+ }
+}
+
static int option_resume(struct usb_serial *serial)
{
- int err, i, j;
+ int i, j;
struct usb_serial_port *port;
- struct urb *urb;
+ struct option_intf_private *intfdata = serial->private;
struct option_port_private *portdata;
+ struct urb *urb;
+ int err = 0;
dbg("%s entered", __func__);
/* get the interrupt URBs resubmitted unconditionally */
@@ -1251,7 +1348,7 @@ static int option_resume(struct usb_serial *serial)
if (err < 0) {
err("%s: Error %d for interrupt URB of port%d",
__func__, err, i);
- return err;
+ goto err_out;
}
}
@@ -1259,27 +1356,32 @@ static int option_resume(struct usb_serial *serial)
/* walk all ports */
port = serial->port[i];
portdata = usb_get_serial_port_data(port);
- mutex_lock(&port->mutex);
/* skip closed ports */
- if (!port->port.count) {
- mutex_unlock(&port->mutex);
+ spin_lock_irq(&intfdata->susp_lock);
+ if (!portdata->opened) {
+ spin_unlock_irq(&intfdata->susp_lock);
continue;
}
for (j = 0; j < N_IN_URB; j++) {
urb = portdata->in_urbs[j];
- err = usb_submit_urb(urb, GFP_NOIO);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- mutex_unlock(&port->mutex);
err("%s: Error %d for bulk URB %d",
__func__, err, i);
- return err;
+ spin_unlock_irq(&intfdata->susp_lock);
+ goto err_out;
}
}
- mutex_unlock(&port->mutex);
+ play_delayed(port);
+ spin_unlock_irq(&intfdata->susp_lock);
}
- return 0;
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+err_out:
+ return err;
}
#endif
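
The option changes above amount to a small state machine: a write either goes out immediately (counted in in_flight) or is anchored on the per-port delayed queue while the interface is suspended; autosuspend is refused while anything is in flight, and resume replays the delayed queue. A condensed userspace model of that flow is sketched below; the array standing in for the URB anchor and the printf "submissions" are illustrative only, not kernel code.

#include <stdio.h>
#include <stdbool.h>

#define MAX_DELAYED 8

static bool suspended;
static int in_flight;
static int delayed[MAX_DELAYED];	/* queued "URBs" (just ids here) */
static int n_delayed;

static void submit_urb(int id)
{
	if (suspended && n_delayed < MAX_DELAYED) {
		delayed[n_delayed++] = id;	/* anchor on the delayed queue */
		printf("urb %d delayed until resume\n", id);
	} else {
		in_flight++;
		printf("urb %d submitted, in_flight=%d\n", id, in_flight);
	}
}

static void complete_urb(int id)
{
	in_flight--;
	printf("urb %d completed, in_flight=%d\n", id, in_flight);
}

/* autosuspend path: refuse while writes are still in flight */
static int try_suspend(void)
{
	if (in_flight)
		return -1;		/* -EBUSY in the driver */
	suspended = true;
	return 0;
}

/* resume path: replay everything that was queued while suspended */
static void resume(void)
{
	int i;

	suspended = false;
	for (i = 0; i < n_delayed; i++) {
		in_flight++;
		printf("urb %d resubmitted, in_flight=%d\n",
		       delayed[i], in_flight);
	}
	n_delayed = 0;
}

int main(void)
{
	submit_urb(1);
	printf("suspend: %s\n", try_suspend() ? "busy, refused" : "ok");
	complete_urb(1);
	printf("suspend: %s\n", try_suspend() ? "busy, refused" : "ok");
	submit_urb(2);			/* lands on the delayed queue */
	resume();			/* urb 2 goes out now */
	return 0;
}
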
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index a63ea99936f..1128e01525b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -96,6 +96,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+ { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -527,6 +528,12 @@ static void pl2303_set_termios(struct tty_struct *tty,
int baud;
int i;
u8 control;
+ const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
+ 4800, 7200, 9600, 14400, 19200, 28800, 38400,
+ 57600, 115200, 230400, 460800, 614400,
+ 921600, 1228800, 2457600, 3000000, 6000000 };
+ int baud_floor, baud_ceil;
+ int k;
dbg("%s - port %d", __func__, port->number);
@@ -572,9 +579,39 @@ static void pl2303_set_termios(struct tty_struct *tty,
dbg("%s - data bits = %d", __func__, buf[6]);
}
+ /* For reference buf[0]:buf[3] baud rate value */
+ /* NOTE: Only the values defined in baud_sup are supported!
+ * => if unsupported values are set, the PL2303 seems to use
+ * 9600 baud (at least my PL2303X always does)
+ */
baud = tty_get_baud_rate(tty);
- dbg("%s - baud = %d", __func__, baud);
+ dbg("%s - baud requested = %d", __func__, baud);
if (baud) {
+ /* Set baudrate to nearest supported value */
+ for (k=0; k<ARRAY_SIZE(baud_sup); k++) {
+ if (baud_sup[k] / baud) {
+ baud_ceil = baud_sup[k];
+ if (k==0) {
+ baud = baud_ceil;
+ } else {
+ baud_floor = baud_sup[k-1];
+ if ((baud_ceil % baud)
+ > (baud % baud_floor))
+ baud = baud_floor;
+ else
+ baud = baud_ceil;
+ }
+ break;
+ }
+ }
+ if (baud > 1228800) {
+ /* type_0, type_1 only support up to 1228800 baud */
+ if (priv->type != HX)
+ baud = 1228800;
+ else if (baud > 6000000)
+ baud = 6000000;
+ }
+ dbg("%s - baud set = %d", __func__, baud);
buf[0] = baud & 0xff;
buf[1] = (baud >> 8) & 0xff;
buf[2] = (baud >> 16) & 0xff;
@@ -585,8 +622,16 @@ static void pl2303_set_termios(struct tty_struct *tty,
/* For reference buf[4]=1 is 1.5 stop bits */
/* For reference buf[4]=2 is 2 stop bits */
if (cflag & CSTOPB) {
- buf[4] = 2;
- dbg("%s - stop bits = 2", __func__);
+ /* NOTE: Comply with "real" UARTs / RS232:
+ * use 1.5 instead of 2 stop bits with 5 data bits
+ */
+ if ((cflag & CSIZE) == CS5) {
+ buf[4] = 1;
+ dbg("%s - stop bits = 1.5", __func__);
+ } else {
+ buf[4] = 2;
+ dbg("%s - stop bits = 2", __func__);
+ }
} else {
buf[4] = 0;
dbg("%s - stop bits = 1", __func__);
@@ -599,11 +644,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
/* For reference buf[5]=3 is mark parity */
/* For reference buf[5]=4 is space parity */
if (cflag & PARODD) {
- buf[5] = 1;
- dbg("%s - parity = odd", __func__);
+ if (cflag & CMSPAR) {
+ buf[5] = 3;
+ dbg("%s - parity = mark", __func__);
+ } else {
+ buf[5] = 1;
+ dbg("%s - parity = odd", __func__);
+ }
} else {
- buf[5] = 2;
- dbg("%s - parity = even", __func__);
+ if (cflag & CMSPAR) {
+ buf[5] = 4;
+ dbg("%s - parity = space", __func__);
+ } else {
+ buf[5] = 2;
+ dbg("%s - parity = even", __func__);
+ }
}
} else {
buf[5] = 0;
@@ -647,7 +702,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
pl2303_vendor_write(0x0, 0x0, serial);
}
- /* FIXME: Need to read back resulting baud rate */
+ /* Save resulting baud rate */
if (baud)
tty_encode_baud_rate(tty, baud, baud);
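
The baud selection added above rounds a requested rate to a neighbouring entry in baud_sup by comparing remainders against the two adjacent table values. For example, a request for 10000 baud ends up at 9600 because 14400 % 10000 = 4400 is larger than 10000 % 9600 = 400. A standalone demo of the same selection (the HX-only clamping above 1228800 baud is left out):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
				4800, 7200, 9600, 14400, 19200, 28800, 38400,
				57600, 115200, 230400, 460800, 614400,
				921600, 1228800, 2457600, 3000000, 6000000 };

static int pl2303_round_baud(int baud)
{
	int baud_floor, baud_ceil;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(baud_sup); k++) {
		if (baud_sup[k] / baud) {
			baud_ceil = baud_sup[k];
			if (k == 0)
				return baud_ceil;
			baud_floor = baud_sup[k - 1];
			/* pick whichever neighbour is closer to the request */
			if ((baud_ceil % baud) > (baud % baud_floor))
				return baud_floor;
			return baud_ceil;
		}
	}
	return baud;	/* above the table; the driver clamps this later */
}

int main(void)
{
	int requested[] = { 110, 10000, 115200, 500000 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(requested); i++)
		printf("requested %7d -> set %7d\n",
		       requested[i], pl2303_round_baud(requested[i]));
	return 0;
}

Because consecutive table entries are within a factor of two of each other, the two remainders equal the distances to the neighbouring rates, so the comparison picks the closer supported value.
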
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index ee9505e1dd9..d640dc95156 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -130,3 +130,7 @@
/* Sony, USB data cable for CMD-Jxx mobile phones */
#define SONY_VENDOR_ID 0x054c
#define SONY_QN3USB_PRODUCT_ID 0x0437
+
+/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
+#define SANWA_VENDOR_ID 0x11ad
+#define SANWA_PRODUCT_ID 0x0001
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 55391bbe123..8c075b2416b 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -51,6 +51,12 @@ struct sierra_iface_info {
const u8 *ifaceinfo; /* pointer to the array holding the numbers */
};
+struct sierra_intf_private {
+ spinlock_t susp_lock;
+ unsigned int suspended:1;
+ int in_flight;
+};
+
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
int result;
@@ -144,6 +150,7 @@ static int sierra_probe(struct usb_serial *serial,
{
int result = 0;
struct usb_device *udev;
+ struct sierra_intf_private *data;
u8 ifnum;
udev = serial->dev;
@@ -171,6 +178,11 @@ static int sierra_probe(struct usb_serial *serial,
return -ENODEV;
}
+ data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ spin_lock_init(&data->susp_lock);
+
return result;
}
@@ -261,13 +273,18 @@ static struct usb_driver sierra_driver = {
.name = "sierra",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
+ .suspend = usb_serial_suspend,
+ .resume = usb_serial_resume,
.id_table = id_table,
.no_dynamic_id = 1,
+ .supports_autosuspend = 1,
};
struct sierra_port_private {
spinlock_t lock; /* lock the structure */
int outstanding_urbs; /* number of out urbs in flight */
+ struct usb_anchor active;
+ struct usb_anchor delayed;
/* Input endpoints and buffers for this port */
struct urb *in_urbs[N_IN_URB];
@@ -279,6 +296,8 @@ struct sierra_port_private {
int dsr_state;
int dcd_state;
int ri_state;
+
+ unsigned int opened:1;
};
static int sierra_send_setup(struct usb_serial_port *port)
@@ -390,21 +409,25 @@ static void sierra_outdat_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ struct sierra_intf_private *intfdata;
int status = urb->status;
- unsigned long flags;
dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
+ intfdata = port->serial->private;
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree(urb->transfer_buffer);
-
+ usb_autopm_put_interface_async(port->serial->interface);
if (status)
dev_dbg(&port->dev, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
- spin_lock_irqsave(&portdata->lock, flags);
+ spin_lock(&portdata->lock);
--portdata->outstanding_urbs;
- spin_unlock_irqrestore(&portdata->lock, flags);
+ spin_unlock(&portdata->lock);
+ spin_lock(&intfdata->susp_lock);
+ --intfdata->in_flight;
+ spin_unlock(&intfdata->susp_lock);
usb_serial_port_softint(port);
}
@@ -414,6 +437,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ struct sierra_intf_private *intfdata;
struct usb_serial *serial = port->serial;
unsigned long flags;
unsigned char *buffer;
@@ -426,9 +450,9 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
portdata = usb_get_serial_port_data(port);
+ intfdata = serial->private;
dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
-
spin_lock_irqsave(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
portdata->outstanding_urbs);
@@ -442,6 +466,14 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
portdata->outstanding_urbs);
spin_unlock_irqrestore(&portdata->lock, flags);
+ retval = usb_autopm_get_interface_async(serial->interface);
+ if (retval < 0) {
+ spin_lock_irqsave(&portdata->lock, flags);
+ portdata->outstanding_urbs--;
+ spin_unlock_irqrestore(&portdata->lock, flags);
+ goto error_simple;
+ }
+
buffer = kmalloc(writesize, GFP_ATOMIC);
if (!buffer) {
dev_err(&port->dev, "out of memory\n");
@@ -468,14 +500,29 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
/* Handle the need to send a zero length packet */
urb->transfer_flags |= URB_ZERO_PACKET;
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+
+ if (intfdata->suspended) {
+ usb_anchor_urb(urb, &portdata->delayed);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ goto skip_power;
+ } else {
+ usb_anchor_urb(urb, &portdata->active);
+ }
/* send it down the pipe */
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval) {
+ usb_unanchor_urb(urb);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
"with status = %d\n", __func__, retval);
goto error;
+ } else {
+ intfdata->in_flight++;
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
}
+skip_power:
/* we are done with this urb, so let the host driver
* really free it when it is finished with it */
usb_free_urb(urb);
@@ -491,6 +538,8 @@ error_no_buffer:
dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
portdata->outstanding_urbs);
spin_unlock_irqrestore(&portdata->lock, flags);
+ usb_autopm_put_interface_async(serial->interface);
+error_simple:
return retval;
}
@@ -530,6 +579,7 @@ static void sierra_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+ usb_mark_last_busy(port->serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
dev_err(&port->dev, "resubmit read urb failed."
@@ -591,6 +641,7 @@ static void sierra_instat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving IRQ data */
if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
+ usb_mark_last_busy(serial->dev);
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
@@ -711,6 +762,8 @@ static void sierra_close(struct usb_serial_port *port)
int i;
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
+ struct sierra_intf_private *intfdata = port->serial->private;
+
dev_dbg(&port->dev, "%s\n", __func__);
portdata = usb_get_serial_port_data(port);
@@ -723,6 +776,10 @@ static void sierra_close(struct usb_serial_port *port)
if (!serial->disconnected)
sierra_send_setup(port);
mutex_unlock(&serial->disc_mutex);
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
/* Stop reading urbs */
sierra_stop_rx_urbs(port);
@@ -731,6 +788,8 @@ static void sierra_close(struct usb_serial_port *port)
sierra_release_urb(portdata->in_urbs[i]);
portdata->in_urbs[i] = NULL;
}
+ usb_autopm_get_interface(serial->interface);
+ serial->interface->needs_remote_wakeup = 0;
}
}
@@ -738,6 +797,7 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct sierra_port_private *portdata;
struct usb_serial *serial = port->serial;
+ struct sierra_intf_private *intfdata = serial->private;
int i;
int err;
int endpoint;
@@ -771,6 +831,12 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
}
sierra_send_setup(port);
+ serial->interface->needs_remote_wakeup = 1;
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ usb_autopm_put_interface(serial->interface);
+
return 0;
}
@@ -818,6 +884,8 @@ static int sierra_startup(struct usb_serial *serial)
return -ENOMEM;
}
spin_lock_init(&portdata->lock);
+ init_usb_anchor(&portdata->active);
+ init_usb_anchor(&portdata->delayed);
/* Set the port private data pointer */
usb_set_serial_port_data(port, portdata);
}
@@ -844,6 +912,88 @@ static void sierra_release(struct usb_serial *serial)
}
}
+#ifdef CONFIG_PM
+static void stop_read_write_urbs(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct sierra_port_private *portdata;
+
+ /* Stop reading/writing urbs */
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+ for (j = 0; j < N_IN_URB; j++)
+ usb_kill_urb(portdata->in_urbs[j]);
+ usb_kill_anchored_urbs(&portdata->active);
+ }
+}
+
+static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct sierra_intf_private *intfdata;
+ int b;
+
+ if (serial->dev->auto_pm) {
+ intfdata = serial->private;
+ spin_lock_irq(&intfdata->susp_lock);
+ b = intfdata->in_flight;
+
+ if (b) {
+ spin_unlock_irq(&intfdata->susp_lock);
+ return -EBUSY;
+ } else {
+ intfdata->suspended = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ }
+ }
+ stop_read_write_urbs(serial);
+
+ return 0;
+}
+
+static int sierra_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port;
+ struct sierra_intf_private *intfdata = serial->private;
+ struct sierra_port_private *portdata;
+ struct urb *urb;
+ int ec = 0;
+ int i, err;
+
+ spin_lock_irq(&intfdata->susp_lock);
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ usb_anchor_urb(urb, &portdata->active);
+ intfdata->in_flight++;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ intfdata->in_flight--;
+ usb_unanchor_urb(urb);
+ usb_scuttle_anchored_urbs(&portdata->delayed);
+ break;
+ }
+ }
+
+ if (portdata->opened) {
+ err = sierra_submit_rx_urbs(port, GFP_ATOMIC);
+ if (err)
+ ec++;
+ }
+ }
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
+ return ec ? -EIO : 0;
+}
+#else
+#define sierra_suspend NULL
+#define sierra_resume NULL
+#endif
+
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
@@ -864,6 +1014,8 @@ static struct usb_serial_driver sierra_device = {
.tiocmset = sierra_tiocmset,
.attach = sierra_startup,
.release = sierra_release,
+ .suspend = sierra_suspend,
+ .resume = sierra_resume,
.read_int_callback = sierra_instat_callback,
};
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 61e7c40b94f..1e58220403d 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -544,7 +544,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
}
/* Set Baud Rate */
- baud = tty_get_baud_rate(tty);;
+ baud = tty_get_baud_rate(tty);
switch (baud) {
case 300: buf[0] = 0x00; break;
case 600: buf[0] = 0x01; break;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 9d7ca4868d3..ff75a3589e7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -35,6 +35,7 @@
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/kfifo.h>
#include "pl2303.h"
/*
@@ -292,8 +293,6 @@ static int serial_open(struct tty_struct *tty, struct file *filp)
static void serial_down(struct usb_serial_port *port)
{
struct usb_serial_driver *drv = port->serial->type;
- struct usb_serial *serial;
- struct module *owner;
/*
* The console is magical. Do not hang up the console hardware
@@ -309,12 +308,8 @@ static void serial_down(struct usb_serial_port *port)
return;
mutex_lock(&port->mutex);
- serial = port->serial;
- owner = serial->type->driver.owner;
-
if (drv->close)
drv->close(port);
-
mutex_unlock(&port->mutex);
}
@@ -631,6 +626,8 @@ static void port_release(struct device *dev)
usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
+ if (!IS_ERR(port->write_fifo) && port->write_fifo)
+ kfifo_free(port->write_fifo);
kfree(port->bulk_in_buffer);
kfree(port->bulk_out_buffer);
kfree(port->interrupt_in_buffer);
@@ -970,6 +967,10 @@ int usb_serial_probe(struct usb_interface *interface,
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
+ port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL,
+ &port->lock);
+ if (IS_ERR(port->write_fifo))
+ goto probe_error;
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
@@ -1163,15 +1164,19 @@ int usb_serial_suspend(struct usb_interface *intf, pm_message_t message)
serial->suspending = 1;
+ if (serial->type->suspend) {
+ r = serial->type->suspend(serial, message);
+ if (r < 0)
+ goto err_out;
+ }
+
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
if (port)
kill_traffic(port);
}
- if (serial->type->suspend)
- r = serial->type->suspend(serial, message);
-
+err_out:
return r;
}
EXPORT_SYMBOL(usb_serial_suspend);
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index 2b6e565262c..ded836b02d7 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -334,7 +334,7 @@ static int datafab_determine_lun(struct us_data *us,
unsigned char *buf;
int count = 0, rc;
- if (!us || !info)
+ if (!info)
return USB_STOR_TRANSPORT_ERROR;
memcpy(command, scommand, 8);
@@ -399,7 +399,7 @@ static int datafab_id_device(struct us_data *us,
unsigned char *reply;
int rc;
- if (!us || !info)
+ if (!info)
return USB_STOR_TRANSPORT_ERROR;
if (info->lun == -1) {
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index ec17c96371a..105d900150c 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -102,5 +102,5 @@ int usb_stor_huawei_e220_init(struct us_data *us)
USB_TYPE_STANDARD | USB_RECIP_DEVICE,
0x01, 0x0, NULL, 0x0, 1000);
US_DEBUGP("Huawei mode set result is %d\n", result);
- return (result ? 0 : -ENODEV);
+ return 0;
}
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 1c69420e3ac..6168596c5ac 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -335,7 +335,7 @@ static int jumpshot_id_device(struct us_data *us,
unsigned char *reply;
int rc;
- if (!us || !info)
+ if (!info)
return USB_STOR_TRANSPORT_ERROR;
command[0] = 0xE0;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 380233bd6a3..80e65f29921 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -163,7 +163,7 @@ static void usb_onetouch_pm_hook(struct us_data *us, int action)
usb_kill_urb(onetouch->irq);
break;
case US_RESUME:
- if (usb_submit_urb(onetouch->irq, GFP_KERNEL) != 0)
+ if (usb_submit_urb(onetouch->irq, GFP_NOIO) != 0)
dev_err(&onetouch->irq->dev->dev,
"usb_submit_urb failed\n");
break;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7477d411959..079ae0f7bec 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -66,13 +66,6 @@ UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE),
-/* modified by Tobias Lorenz <tobias.lorenz@gmx.net> */
-UNUSUAL_DEV( 0x03ee, 0x6901, 0x0000, 0x0200,
- "Mitsumi",
- "USB FDD",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_SINGLE_LUN ),
-
/* Reported by Rodolfo Quesada <rquesada@roqz.net> */
UNUSUAL_DEV( 0x03ee, 0x6906, 0x0003, 0x0003,
"VIA Technologies Inc.",
@@ -233,13 +226,6 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
-/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
-UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
- "SMSC",
- "FDC GOLD-2.30",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_SINGLE_LUN ),
-
#ifdef NO_SDDR09
UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
"Microtech",
@@ -664,19 +650,13 @@ UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_SINGLE_LUN ),
-
+/* We keep this entry to force the transport; firmware 3.00 and later is ok. */
UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
"Y-E Data",
"Flashbuster-U",
US_SC_DEVICE, US_PR_CB, NULL,
US_FL_SINGLE_LUN),
-UNUSUAL_DEV( 0x057b, 0x0000, 0x0300, 0x9999,
- "Y-E Data",
- "Flashbuster-U",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_SINGLE_LUN),
-
/* Reported by Johann Cardon <johann.cardon@free.fr>
* This entry is needed only because the device reports
* bInterfaceClass = 0xff (vendor-specific)
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 60ba631e99c..b62f2bc064f 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
@@ -28,7 +28,7 @@
#define USB_SKEL_PRODUCT_ID 0xfff0
/* table of devices that work with this driver */
-static struct usb_device_id skel_table [] = {
+static struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -52,15 +52,21 @@ struct usb_skel {
struct usb_interface *interface; /* the interface for this device */
struct semaphore limit_sem; /* limiting the number of writes in progress */
struct usb_anchor submitted; /* in case we need to retract our submissions */
+ struct urb *bulk_in_urb; /* the urb to read data with */
unsigned char *bulk_in_buffer; /* the buffer to receive data */
size_t bulk_in_size; /* the size of the receive buffer */
+ size_t bulk_in_filled; /* number of bytes in the buffer */
+ size_t bulk_in_copied; /* already copied to user space */
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
int errors; /* the last request tanked */
int open_count; /* count the number of openers */
+ bool ongoing_read; /* a read is going on */
+ bool processed_urb; /* indicates we haven't processed the urb */
spinlock_t err_lock; /* lock for errors */
struct kref kref;
struct mutex io_mutex; /* synchronize I/O with disconnect */
+ struct completion bulk_in_completion; /* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -71,6 +77,7 @@ static void skel_delete(struct kref *kref)
{
struct usb_skel *dev = to_skel_dev(kref);
+ usb_free_urb(dev->bulk_in_urb);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
@@ -87,7 +94,7 @@ static int skel_open(struct inode *inode, struct file *file)
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
- err ("%s - error, can't find device for minor %d",
+ err("%s - error, can't find device for minor %d",
__func__, subminor);
retval = -ENODEV;
goto exit;
@@ -174,38 +181,190 @@ static int skel_flush(struct file *file, fl_owner_t id)
return res;
}
-static ssize_t skel_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
+static void skel_read_bulk_callback(struct urb *urb)
{
struct usb_skel *dev;
- int retval;
- int bytes_read;
+
+ dev = urb->context;
+
+ spin_lock(&dev->err_lock);
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ err("%s - nonzero read bulk status received: %d",
+ __func__, urb->status);
+
+ dev->errors = urb->status;
+ } else {
+ dev->bulk_in_filled = urb->actual_length;
+ }
+ dev->ongoing_read = 0;
+ spin_unlock(&dev->err_lock);
+
+ complete(&dev->bulk_in_completion);
+}
+
+static int skel_do_read_io(struct usb_skel *dev, size_t count)
+{
+ int rv;
+
+ /* prepare a read */
+ usb_fill_bulk_urb(dev->bulk_in_urb,
+ dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in_endpointAddr),
+ dev->bulk_in_buffer,
+ min(dev->bulk_in_size, count),
+ skel_read_bulk_callback,
+ dev);
+ /* tell everybody to leave the URB alone */
+ spin_lock_irq(&dev->err_lock);
+ dev->ongoing_read = 1;
+ spin_unlock_irq(&dev->err_lock);
+
+ /* do it */
+ rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
+ if (rv < 0) {
+ err("%s - failed submitting read urb, error %d",
+ __func__, rv);
+ dev->bulk_in_filled = 0;
+ rv = (rv == -ENOMEM) ? rv : -EIO;
+ spin_lock_irq(&dev->err_lock);
+ dev->ongoing_read = 0;
+ spin_unlock_irq(&dev->err_lock);
+ }
+
+ return rv;
+}
+
+static ssize_t skel_read(struct file *file, char *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct usb_skel *dev;
+ int rv;
+ bool ongoing_io;
dev = (struct usb_skel *)file->private_data;
- mutex_lock(&dev->io_mutex);
+ /* if we cannot read at all, return EOF */
+ if (!dev->bulk_in_urb || !count)
+ return 0;
+
+ /* no concurrent readers */
+ rv = mutex_lock_interruptible(&dev->io_mutex);
+ if (rv < 0)
+ return rv;
+
if (!dev->interface) { /* disconnect() was called */
- retval = -ENODEV;
+ rv = -ENODEV;
goto exit;
}
- /* do a blocking bulk read to get data from the device */
- retval = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
- dev->bulk_in_buffer,
- min(dev->bulk_in_size, count),
- &bytes_read, 10000);
-
- /* if the read was successful, copy the data to userspace */
- if (!retval) {
- if (copy_to_user(buffer, dev->bulk_in_buffer, bytes_read))
- retval = -EFAULT;
- else
- retval = bytes_read;
+ /* if IO is under way, we must not touch things */
+retry:
+ spin_lock_irq(&dev->err_lock);
+ ongoing_io = dev->ongoing_read;
+ spin_unlock_irq(&dev->err_lock);
+
+ if (ongoing_io) {
+ /* nonblocking IO shall not wait */
+ if (file->f_flags & O_NONBLOCK) {
+ rv = -EAGAIN;
+ goto exit;
+ }
+ /*
+ * IO may take forever
+ * hence wait in an interruptible state
+ */
+ rv = wait_for_completion_interruptible(&dev->bulk_in_completion);
+ if (rv < 0)
+ goto exit;
+ /*
+ * waiting has also consumed the completion for this urb,
+ * so we must finish processing it now
+ */
+ dev->bulk_in_copied = 0;
+ dev->processed_urb = 1;
+ }
+
+ if (!dev->processed_urb) {
+ /*
+ * the URB hasn't been processed
+ * do it now
+ */
+ wait_for_completion(&dev->bulk_in_completion);
+ dev->bulk_in_copied = 0;
+ dev->processed_urb = 1;
+ }
+
+ /* errors must be reported */
+ rv = dev->errors;
+ if (rv < 0) {
+ /* any error is reported once */
+ dev->errors = 0;
+ /* to preserve notifications about reset */
+ rv = (rv == -EPIPE) ? rv : -EIO;
+ /* no data to deliver */
+ dev->bulk_in_filled = 0;
+ /* report it */
+ goto exit;
}
+ /*
+ * if the buffer is filled we may satisfy the read
+ * else we need to start IO
+ */
+
+ if (dev->bulk_in_filled) {
+ /* we had read data */
+ size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
+ size_t chunk = min(available, count);
+
+ if (!available) {
+ /*
+ * all data has been used
+ * actual IO needs to be done
+ */
+ rv = skel_do_read_io(dev, count);
+ if (rv < 0)
+ goto exit;
+ else
+ goto retry;
+ }
+ /*
+ * data is available
+ * chunk tells us how much shall be copied
+ */
+
+ if (copy_to_user(buffer,
+ dev->bulk_in_buffer + dev->bulk_in_copied,
+ chunk))
+ rv = -EFAULT;
+ else
+ rv = chunk;
+
+ dev->bulk_in_copied += chunk;
+
+ /*
+ * if we are asked for more than we have,
+ * we start IO but don't wait
+ */
+ if (available < count)
+ skel_do_read_io(dev, count - chunk);
+ } else {
+ /* no data in the buffer */
+ rv = skel_do_read_io(dev, count);
+ if (rv < 0)
+ goto exit;
+ else if (!(file->f_flags & O_NONBLOCK))
+ goto retry;
+ rv = -EAGAIN;
+ }
exit:
mutex_unlock(&dev->io_mutex);
- return retval;
+ return rv;
}
static void skel_write_bulk_callback(struct urb *urb)
@@ -216,7 +375,7 @@ static void skel_write_bulk_callback(struct urb *urb)
/* sync/async unlink faults aren't errors */
if (urb->status) {
- if(!(urb->status == -ENOENT ||
+ if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
err("%s - nonzero write bulk status received: %d",
@@ -233,7 +392,8 @@ static void skel_write_bulk_callback(struct urb *urb)
up(&dev->limit_sem);
}
-static ssize_t skel_write(struct file *file, const char *user_buffer, size_t count, loff_t *ppos)
+static ssize_t skel_write(struct file *file, const char *user_buffer,
+ size_t count, loff_t *ppos)
{
struct usb_skel *dev;
int retval = 0;
@@ -247,14 +407,25 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
if (count == 0)
goto exit;
- /* limit the number of URBs in flight to stop a user from using up all RAM */
- if (down_interruptible(&dev->limit_sem)) {
- retval = -ERESTARTSYS;
- goto exit;
+ /*
+ * limit the number of URBs in flight to stop a user from using up all
+ * RAM
+ */
+ if (!(file->f_flags & O_NONBLOCK)) {
+ if (down_interruptible(&dev->limit_sem)) {
+ retval = -ERESTARTSYS;
+ goto exit;
+ }
+ } else {
+ if (down_trylock(&dev->limit_sem)) {
+ retval = -EAGAIN;
+ goto exit;
+ }
}
spin_lock_irq(&dev->err_lock);
- if ((retval = dev->errors) < 0) {
+ retval = dev->errors;
+ if (retval < 0) {
/* any error is reported once */
dev->errors = 0;
/* to preserve notifications about reset */
@@ -271,7 +442,8 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
goto error;
}
- buf = usb_buffer_alloc(dev->udev, writesize, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_buffer_alloc(dev->udev, writesize, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
@@ -301,11 +473,15 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
retval = usb_submit_urb(urb, GFP_KERNEL);
mutex_unlock(&dev->io_mutex);
if (retval) {
- err("%s - failed submitting write urb, error %d", __func__, retval);
+ err("%s - failed submitting write urb, error %d", __func__,
+ retval);
goto error_unanchor;
}
- /* release our reference to this urb, the USB core will eventually free it entirely */
+ /*
+ * release our reference to this urb, the USB core will eventually free
+ * it entirely
+ */
usb_free_urb(urb);
@@ -343,7 +519,8 @@ static struct usb_class_driver skel_class = {
.minor_base = USB_SKEL_MINOR_BASE,
};
-static int skel_probe(struct usb_interface *interface, const struct usb_device_id *id)
+static int skel_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
struct usb_skel *dev;
struct usb_host_interface *iface_desc;
@@ -363,6 +540,7 @@ static int skel_probe(struct usb_interface *interface, const struct usb_device_i
mutex_init(&dev->io_mutex);
spin_lock_init(&dev->err_lock);
init_usb_anchor(&dev->submitted);
+ init_completion(&dev->bulk_in_completion);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
@@ -384,6 +562,11 @@ static int skel_probe(struct usb_interface *interface, const struct usb_device_i
err("Could not allocate bulk_in_buffer");
goto error;
}
+ dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->bulk_in_urb) {
+ err("Could not allocate bulk_in_urb");
+ goto error;
+ }
}
if (!dev->bulk_out_endpointAddr &&
@@ -453,6 +636,7 @@ static void skel_draw_down(struct usb_skel *dev)
time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
if (!time)
usb_kill_anchored_urbs(&dev->submitted);
+ usb_kill_urb(dev->bulk_in_urb);
}
static int skel_suspend(struct usb_interface *intf, pm_message_t message)
@@ -465,7 +649,7 @@ static int skel_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int skel_resume (struct usb_interface *intf)
+static int skel_resume(struct usb_interface *intf)
{
return 0;
}
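
With the skeleton's read path now URB-driven, an O_NONBLOCK reader gets -EAGAIN while a transfer is in flight instead of blocking inside usb_bulk_msg(). A userspace sketch of how such a reader might poll is shown below; the /dev/skel0 node name is an assumption and depends on how the skeleton's usb_class_driver registers its minor.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/dev/skel0", O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0) {
			printf("got %zd bytes\n", n);
			break;
		}
		if (n < 0 && errno == EAGAIN) {
			/* no data yet; the driver has kicked off the URB */
			usleep(10000);
			continue;
		}
		if (n < 0)
			perror("read");
		break;		/* 0 = EOF (no bulk-in urb), or a hard error */
	}

	close(fd);
	return 0;
}
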
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index 586d350cdb4..d6bea3e0b54 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -47,7 +47,7 @@
* to an endpoint on a WUSB device that is connected to a
* HWA RC.
*
- * xfer Transfer managment -- this is all the code that gets a
+ * xfer Transfer management -- this is all the code that gets a
* buffer and pushes it to a device (or viceversa). *
*
* Some day a lot of this code will be shared between this driver and
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
index 73055530e60..b236e696994 100644
--- a/drivers/uwb/i1480/i1480u-wlp/netdev.c
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -214,7 +214,7 @@ int i1480u_open(struct net_device *net_dev)
netif_wake_queue(net_dev);
#ifdef i1480u_FLOW_CONTROL
- result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);;
+ result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "Can't submit notification URB: %d\n", result);
goto error_notif_urb_submit;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 11af4cb8924..9bbb2855ea9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1275,26 +1275,6 @@ config FB_MATROX_MAVEN
painting procedures (the secondary head does not use acceleration
engine).
-config FB_MATROX_MULTIHEAD
- bool "Multihead support"
- depends on FB_MATROX
- ---help---
- Say Y here if you have more than one (supported) Matrox device in
- your computer and you want to use all of them for different monitors
- ("multihead"). If you have only one device, you should say N because
- the driver compiled with Y is larger and a bit slower, especially on
- ia32 (ix86).
-
- If you said M to "Matrox unified accelerated driver" and N here, you
- will still be able to use several Matrox devices simultaneously:
- insert several instances of the module matroxfb into the kernel
- with insmod, supplying the parameter "dev=N" where N is 0, 1, etc.
- for the different Matrox devices. This method is slightly faster but
- uses 40 KB of kernel memory per Matrox card.
-
- There is no need for enabling 'Matrox multihead support' if you have
- only one Matrox card in the box.
-
config FB_RADEON
tristate "ATI Radeon display support"
depends on FB && PCI
@@ -2041,6 +2021,17 @@ config FB_SH7760
and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for
panels <= 320 pixel horizontal resolution.
+config FB_DA8XX
+ tristate "DA8xx/OMAP-L1xx Framebuffer support"
+ depends on FB && ARCH_DAVINCI_DA8XX
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ This is the frame buffer device driver for the TI LCD controller
+ found on DA8xx/OMAP-L1xx SoCs.
+ If unsure, say N.
+
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
@@ -2117,6 +2108,17 @@ config FB_MB862XX_LIME
---help---
Framebuffer support for Fujitsu Lime GDC on host CPU bus.
+config FB_EP93XX
+ tristate "EP93XX frame buffer support"
+ depends on FB && ARCH_EP93XX
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ ---help---
+ Framebuffer driver for the Cirrus Logic EP93XX series of processors.
+ This driver is also available as a module. The module will be called
+ ep93xx-fb.
+
config FB_PRE_INIT_FB
bool "Don't reinitialize, use bootloader's GDC/Display configuration"
depends on FB_MB862XX_LIME
@@ -2124,6 +2126,14 @@ config FB_PRE_INIT_FB
Select this option if display contents should be inherited as set by
the bootloader.
+config FB_MSM
+ tristate
+ depends on FB && ARCH_MSM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ default y
+
config FB_MX3
tristate "MX3 Framebuffer support"
depends on FB && MX3_IPU
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 01a819f4737..80232e12488 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_FB_Q40) += q40fb.o
obj-$(CONFIG_FB_TGA) += tgafb.o
obj-$(CONFIG_FB_HP300) += hpfb.o
obj-$(CONFIG_FB_G364) += g364fb.o
+obj-$(CONFIG_FB_EP93XX) += ep93xx-fb.o
obj-$(CONFIG_FB_SA1100) += sa1100fb.o
obj-$(CONFIG_FB_HIT) += hitfb.o
obj-$(CONFIG_FB_EPSON1355) += epson1355fb.o
@@ -126,6 +127,7 @@ obj-$(CONFIG_FB_OMAP) += omap/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
+obj-$(CONFIG_FB_MSM) += msm/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
@@ -136,6 +138,7 @@ obj-$(CONFIG_FB_OF) += offb.o
obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
+obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 63d3739d43a..913b4a47ae5 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -132,7 +132,7 @@
#endif
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
-#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
+#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
@@ -188,24 +188,23 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
*/
static void ATIReduceRatio(int *Numerator, int *Denominator)
{
- int Multiplier, Divider, Remainder;
+ int Multiplier, Divider, Remainder;
- Multiplier = *Numerator;
- Divider = *Denominator;
+ Multiplier = *Numerator;
+ Divider = *Denominator;
- while ((Remainder = Multiplier % Divider))
- {
- Multiplier = Divider;
- Divider = Remainder;
- }
+ while ((Remainder = Multiplier % Divider)) {
+ Multiplier = Divider;
+ Divider = Remainder;
+ }
- *Numerator /= Divider;
- *Denominator /= Divider;
+ *Numerator /= Divider;
+ *Denominator /= Divider;
}
#endif
- /*
- * The Hardware parameters for each card
- */
+/*
+ * The Hardware parameters for each card
+ */
struct pci_mmap_map {
unsigned long voff;
@@ -223,17 +222,19 @@ static struct fb_fix_screeninfo atyfb_fix __devinitdata = {
.ypanstep = 1,
};
- /*
- * Frame buffer device API
- */
+/*
+ * Frame buffer device API
+ */
static int atyfb_open(struct fb_info *info, int user);
static int atyfb_release(struct fb_info *info, int user);
-static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
+static int atyfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
static int atyfb_set_par(struct fb_info *info);
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info);
-static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
+ u_int transp, struct fb_info *info);
+static int atyfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
#ifdef __sparc__
@@ -241,9 +242,9 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
static int atyfb_sync(struct fb_info *info);
- /*
- * Internal routines
- */
+/*
+ * Internal routines
+ */
static int aty_init(struct fb_info *info);
@@ -254,8 +255,11 @@ static int store_video_par(char *videopar, unsigned char m64_num);
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
-static int aty_var_to_crtc(const struct fb_info *info, const struct fb_var_screeninfo *var, struct crtc *crtc);
-static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *var);
+static int aty_var_to_crtc(const struct fb_info *info,
+ const struct fb_var_screeninfo *var,
+ struct crtc *crtc);
+static int aty_crtc_to_var(const struct crtc *crtc,
+ struct fb_var_screeninfo *var);
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
#ifdef CONFIG_PPC
static int read_aty_sense(const struct atyfb_par *par);
@@ -264,9 +268,9 @@ static int read_aty_sense(const struct atyfb_par *par);
static DEFINE_MUTEX(reboot_lock);
static struct fb_info *reboot_info;
- /*
- * Interface used by the world
- */
+/*
+ * Interface used by the world
+ */
static struct fb_var_screeninfo default_var = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
@@ -452,14 +456,14 @@ static int __devinit correct_chipset(struct atyfb_par *par)
type = chip_id & CFG_CHIP_TYPE;
rev = (chip_id & CFG_CHIP_REV) >> 24;
- switch(par->pci_id) {
+ switch (par->pci_id) {
#ifdef CONFIG_FB_ATY_GX
case PCI_CHIP_MACH64GX:
- if(type != 0x00d7)
+ if (type != 0x00d7)
return -ENODEV;
break;
case PCI_CHIP_MACH64CX:
- if(type != 0x0057)
+ if (type != 0x0057)
return -ENODEV;
break;
#endif
@@ -564,7 +568,8 @@ static char *aty_xl_ram[8] __devinitdata = {
};
#endif /* CONFIG_FB_ATY_CT */
-static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *par)
+static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var,
+ struct atyfb_par *par)
{
u32 pixclock = var->pixclock;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
@@ -572,7 +577,7 @@ static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *p
par->pll.ct.xres = 0;
if (par->lcd_table != 0) {
lcd_on_off = aty_ld_lcd(LCD_GEN_CNTL, par);
- if(lcd_on_off & LCD_ON) {
+ if (lcd_on_off & LCD_ON) {
par->pll.ct.xres = var->xres;
pixclock = par->lcd_pixclock;
}
@@ -584,7 +589,7 @@ static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *p
#if defined(CONFIG_PPC)
/*
- * Apple monitor sense
+ * Apple monitor sense
*/
static int __devinit read_aty_sense(const struct atyfb_par *par)
@@ -625,16 +630,16 @@ static int __devinit read_aty_sense(const struct atyfb_par *par)
/* ------------------------------------------------------------------------- */
/*
- * CRTC programming
+ * CRTC programming
*/
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
{
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
- if(!M64_HAS(LT_LCD_REGS)) {
- crtc->lcd_index = aty_ld_le32(LCD_INDEX, par);
- aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
+ if (!M64_HAS(LT_LCD_REGS)) {
+ crtc->lcd_index = aty_ld_le32(LCD_INDEX, par);
+ aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
}
crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par);
crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par);
@@ -642,7 +647,7 @@ static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
/* switch to non shadow registers */
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
- ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
+ ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
/* save stretching */
crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
@@ -663,7 +668,7 @@ static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
if (par->lcd_table != 0) {
/* switch to shadow registers */
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
- SHADOW_EN | SHADOW_RW_EN, par);
+ SHADOW_EN | SHADOW_RW_EN, par);
crtc->shadow_h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc->shadow_h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
@@ -680,21 +685,20 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* stop CRTC */
- aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~(CRTC_EXT_DISP_EN | CRTC_EN), par);
+ aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl &
+ ~(CRTC_EXT_DISP_EN | CRTC_EN), par);
/* update non-shadow registers first */
aty_st_lcd(CNFG_PANEL, crtc->lcd_config_panel, par);
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
- ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
+ ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
/* temporarily disable stretching */
- aty_st_lcd(HORZ_STRETCHING,
- crtc->horz_stretching &
- ~(HORZ_STRETCH_MODE | HORZ_STRETCH_EN), par);
- aty_st_lcd(VERT_STRETCHING,
- crtc->vert_stretching &
- ~(VERT_STRETCH_RATIO1 | VERT_STRETCH_RATIO2 |
- VERT_STRETCH_USE0 | VERT_STRETCH_EN), par);
+ aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching &
+ ~(HORZ_STRETCH_MODE | HORZ_STRETCH_EN), par);
+ aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching &
+ ~(VERT_STRETCH_RATIO1 | VERT_STRETCH_RATIO2 |
+ VERT_STRETCH_USE0 | VERT_STRETCH_EN), par);
}
#endif
/* turn off CRT */
@@ -702,17 +706,19 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
DPRINTK("setting up CRTC\n");
DPRINTK("set primary CRT to %ix%i %c%c composite %c\n",
- ((((crtc->h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->v_tot_disp>>16) & 0x7ff) + 1),
- (crtc->h_sync_strt_wid & 0x200000)?'N':'P', (crtc->v_sync_strt_wid & 0x200000)?'N':'P',
- (crtc->gen_cntl & CRTC_CSYNC_EN)?'P':'N');
-
- DPRINTK("CRTC_H_TOTAL_DISP: %x\n",crtc->h_tot_disp);
- DPRINTK("CRTC_H_SYNC_STRT_WID: %x\n",crtc->h_sync_strt_wid);
- DPRINTK("CRTC_V_TOTAL_DISP: %x\n",crtc->v_tot_disp);
- DPRINTK("CRTC_V_SYNC_STRT_WID: %x\n",crtc->v_sync_strt_wid);
+ ((((crtc->h_tot_disp >> 16) & 0xff) + 1) << 3),
+ (((crtc->v_tot_disp >> 16) & 0x7ff) + 1),
+ (crtc->h_sync_strt_wid & 0x200000) ? 'N' : 'P',
+ (crtc->v_sync_strt_wid & 0x200000) ? 'N' : 'P',
+ (crtc->gen_cntl & CRTC_CSYNC_EN) ? 'P' : 'N');
+
+ DPRINTK("CRTC_H_TOTAL_DISP: %x\n", crtc->h_tot_disp);
+ DPRINTK("CRTC_H_SYNC_STRT_WID: %x\n", crtc->h_sync_strt_wid);
+ DPRINTK("CRTC_V_TOTAL_DISP: %x\n", crtc->v_tot_disp);
+ DPRINTK("CRTC_V_SYNC_STRT_WID: %x\n", crtc->v_sync_strt_wid);
DPRINTK("CRTC_OFF_PITCH: %x\n", crtc->off_pitch);
DPRINTK("CRTC_VLINE_CRNT_VLINE: %x\n", crtc->vline_crnt_vline);
- DPRINTK("CRTC_GEN_CNTL: %x\n",crtc->gen_cntl);
+ DPRINTK("CRTC_GEN_CNTL: %x\n", crtc->gen_cntl);
aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_tot_disp, par);
aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid, par);
@@ -732,16 +738,22 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
if (par->lcd_table != 0) {
/* switch to shadow registers */
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
- (SHADOW_EN | SHADOW_RW_EN), par);
+ SHADOW_EN | SHADOW_RW_EN, par);
DPRINTK("set shadow CRT to %ix%i %c%c\n",
- ((((crtc->shadow_h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->shadow_v_tot_disp>>16) & 0x7ff) + 1),
- (crtc->shadow_h_sync_strt_wid & 0x200000)?'N':'P', (crtc->shadow_v_sync_strt_wid & 0x200000)?'N':'P');
-
- DPRINTK("SHADOW CRTC_H_TOTAL_DISP: %x\n", crtc->shadow_h_tot_disp);
- DPRINTK("SHADOW CRTC_H_SYNC_STRT_WID: %x\n", crtc->shadow_h_sync_strt_wid);
- DPRINTK("SHADOW CRTC_V_TOTAL_DISP: %x\n", crtc->shadow_v_tot_disp);
- DPRINTK("SHADOW CRTC_V_SYNC_STRT_WID: %x\n", crtc->shadow_v_sync_strt_wid);
+ ((((crtc->shadow_h_tot_disp >> 16) & 0xff) + 1) << 3),
+ (((crtc->shadow_v_tot_disp >> 16) & 0x7ff) + 1),
+ (crtc->shadow_h_sync_strt_wid & 0x200000) ? 'N' : 'P',
+ (crtc->shadow_v_sync_strt_wid & 0x200000) ? 'N' : 'P');
+
+ DPRINTK("SHADOW CRTC_H_TOTAL_DISP: %x\n",
+ crtc->shadow_h_tot_disp);
+ DPRINTK("SHADOW CRTC_H_SYNC_STRT_WID: %x\n",
+ crtc->shadow_h_sync_strt_wid);
+ DPRINTK("SHADOW CRTC_V_TOTAL_DISP: %x\n",
+ crtc->shadow_v_tot_disp);
+ DPRINTK("SHADOW CRTC_V_SYNC_STRT_WID: %x\n",
+ crtc->shadow_v_sync_strt_wid);
aty_st_le32(CRTC_H_TOTAL_DISP, crtc->shadow_h_tot_disp, par);
aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->shadow_h_sync_strt_wid, par);
@@ -752,16 +764,16 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
DPRINTK("LCD_GEN_CNTL: %x\n", crtc->lcd_gen_cntl);
DPRINTK("HORZ_STRETCHING: %x\n", crtc->horz_stretching);
DPRINTK("VERT_STRETCHING: %x\n", crtc->vert_stretching);
- if(!M64_HAS(LT_LCD_REGS))
- DPRINTK("EXT_VERT_STRETCH: %x\n", crtc->ext_vert_stretch);
+ if (!M64_HAS(LT_LCD_REGS))
+ DPRINTK("EXT_VERT_STRETCH: %x\n", crtc->ext_vert_stretch);
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching, par);
aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching, par);
- if(!M64_HAS(LT_LCD_REGS)) {
- aty_st_lcd(EXT_VERT_STRETCH, crtc->ext_vert_stretch, par);
- aty_ld_le32(LCD_INDEX, par);
- aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
+ if (!M64_HAS(LT_LCD_REGS)) {
+ aty_st_lcd(EXT_VERT_STRETCH, crtc->ext_vert_stretch, par);
+ aty_ld_le32(LCD_INDEX, par);
+ aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
}
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
@@ -779,7 +791,8 @@ static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
}
static int aty_var_to_crtc(const struct fb_info *info,
- const struct fb_var_screeninfo *var, struct crtc *crtc)
+ const struct fb_var_screeninfo *var,
+ struct crtc *crtc)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp;
@@ -814,34 +827,32 @@ static int aty_var_to_crtc(const struct fb_info *info,
if (bpp <= 8) {
bpp = 8;
pix_width = CRTC_PIX_WIDTH_8BPP;
- dp_pix_width =
- HOST_8BPP | SRC_8BPP | DST_8BPP |
- BYTE_ORDER_LSB_TO_MSB;
+ dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP |
+ BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_8BPP;
} else if (bpp <= 15) {
bpp = 16;
pix_width = CRTC_PIX_WIDTH_15BPP;
dp_pix_width = HOST_15BPP | SRC_15BPP | DST_15BPP |
- BYTE_ORDER_LSB_TO_MSB;
+ BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_15BPP;
} else if (bpp <= 16) {
bpp = 16;
pix_width = CRTC_PIX_WIDTH_16BPP;
dp_pix_width = HOST_16BPP | SRC_16BPP | DST_16BPP |
- BYTE_ORDER_LSB_TO_MSB;
+ BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_16BPP;
} else if (bpp <= 24 && M64_HAS(INTEGRATED)) {
bpp = 24;
pix_width = CRTC_PIX_WIDTH_24BPP;
- dp_pix_width =
- HOST_8BPP | SRC_8BPP | DST_8BPP |
- BYTE_ORDER_LSB_TO_MSB;
+ dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP |
+ BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_24BPP;
} else if (bpp <= 32) {
bpp = 32;
pix_width = CRTC_PIX_WIDTH_32BPP;
dp_pix_width = HOST_32BPP | SRC_32BPP | DST_32BPP |
- BYTE_ORDER_LSB_TO_MSB;
+ BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_32BPP;
} else
FAIL("invalid bpp");
@@ -854,9 +865,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
- if((xres > 1600) || (yres > 1200)) {
+ if ((xres > 1600) || (yres > 1200)) {
FAIL("MACH64 chips are designed for max 1600x1200\n"
- "select anoter resolution.");
+ "select anoter resolution.");
}
h_sync_strt = h_disp + var->right_margin;
h_sync_end = h_sync_strt + var->hsync_len;
@@ -869,11 +880,12 @@ static int aty_var_to_crtc(const struct fb_info *info,
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
- if(!M64_HAS(LT_LCD_REGS)) {
- u32 lcd_index = aty_ld_le32(LCD_INDEX, par);
- crtc->lcd_index = lcd_index &
- ~(LCD_INDEX_MASK | LCD_DISPLAY_DIS | LCD_SRC_SEL | CRTC2_DISPLAY_DIS);
- aty_st_le32(LCD_INDEX, lcd_index, par);
+ if (!M64_HAS(LT_LCD_REGS)) {
+ u32 lcd_index = aty_ld_le32(LCD_INDEX, par);
+ crtc->lcd_index = lcd_index &
+ ~(LCD_INDEX_MASK | LCD_DISPLAY_DIS |
+ LCD_SRC_SEL | CRTC2_DISPLAY_DIS);
+ aty_st_le32(LCD_INDEX, lcd_index, par);
}
if (!M64_HAS(MOBIL_BUS))
@@ -888,12 +900,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR | LOCK_8DOT;
- if((crtc->lcd_gen_cntl & LCD_ON) &&
- ((xres > par->lcd_width) || (yres > par->lcd_height))) {
- /* We cannot display the mode on the LCD. If the CRT is enabled
- we can turn off the LCD.
- If the CRT is off, it isn't a good idea to switch it on; we don't
- know if one is connected. So it's better to fail then.
+ if ((crtc->lcd_gen_cntl & LCD_ON) &&
+ ((xres > par->lcd_width) || (yres > par->lcd_height))) {
+ /*
+ * We cannot display the mode on the LCD. If the CRT is
+ * enabled we can turn off the LCD.
+ * If the CRT is off, it isn't a good idea to switch it
+ * on; we don't know if one is connected. So it's better
+ * to fail then.
*/
if (crtc->lcd_gen_cntl & CRT_ON) {
if (!(var->activate & FB_ACTIVATE_TEST))
@@ -916,17 +930,18 @@ static int aty_var_to_crtc(const struct fb_info *info,
vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED);
- /* This is horror! When we simulate, say 640x480 on an 800x600
- LCD monitor, the CRTC should be programmed 800x600 values for
- the non visible part, but 640x480 for the visible part.
- This code has been tested on a laptop with it's 1400x1050 LCD
- monitor and a conventional monitor both switched on.
- Tested modes: 1280x1024, 1152x864, 1024x768, 800x600,
- works with little glitches also with DOUBLESCAN modes
+ /*
+ * This is horror! When we simulate, say 640x480 on an 800x600
+ * LCD monitor, the CRTC should be programmed 800x600 values for
+ * the non visible part, but 640x480 for the visible part.
+ * This code has been tested on a laptop with its 1400x1050 LCD
+ * monitor and a conventional monitor both switched on.
+ * Tested modes: 1280x1024, 1152x864, 1024x768, 800x600,
+ * works with little glitches also with DOUBLESCAN modes
*/
if (yres < par->lcd_height) {
VScan = par->lcd_height / yres;
- if(VScan > 1) {
+ if (VScan > 1) {
VScan = 2;
vmode |= FB_VMODE_DOUBLE;
}
@@ -952,7 +967,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
FAIL_MAX("h_disp too large", h_disp, 0xff);
FAIL_MAX("h_sync_strt too large", h_sync_strt, 0x1ff);
/*FAIL_MAX("h_sync_wid too large", h_sync_wid, 0x1f);*/
- if(h_sync_wid > 0x1f)
+ if (h_sync_wid > 0x1f)
h_sync_wid = 0x1f;
FAIL_MAX("h_total too large", h_total, 0x1ff);
@@ -978,7 +993,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
FAIL_MAX("v_disp too large", v_disp, 0x7ff);
FAIL_MAX("v_sync_stsrt too large", v_sync_strt, 0x7ff);
/*FAIL_MAX("v_sync_wid too large", v_sync_wid, 0x1f);*/
- if(v_sync_wid > 0x1f)
+ if (v_sync_wid > 0x1f)
v_sync_wid = 0x1f;
FAIL_MAX("v_total too large", v_total, 0x7ff);
@@ -995,11 +1010,13 @@ static int aty_var_to_crtc(const struct fb_info *info,
((line_length / bpp) << 22);
crtc->vline_crnt_vline = 0;
- crtc->h_tot_disp = h_total | (h_disp<<16);
- crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly<<8) |
- ((h_sync_strt & 0x100)<<4) | (h_sync_wid<<16) | (h_sync_pol<<21);
- crtc->v_tot_disp = v_total | (v_disp<<16);
- crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid<<16) | (v_sync_pol<<21);
+ crtc->h_tot_disp = h_total | (h_disp << 16);
+ crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly << 8) |
+ ((h_sync_strt & 0x100) << 4) | (h_sync_wid << 16) |
+ (h_sync_pol << 21);
+ crtc->v_tot_disp = v_total | (v_disp << 16);
+ crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid << 16) |
+ (v_sync_pol << 21);
/* crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_PRESERVED_MASK; */
crtc->gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | pix_width | c_sync;
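
The re-wrapped assignments above pack several timing fields into single 32-bit CRTC registers: h_total sits in the low bits, h_disp is shifted up by 16, and the sync word carries start, delay, width and polarity. A small sketch of the same packing (helper names are made up; the bit positions are taken from the expressions in the hunk):

#include <stdint.h>

/* h_tot_disp: horizontal total in the low bits, displayed width in bits 16+. */
static uint32_t pack_h_tot_disp(uint32_t h_total, uint32_t h_disp)
{
	return h_total | (h_disp << 16);
}

/* h_sync_strt_wid: 9-bit start (split across bit 12), delay, width, polarity. */
static uint32_t pack_h_sync(uint32_t strt, uint32_t dly, uint32_t wid, uint32_t pol)
{
	return (strt & 0xff) | (dly << 8) | ((strt & 0x100) << 4) |
	       (wid << 16) | (pol << 21);
}
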
@@ -1014,13 +1031,15 @@ static int aty_var_to_crtc(const struct fb_info *info,
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
vdisplay = yres;
- if(vmode & FB_VMODE_DOUBLE)
+ if (vmode & FB_VMODE_DOUBLE)
vdisplay <<= 1;
crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 |
- /*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
- USE_SHADOWED_VEND | USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
- crtc->lcd_gen_cntl |= (DONT_SHADOW_VPAR/* | LOCK_8DOT*/);
+ /*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
+ USE_SHADOWED_VEND |
+ USE_SHADOWED_ROWCUR |
+ SHADOW_EN | SHADOW_RW_EN);
+ crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR/* | LOCK_8DOT*/;
/* MOBILITY M1 tested, FIXME: LT */
crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
@@ -1028,28 +1047,32 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par) &
~(AUTO_VERT_RATIO | VERT_STRETCH_MODE | VERT_STRETCH_RATIO3);
- crtc->horz_stretching &=
- ~(HORZ_STRETCH_RATIO | HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
- HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
+ crtc->horz_stretching &= ~(HORZ_STRETCH_RATIO |
+ HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
+ HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) {
do {
/*
- * The horizontal blender misbehaves when HDisplay is less than a
- * a certain threshold (440 for a 1024-wide panel). It doesn't
- * stretch such modes enough. Use pixel replication instead of
- * blending to stretch modes that can be made to exactly fit the
- * panel width. The undocumented "NoLCDBlend" option allows the
- * pixel-replicated mode to be slightly wider or narrower than the
- * panel width. It also causes a mode that is exactly half as wide
- * as the panel to be pixel-replicated, rather than blended.
- */
+ * The horizontal blender misbehaves when
+ * HDisplay is less than a certain threshold
+ * (440 for a 1024-wide panel). It doesn't
+ * stretch such modes enough. Use pixel
+ * replication instead of blending to stretch
+ * modes that can be made to exactly fit the
+ * panel width. The undocumented "NoLCDBlend"
+ * option allows the pixel-replicated mode to
+ * be slightly wider or narrower than the
+ * panel width. It also causes a mode that is
+ * exactly half as wide as the panel to be
+ * pixel-replicated, rather than blended.
+ */
int HDisplay = xres & ~7;
int nStretch = par->lcd_width / HDisplay;
int Remainder = par->lcd_width % HDisplay;
if ((!Remainder && ((nStretch > 2))) ||
- (((HDisplay * 16) / par->lcd_width) < 7)) {
- static const char StretchLoops[] = {10, 12, 13, 15, 16};
+ (((HDisplay * 16) / par->lcd_width) < 7)) {
+ static const char StretchLoops[] = { 10, 12, 13, 15, 16 };
int horz_stretch_loop = -1, BestRemainder;
int Numerator = HDisplay, Denominator = par->lcd_width;
int Index = 5;
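
The reflowed comment and condition above decide between pixel replication and the horizontal blender: replication is chosen when the panel width is an exact multiple (more than 2x) of the displayed width, or when the mode is narrow enough that HDisplay*16/panel_width falls below 7. A compact restatement of just that test (hypothetical helper, same thresholds as the hunk):

/* Mirrors the replication-vs-blending test in aty_var_to_crtc() above. */
static int use_pixel_replication(int xres, int lcd_width)
{
	int HDisplay  = xres & ~7;		/* CRTC works in 8-pixel steps */
	int nStretch  = lcd_width / HDisplay;
	int Remainder = lcd_width % HDisplay;

	return (!Remainder && nStretch > 2) ||
	       ((HDisplay * 16) / lcd_width < 7);
}
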
@@ -1098,12 +1121,12 @@ static int aty_var_to_crtc(const struct fb_info *info,
(((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0));
if (!M64_HAS(LT_LCD_REGS) &&
- xres <= (M64_HAS(MOBIL_BUS)?1024:800))
+ xres <= (M64_HAS(MOBIL_BUS) ? 1024 : 800))
crtc->ext_vert_stretch |= VERT_STRETCH_MODE;
} else {
/*
- * Don't use vertical blending if the mode is too wide or not
- * vertically stretched.
+ * Don't use vertical blending if the mode is too wide
+ * or not vertically stretched.
*/
crtc->vert_stretching = 0;
}
@@ -1125,11 +1148,11 @@ static int aty_var_to_crtc(const struct fb_info *info,
return 0;
}
-static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *var)
+static int aty_crtc_to_var(const struct crtc *crtc,
+ struct fb_var_screeninfo *var)
{
u32 xres, yres, bpp, left, right, upper, lower, hslen, vslen, sync;
- u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid,
- h_sync_pol;
+ u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width;
u32 double_scan, interlace;
@@ -1161,8 +1184,8 @@ static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *va
lower = v_sync_strt - v_disp;
vslen = v_sync_wid;
sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) |
- (v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) |
- (c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);
+ (v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) |
+ (c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);
switch (pix_width) {
#if 0
@@ -1252,20 +1275,21 @@ static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *va
var->vsync_len = vslen;
var->sync = sync;
var->vmode = FB_VMODE_NONINTERLACED;
- /* In double scan mode, the vertical parameters are doubled, so we need to
- half them to get the right values.
- In interlaced mode the values are already correct, so no correction is
- necessary.
+ /*
+ * In double scan mode, the vertical parameters are doubled,
+ * so we need to halve them to get the right values.
+ * In interlaced mode the values are already correct,
+ * so no correction is necessary.
*/
if (interlace)
var->vmode = FB_VMODE_INTERLACED;
if (double_scan) {
var->vmode = FB_VMODE_DOUBLE;
- var->yres>>=1;
- var->upper_margin>>=1;
- var->lower_margin>>=1;
- var->vsync_len>>=1;
+ var->yres >>= 1;
+ var->upper_margin >>= 1;
+ var->lower_margin >>= 1;
+ var->vsync_len >>= 1;
}
return 0;
@@ -1286,7 +1310,8 @@ static int atyfb_set_par(struct fb_info *info)
if (par->asleep)
return 0;
- if ((err = aty_var_to_crtc(info, var, &par->crtc)))
+ err = aty_var_to_crtc(info, var, &par->crtc);
+ if (err)
return err;
pixclock = atyfb_get_pixclock(var, par);
@@ -1295,7 +1320,9 @@ static int atyfb_set_par(struct fb_info *info)
PRINTKE("Invalid pixclock\n");
return -EINVAL;
} else {
- if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &par->pll)))
+ err = par->pll_ops->var_to_pll(info, pixclock,
+ var->bits_per_pixel, &par->pll);
+ if (err)
return err;
}
@@ -1313,22 +1340,23 @@ static int atyfb_set_par(struct fb_info *info)
wait_for_idle(par);
aty_set_crtc(par, &par->crtc);
- par->dac_ops->set_dac(info, &par->pll, var->bits_per_pixel, par->accel_flags);
+ par->dac_ops->set_dac(info, &par->pll,
+ var->bits_per_pixel, par->accel_flags);
par->pll_ops->set_pll(info, &par->pll);
#ifdef DEBUG
- if(par->pll_ops && par->pll_ops->pll_to_var)
- pixclock_in_ps = par->pll_ops->pll_to_var(info, &(par->pll));
+ if (par->pll_ops && par->pll_ops->pll_to_var)
+ pixclock_in_ps = par->pll_ops->pll_to_var(info, &par->pll);
else
pixclock_in_ps = 0;
- if(0 == pixclock_in_ps) {
+ if (0 == pixclock_in_ps) {
PRINTKE("ALERT ops->pll_to_var get 0\n");
pixclock_in_ps = pixclock;
}
memset(&debug, 0, sizeof(debug));
- if(!aty_crtc_to_var(&(par->crtc), &debug)) {
+ if (!aty_crtc_to_var(&par->crtc, &debug)) {
u32 hSync, vRefresh;
u32 h_disp, h_sync_strt, h_sync_end, h_total;
u32 v_disp, v_sync_strt, v_sync_end, v_total;
@@ -1344,16 +1372,20 @@ static int atyfb_set_par(struct fb_info *info)
hSync = 1000000000 / (pixclock_in_ps * h_total);
vRefresh = (hSync * 1000) / v_total;
- if (par->crtc.gen_cntl & CRTC_INTERLACE_EN)
- vRefresh *= 2;
- if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
- vRefresh /= 2;
+ if (par->crtc.gen_cntl & CRTC_INTERLACE_EN)
+ vRefresh *= 2;
+ if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
+ vRefresh /= 2;
DPRINTK("atyfb_set_par\n");
- DPRINTK(" Set Visible Mode to %ix%i-%i\n", var->xres, var->yres, var->bits_per_pixel);
- DPRINTK(" Virtual resolution %ix%i, pixclock_in_ps %i (calculated %i)\n",
- var->xres_virtual, var->yres_virtual, pixclock, pixclock_in_ps);
- DPRINTK(" Dot clock: %i MHz\n", 1000000 / pixclock_in_ps);
+ DPRINTK(" Set Visible Mode to %ix%i-%i\n",
+ var->xres, var->yres, var->bits_per_pixel);
+ DPRINTK(" Virtual resolution %ix%i, "
+ "pixclock_in_ps %i (calculated %i)\n",
+ var->xres_virtual, var->yres_virtual,
+ pixclock, pixclock_in_ps);
+ DPRINTK(" Dot clock: %i MHz\n",
+ 1000000 / pixclock_in_ps);
DPRINTK(" Horizontal sync: %i kHz\n", hSync);
DPRINTK(" Vertical refresh: %i Hz\n", vRefresh);
DPRINTK(" x style: %i.%03i %i %i %i %i %i %i %i %i\n",
@@ -1448,7 +1480,8 @@ static int atyfb_set_par(struct fb_info *info)
base = 0x2000;
printk("debug atyfb: Mach64 non-shadow register values:");
for (i = 0; i < 256; i = i+4) {
- if(i%16 == 0) printk("\ndebug atyfb: 0x%04X: ", base + i);
+ if (i % 16 == 0)
+ printk("\ndebug atyfb: 0x%04X: ", base + i);
printk(" %08X", aty_ld_le32(i, par));
}
printk("\n\n");
@@ -1458,8 +1491,10 @@ static int atyfb_set_par(struct fb_info *info)
base = 0x00;
printk("debug atyfb: Mach64 PLL register values:");
for (i = 0; i < 64; i++) {
- if(i%16 == 0) printk("\ndebug atyfb: 0x%02X: ", base + i);
- if(i%4 == 0) printk(" ");
+ if (i % 16 == 0)
+ printk("\ndebug atyfb: 0x%02X: ", base + i);
+ if (i % 4 == 0)
+ printk(" ");
printk("%02X", aty_ld_pll_ct(i, par));
}
printk("\n\n");
@@ -1470,19 +1505,21 @@ static int atyfb_set_par(struct fb_info *info)
/* LCD registers */
base = 0x00;
printk("debug atyfb: LCD register values:");
- if(M64_HAS(LT_LCD_REGS)) {
- for(i = 0; i <= POWER_MANAGEMENT; i++) {
- if(i == EXT_VERT_STRETCH)
- continue;
- printk("\ndebug atyfb: 0x%04X: ", lt_lcd_regs[i]);
- printk(" %08X", aty_ld_lcd(i, par));
- }
-
+ if (M64_HAS(LT_LCD_REGS)) {
+ for (i = 0; i <= POWER_MANAGEMENT; i++) {
+ if (i == EXT_VERT_STRETCH)
+ continue;
+ printk("\ndebug atyfb: 0x%04X: ",
+ lt_lcd_regs[i]);
+ printk(" %08X", aty_ld_lcd(i, par));
+ }
} else {
- for (i = 0; i < 64; i++) {
- if(i%4 == 0) printk("\ndebug atyfb: 0x%02X: ", base + i);
- printk(" %08X", aty_ld_lcd(i, par));
- }
+ for (i = 0; i < 64; i++) {
+ if (i % 4 == 0)
+ printk("\ndebug atyfb: 0x%02X: ",
+ base + i);
+ printk(" %08X", aty_ld_lcd(i, par));
+ }
}
printk("\n\n");
}
@@ -1500,9 +1537,10 @@ static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
union aty_pll pll;
u32 pixclock;
- memcpy(&pll, &(par->pll), sizeof(pll));
+ memcpy(&pll, &par->pll, sizeof(pll));
- if((err = aty_var_to_crtc(info, var, &crtc)))
+ err = aty_var_to_crtc(info, var, &crtc);
+ if (err)
return err;
pixclock = atyfb_get_pixclock(var, par);
@@ -1512,7 +1550,9 @@ static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
PRINTKE("Invalid pixclock\n");
return -EINVAL;
} else {
- if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &pll)))
+ err = par->pll_ops->var_to_pll(info, pixclock,
+ var->bits_per_pixel, &pll);
+ if (err)
return err;
}
@@ -1539,9 +1579,9 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
}
- /*
- * Open/Release the frame buffer device
- */
+/*
+ * Open/Release the frame buffer device
+ */
static int atyfb_open(struct fb_info *info, int user)
{
@@ -1553,7 +1593,7 @@ static int atyfb_open(struct fb_info *info, int user)
par->mmaped = 0;
#endif
}
- return (0);
+ return 0;
}
static irqreturn_t aty_irq(int irq, void *dev_id)
@@ -1568,7 +1608,8 @@ static irqreturn_t aty_irq(int irq, void *dev_id)
if (int_cntl & CRTC_VBLANK_INT) {
/* clear interrupt */
- aty_st_le32(CRTC_INT_CNTL, (int_cntl & CRTC_INT_EN_MASK) | CRTC_VBLANK_INT_AK, par);
+ aty_st_le32(CRTC_INT_CNTL, (int_cntl & CRTC_INT_EN_MASK) |
+ CRTC_VBLANK_INT_AK, par);
par->vblank.count++;
if (par->vblank.pan_display) {
par->vblank.pan_display = 0;
@@ -1603,9 +1644,11 @@ static int aty_enable_irq(struct atyfb_par *par, int reenable)
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
if (!(int_cntl & CRTC_VBLANK_INT_EN)) {
- printk("atyfb: someone disabled IRQ [%08x]\n", int_cntl);
+ printk("atyfb: someone disabled IRQ [%08x]\n",
+ int_cntl);
/* re-enable interrupt */
- aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par );
+ aty_st_le32(CRTC_INT_CNTL, int_cntl |
+ CRTC_VBLANK_INT_EN, par);
}
spin_unlock_irq(&par->int_lock);
}
@@ -1625,7 +1668,7 @@ static int aty_disable_irq(struct atyfb_par *par)
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
/* disable interrupt */
- aty_st_le32(CRTC_INT_CNTL, int_cntl & ~CRTC_VBLANK_INT_EN, par );
+ aty_st_le32(CRTC_INT_CNTL, int_cntl & ~CRTC_VBLANK_INT_EN, par);
spin_unlock_irq(&par->int_lock);
free_irq(par->irq, par);
}
@@ -1636,50 +1679,62 @@ static int aty_disable_irq(struct atyfb_par *par)
static int atyfb_release(struct fb_info *info, int user)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
- if (user) {
- par->open--;
- mdelay(1);
- wait_for_idle(par);
- if (!par->open) {
#ifdef __sparc__
- int was_mmaped = par->mmaped;
+ int was_mmaped;
+#endif
- par->mmaped = 0;
+ if (!user)
+ return 0;
- if (was_mmaped) {
- struct fb_var_screeninfo var;
+ par->open--;
+ mdelay(1);
+ wait_for_idle(par);
- /* Now reset the default display config, we have no
- * idea what the program(s) which mmap'd the chip did
- * to the configuration, nor whether it restored it
- * correctly.
- */
- var = default_var;
- if (noaccel)
- var.accel_flags &= ~FB_ACCELF_TEXT;
- else
- var.accel_flags |= FB_ACCELF_TEXT;
- if (var.yres == var.yres_virtual) {
- u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
- var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual;
- if (var.yres_virtual < var.yres)
- var.yres_virtual = var.yres;
- }
- }
-#endif
- aty_disable_irq(par);
+ if (par->open)
+ return 0;
+
+#ifdef __sparc__
+ was_mmaped = par->mmaped;
+
+ par->mmaped = 0;
+
+ if (was_mmaped) {
+ struct fb_var_screeninfo var;
+
+ /*
+ * Now reset the default display config, we have
+ * no idea what the program(s) which mmap'd the
+ * chip did to the configuration, nor whether it
+ * restored it correctly.
+ */
+ var = default_var;
+ if (noaccel)
+ var.accel_flags &= ~FB_ACCELF_TEXT;
+ else
+ var.accel_flags |= FB_ACCELF_TEXT;
+ if (var.yres == var.yres_virtual) {
+ u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
+ var.yres_virtual =
+ ((videoram * 8) / var.bits_per_pixel) /
+ var.xres_virtual;
+ if (var.yres_virtual < var.yres)
+ var.yres_virtual = var.yres;
}
}
- return (0);
+#endif
+ aty_disable_irq(par);
+
+ return 0;
}
- /*
- * Pan or Wrap the Display
- *
- * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- */
+/*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ */
-static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+static int atyfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, xoffset, yoffset;
@@ -1690,7 +1745,8 @@ static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info
yres >>= 1;
xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
- if (xoffset + xres > par->crtc.vxres || yoffset + yres > par->crtc.vyres)
+ if (xoffset + xres > par->crtc.vxres ||
+ yoffset + yres > par->crtc.vyres)
return -EINVAL;
info->var.xoffset = xoffset;
info->var.yoffset = yoffset;
@@ -1727,10 +1783,10 @@ static int aty_waitforvblank(struct atyfb_par *par, u32 crtc)
return ret;
count = vbl->count;
- ret = wait_event_interruptible_timeout(vbl->wait, count != vbl->count, HZ/10);
- if (ret < 0) {
+ ret = wait_event_interruptible_timeout(vbl->wait,
+ count != vbl->count, HZ/10);
+ if (ret < 0)
return ret;
- }
if (ret == 0) {
aty_enable_irq(par, 1);
return -ETIMEDOUT;
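
The reformatted wait above is the usual snapshot-a-counter idiom: record the vblank count, sleep until it changes, and interpret the return value of wait_event_interruptible_timeout() (negative on a signal, 0 on timeout, remaining jiffies otherwise). A minimal sketch of the same pattern with illustrative names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Sleep until *counter changes or ~100 ms pass (names are illustrative). */
static int wait_for_next_event(wait_queue_head_t *wq, unsigned int *counter)
{
	unsigned int seen = *counter;	/* snapshot before sleeping */
	int ret;

	ret = wait_event_interruptible_timeout(*wq, *counter != seen, HZ / 10);
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* no event before the timeout */
	return 0;
}
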
@@ -1784,7 +1840,8 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
fbtyp.fb_depth = info->var.bits_per_pixel;
fbtyp.fb_cmsize = info->cmap.len;
fbtyp.fb_size = info->fix.smem_len;
- if (copy_to_user((struct fbtype __user *) arg, &fbtyp, sizeof(fbtyp)))
+ if (copy_to_user((struct fbtype __user *) arg, &fbtyp,
+ sizeof(fbtyp)))
return -EFAULT;
break;
#endif /* __sparc__ */
@@ -1804,7 +1861,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
case ATYIO_CLKR:
if (M64_HAS(INTEGRATED)) {
struct atyclk clk;
- union aty_pll *pll = &(par->pll);
+ union aty_pll *pll = &par->pll;
u32 dsp_config = pll->ct.dsp_config;
u32 dsp_on_off = pll->ct.dsp_on_off;
clk.ref_clk_per = par->ref_clk_per;
@@ -1829,8 +1886,9 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
case ATYIO_CLKW:
if (M64_HAS(INTEGRATED)) {
struct atyclk clk;
- union aty_pll *pll = &(par->pll);
- if (copy_from_user(&clk, (struct atyclk __user *) arg, sizeof(clk)))
+ union aty_pll *pll = &par->pll;
+ if (copy_from_user(&clk, (struct atyclk __user *) arg,
+ sizeof(clk)))
return -EFAULT;
par->ref_clk_per = clk.ref_clk_per;
pll->ct.pll_ref_div = clk.pll_ref_div;
@@ -1841,8 +1899,10 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
pll->ct.vclk_fb_div = clk.vclk_fb_div;
pll->ct.vclk_post_div_real = clk.vclk_post_div;
pll->ct.dsp_config = (clk.dsp_xclks_per_row & 0x3fff) |
- ((clk.dsp_loop_latency & 0xf)<<16)| ((clk.dsp_precision & 7)<<20);
- pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) | ((clk.dsp_on & 0x7ff)<<16);
+ ((clk.dsp_loop_latency & 0xf) << 16) |
+ ((clk.dsp_precision & 7) << 20);
+ pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) |
+ ((clk.dsp_on & 0x7ff) << 16);
/*aty_calc_pll_ct(info, &pll->ct);*/
aty_set_pll_ct(info, pll);
} else
@@ -1913,8 +1973,7 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
continue;
map_size = par->mmap_map[i].size - (offset - start);
- map_offset =
- par->mmap_map[i].poff + (offset - start);
+ map_offset = par->mmap_map[i].poff + (offset - start);
break;
}
if (!map_size) {
@@ -1924,8 +1983,7 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (page + map_size > size)
map_size = size - page;
- pgprot_val(vma->vm_page_prot) &=
- ~(par->mmap_map[i].prot_mask);
+ pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
if (remap_pfn_range(vma, vma->vm_start + page,
@@ -2029,7 +2087,8 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
par->asleep = 1;
par->lock_blank = 1;
- /* Because we may change PCI D state ourselves, we need to
+ /*
+ * Because we may change PCI D state ourselves, we need to
* first save the config space content so the core can
* restore it properly on resume.
*/
@@ -2080,7 +2139,8 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
acquire_console_sem();
- /* PCI state will have been restored by the core, so
+ /*
+ * PCI state will have been restored by the core, so
* we should be in D0 now with our config space fully
* restored
*/
@@ -2192,8 +2252,8 @@ static void aty_bl_init(struct atyfb_par *par)
info->bl_dev = bd;
fb_bl_default_curve(info, 0,
- 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
- 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
+ 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
+ 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd->props.brightness = bd->props.max_brightness;
@@ -2236,16 +2296,16 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
size = ARRAY_SIZE(ragepro_tbl);
}
- for (i=0; i < size; i++) {
+ for (i = 0; i < size; i++) {
if (xclk < refresh_tbl[i])
- break;
+ break;
}
par->mem_refresh_rate = i;
}
- /*
- * Initialisation
- */
+/*
+ * Initialisation
+ */
static struct fb_info *fb_list = NULL;
@@ -2375,8 +2435,10 @@ static int __devinit aty_init(struct fb_info *info)
}
#endif
#ifdef CONFIG_PPC_PMAC
- /* The Apple iBook1 uses non-standard memory frequencies. We detect it
- * and set the frequency manually. */
+ /*
+ * The Apple iBook1 uses non-standard memory frequencies.
+ * We detect it and set the frequency manually.
+ */
if (machine_is_compatible("PowerBook2,1")) {
par->pll_limits.mclk = 70;
par->pll_limits.xclk = 53;
@@ -2421,13 +2483,14 @@ static int __devinit aty_init(struct fb_info *info)
/* save previous video mode */
aty_get_crtc(par, &par->saved_crtc);
- if(par->pll_ops->get_pll)
+ if (par->pll_ops->get_pll)
par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
if (gtb_memsize)
- switch (par->mem_cntl & 0xF) { /* 0xF used instead of MEM_SIZE_ALIAS */
+ /* 0xF used instead of MEM_SIZE_ALIAS */
+ switch (par->mem_cntl & 0xF) {
case MEM_SIZE_512K:
info->fix.smem_len = 0x80000;
break;
@@ -2496,8 +2559,8 @@ static int __devinit aty_init(struct fb_info *info)
}
/*
- * Reg Block 0 (CT-compatible block) is at mmio_start
- * Reg Block 1 (multimedia extensions) is at mmio_start - 0x400
+ * Reg Block 0 (CT-compatible block) is at mmio_start
+ * Reg Block 1 (multimedia extensions) is at mmio_start - 0x400
*/
if (M64_HAS(GX)) {
info->fix.mmio_len = 0x400;
@@ -2516,84 +2579,98 @@ static int __devinit aty_init(struct fb_info *info)
}
PRINTKI("%d%c %s, %s MHz XTAL, %d MHz PLL, %d Mhz MCLK, %d MHz XCLK\n",
- info->fix.smem_len == 0x80000 ? 512 : (info->fix.smem_len >> 20),
- info->fix.smem_len == 0x80000 ? 'K' : 'M', ramname, xtal, par->pll_limits.pll_max,
- par->pll_limits.mclk, par->pll_limits.xclk);
+ info->fix.smem_len == 0x80000 ? 512 : (info->fix.smem_len>>20),
+ info->fix.smem_len == 0x80000 ? 'K' : 'M', ramname, xtal,
+ par->pll_limits.pll_max, par->pll_limits.mclk,
+ par->pll_limits.xclk);
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
if (M64_HAS(INTEGRATED)) {
int i;
- printk("debug atyfb: BUS_CNTL DAC_CNTL MEM_CNTL EXT_MEM_CNTL CRTC_GEN_CNTL "
- "DSP_CONFIG DSP_ON_OFF CLOCK_CNTL\n"
- "debug atyfb: %08x %08x %08x %08x %08x %08x %08x %08x\n"
+ printk("debug atyfb: BUS_CNTL DAC_CNTL MEM_CNTL "
+ "EXT_MEM_CNTL CRTC_GEN_CNTL DSP_CONFIG "
+ "DSP_ON_OFF CLOCK_CNTL\n"
+ "debug atyfb: %08x %08x %08x "
+ "%08x %08x %08x "
+ "%08x %08x\n"
"debug atyfb: PLL",
- aty_ld_le32(BUS_CNTL, par), aty_ld_le32(DAC_CNTL, par),
- aty_ld_le32(MEM_CNTL, par), aty_ld_le32(EXT_MEM_CNTL, par),
- aty_ld_le32(CRTC_GEN_CNTL, par), aty_ld_le32(DSP_CONFIG, par),
- aty_ld_le32(DSP_ON_OFF, par), aty_ld_le32(CLOCK_CNTL, par));
+ aty_ld_le32(BUS_CNTL, par),
+ aty_ld_le32(DAC_CNTL, par),
+ aty_ld_le32(MEM_CNTL, par),
+ aty_ld_le32(EXT_MEM_CNTL, par),
+ aty_ld_le32(CRTC_GEN_CNTL, par),
+ aty_ld_le32(DSP_CONFIG, par),
+ aty_ld_le32(DSP_ON_OFF, par),
+ aty_ld_le32(CLOCK_CNTL, par));
for (i = 0; i < 40; i++)
printk(" %02x", aty_ld_pll_ct(i, par));
printk("\n");
}
#endif
- if(par->pll_ops->init_pll)
+ if (par->pll_ops->init_pll)
par->pll_ops->init_pll(info, &par->pll);
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
/*
- * Last page of 8 MB (4 MB on ISA) aperture is MMIO,
- * unless the auxiliary register aperture is used.
+ * Last page of 8 MB (4 MB on ISA) aperture is MMIO,
+ * unless the auxiliary register aperture is used.
*/
-
if (!par->aux_start &&
- (info->fix.smem_len == 0x800000 || (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
+ (info->fix.smem_len == 0x800000 ||
+ (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
info->fix.smem_len -= GUI_RESERVE;
/*
- * Disable register access through the linear aperture
- * if the auxiliary aperture is used so we can access
- * the full 8 MB of video RAM on 8 MB boards.
+ * Disable register access through the linear aperture
+ * if the auxiliary aperture is used so we can access
+ * the full 8 MB of video RAM on 8 MB boards.
*/
if (par->aux_start)
- aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);
+ aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) |
+ BUS_APER_REG_DIS, par);
#ifdef CONFIG_MTRR
par->mtrr_aper = -1;
par->mtrr_reg = -1;
if (!nomtrr) {
/* Cover the whole resource. */
- par->mtrr_aper = mtrr_add(par->res_start, par->res_size, MTRR_TYPE_WRCOMB, 1);
- if (par->mtrr_aper >= 0 && !par->aux_start) {
+ par->mtrr_aper = mtrr_add(par->res_start, par->res_size,
+ MTRR_TYPE_WRCOMB, 1);
+ if (par->mtrr_aper >= 0 && !par->aux_start) {
/* Make a hole for mmio. */
- par->mtrr_reg = mtrr_add(par->res_start + 0x800000 - GUI_RESERVE,
- GUI_RESERVE, MTRR_TYPE_UNCACHABLE, 1);
+ par->mtrr_reg = mtrr_add(par->res_start + 0x800000 -
+ GUI_RESERVE, GUI_RESERVE,
+ MTRR_TYPE_UNCACHABLE, 1);
if (par->mtrr_reg < 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
- }
+ }
}
#endif
info->fbops = &atyfb_ops;
info->pseudo_palette = par->pseudo_palette;
info->flags = FBINFO_DEFAULT |
- FBINFO_HWACCEL_IMAGEBLIT |
- FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_YPAN;
+ FBINFO_HWACCEL_IMAGEBLIT |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_YPAN;
#ifdef CONFIG_PMAC_BACKLIGHT
if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) {
- /* these bits let the 101 powerbook wake up from sleep -- paulus */
- aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par)
- | (USE_F32KHZ | TRISTATE_MEM_EN), par);
+ /*
+ * these bits let the 101 powerbook
+ * wake up from sleep -- paulus
+ */
+ aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par) |
+ USE_F32KHZ | TRISTATE_MEM_EN, par);
} else
#endif
if (M64_HAS(MOBIL_BUS) && backlight) {
#ifdef CONFIG_FB_ATY_BACKLIGHT
- aty_bl_init (par);
+ aty_bl_init(par);
#endif
}
@@ -2601,8 +2678,8 @@ static int __devinit aty_init(struct fb_info *info)
#ifdef CONFIG_PPC
if (machine_is(powermac)) {
/*
- * FIXME: The NVRAM stuff should be put in a Mac-specific file, as it
- * applies to all Mac video cards
+ * FIXME: The NVRAM stuff should be put in a Mac-specific file,
+ * as it applies to all Mac video cards
*/
if (mode) {
if (mac_find_mode(&var, info, mode, 8))
@@ -2615,8 +2692,7 @@ static int __devinit aty_init(struct fb_info *info)
default_vmode = VMODE_1024_768_60;
else if (machine_is_compatible("iMac"))
default_vmode = VMODE_1024_768_75;
- else if (machine_is_compatible
- ("PowerBook2,1"))
+ else if (machine_is_compatible("PowerBook2,1"))
/* iBook with 800x600 LCD */
default_vmode = VMODE_800_600_60;
else
@@ -2630,7 +2706,7 @@ static int __devinit aty_init(struct fb_info *info)
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
if (!mac_vmode_to_var(default_vmode, default_cmode,
- &var))
+ &var))
has_var = 1;
}
}
@@ -2702,12 +2778,12 @@ aty_init_exit:
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
+ mtrr_del(par->mtrr_reg, 0, 0);
+ par->mtrr_reg = -1;
}
if (par->mtrr_aper >= 0) {
- mtrr_del(par->mtrr_aper, 0, 0);
- par->mtrr_aper = -1;
+ mtrr_del(par->mtrr_aper, 0, 0);
+ par->mtrr_aper = -1;
}
#endif
return ret;
@@ -2735,18 +2811,18 @@ static int __devinit store_video_par(char *video_str, unsigned char m64_num)
phys_size[m64_num] = size;
phys_guiregbase[m64_num] = guiregbase;
PRINTKI("stored them all: $%08lX $%08lX $%08lX \n", vmembase, size,
- guiregbase);
+ guiregbase);
return 0;
- mach64_invalid:
+ mach64_invalid:
phys_vmembase[m64_num] = 0;
return -1;
}
#endif /* CONFIG_ATARI */
- /*
- * Blank the display.
- */
+/*
+ * Blank the display.
+ */
static int atyfb_blank(int blank, struct fb_info *info)
{
@@ -2768,20 +2844,20 @@ static int atyfb_blank(int blank, struct fb_info *info)
gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
gen_cntl &= ~0x400004c;
switch (blank) {
- case FB_BLANK_UNBLANK:
- break;
- case FB_BLANK_NORMAL:
- gen_cntl |= 0x4000040;
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- gen_cntl |= 0x4000048;
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- gen_cntl |= 0x4000044;
- break;
- case FB_BLANK_POWERDOWN:
- gen_cntl |= 0x400004c;
- break;
+ case FB_BLANK_UNBLANK:
+ break;
+ case FB_BLANK_NORMAL:
+ gen_cntl |= 0x4000040;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ gen_cntl |= 0x4000048;
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ gen_cntl |= 0x4000044;
+ break;
+ case FB_BLANK_POWERDOWN:
+ gen_cntl |= 0x400004c;
+ break;
}
aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
@@ -2806,15 +2882,15 @@ static void aty_st_pal(u_int regno, u_int red, u_int green, u_int blue,
aty_st_8(DAC_DATA, blue, par);
}
- /*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- * !! 4 & 8 = PSEUDO, > 8 = DIRECTCOLOR
- */
+/*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
+ * !! 4 & 8 = PSEUDO, > 8 = DIRECTCOLOR
+ */
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
+ u_int transp, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
int i, depth;
@@ -2868,16 +2944,15 @@ static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
if (depth == 16) {
if (regno < 32)
aty_st_pal(regno << 3, red,
- par->palette[regno<<1].green,
+ par->palette[regno << 1].green,
blue, par);
- red = par->palette[regno>>1].red;
- blue = par->palette[regno>>1].blue;
+ red = par->palette[regno >> 1].red;
+ blue = par->palette[regno >> 1].blue;
regno <<= 2;
} else if (depth == 15) {
regno <<= 3;
- for(i = 0; i < 8; i++) {
- aty_st_pal(regno + i, red, green, blue, par);
- }
+ for (i = 0; i < 8; i++)
+ aty_st_pal(regno + i, red, green, blue, par);
}
}
aty_st_pal(regno, red, green, blue, par);
@@ -2890,7 +2965,8 @@ static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
#ifdef __sparc__
static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
- struct fb_info *info, unsigned long addr)
+ struct fb_info *info,
+ unsigned long addr)
{
struct atyfb_par *par = info->par;
struct device_node *dp;
@@ -2978,7 +3054,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
j++;
}
- if((ret = correct_chipset(par)))
+ ret = correct_chipset(par);
+ if (ret)
return ret;
if (IS_XL(pdev->device)) {
@@ -3108,28 +3185,28 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
u32 driv_inf_tab, sig;
u16 lcd_ofs;
- /* To support an LCD panel, we should know it's dimensions and
+ /*
+ * To support an LCD panel, we should know it's dimensions and
* it's desired pixel clock.
* There are two ways to do it:
* - Check the startup video mode and calculate the panel
* size from it. This is unreliable.
* - Read it from the driver information table in the video BIOS.
- */
+ */
/* Address of driver information table is at offset 0x78. */
driv_inf_tab = bios_base + *((u16 *)(bios_base+0x78));
/* Check for the driver information table signature. */
- sig = (*(u32 *)driv_inf_tab);
+ sig = *(u32 *)driv_inf_tab;
if ((sig == 0x54504c24) || /* Rage LT pro */
- (sig == 0x544d5224) || /* Rage mobility */
- (sig == 0x54435824) || /* Rage XC */
- (sig == 0x544c5824)) { /* Rage XL */
+ (sig == 0x544d5224) || /* Rage mobility */
+ (sig == 0x54435824) || /* Rage XC */
+ (sig == 0x544c5824)) { /* Rage XL */
PRINTKI("BIOS contains driver information table.\n");
- lcd_ofs = (*(u16 *)(driv_inf_tab + 10));
+ lcd_ofs = *(u16 *)(driv_inf_tab + 10);
par->lcd_table = 0;
- if (lcd_ofs != 0) {
+ if (lcd_ofs != 0)
par->lcd_table = bios_base + lcd_ofs;
- }
}
if (par->lcd_table != 0) {
@@ -3144,14 +3221,16 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
u16 width, height, panel_type, refresh_rates;
u16 *lcdmodeptr;
u32 format;
- u8 lcd_refresh_rates[16] = {50,56,60,67,70,72,75,76,85,90,100,120,140,150,160,200};
- /* The most important information is the panel size at
+ u8 lcd_refresh_rates[16] = { 50, 56, 60, 67, 70, 72, 75, 76, 85,
+ 90, 100, 120, 140, 150, 160, 200 };
+ /*
+ * The most important information is the panel size at
* offset 25 and 27, but there's some other nice information
* which we print to the screen.
*/
id = *(u8 *)par->lcd_table;
- strncpy(model,(char *)par->lcd_table+1,24);
- model[23]=0;
+ strncpy(model, (char *)par->lcd_table+1, 24);
+ model[23] = 0;
width = par->lcd_width = *(u16 *)(par->lcd_table+25);
height = par->lcd_height = *(u16 *)(par->lcd_table+27);
@@ -3164,7 +3243,7 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
txtdual = "dual (split) ";
else
txtdual = "";
- tech = (panel_type>>2) & 63;
+ tech = (panel_type >> 2) & 63;
switch (tech) {
case 0:
txtmonitor = "passive matrix";
@@ -3224,22 +3303,24 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
}
}
PRINTKI("%s%s %s monitor detected: %s\n",
- txtdual ,txtcolour, txtmonitor, model);
+ txtdual, txtcolour, txtmonitor, model);
PRINTKI(" id=%d, %dx%d pixels, %s\n",
id, width, height, txtformat);
refresh_rates_buf[0] = 0;
refresh_rates = *(u16 *)(par->lcd_table+62);
m = 1;
f = 0;
- for (i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (refresh_rates & m) {
if (f == 0) {
- sprintf(strbuf, "%d", lcd_refresh_rates[i]);
+ sprintf(strbuf, "%d",
+ lcd_refresh_rates[i]);
f++;
} else {
- sprintf(strbuf, ",%d", lcd_refresh_rates[i]);
+ sprintf(strbuf, ",%d",
+ lcd_refresh_rates[i]);
}
- strcat(refresh_rates_buf,strbuf);
+ strcat(refresh_rates_buf, strbuf);
}
m = m << 1;
}
@@ -3247,7 +3328,8 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
PRINTKI(" supports refresh rates [%s], default %d Hz\n",
refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]);
par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate];
- /* We now need to determine the crtc parameters for the
+ /*
+ * We now need to determine the crtc parameters for the
* LCD monitor. This is tricky, because they are not stored
* individually in the BIOS. Instead, the BIOS contains a
* table of display modes that work for this monitor.
@@ -3382,7 +3464,9 @@ static int __devinit init_from_bios(struct atyfb_par *par)
}
#endif /* __i386__ */
-static int __devinit atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info, unsigned long addr)
+static int __devinit atyfb_setup_generic(struct pci_dev *pdev,
+ struct fb_info *info,
+ unsigned long addr)
{
struct atyfb_par *par = info->par;
u16 tmp;
@@ -3429,10 +3513,12 @@ static int __devinit atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *i
goto atyfb_setup_generic_fail;
}
- if((ret = correct_chipset(par)))
+ ret = correct_chipset(par);
+ if (ret)
goto atyfb_setup_generic_fail;
#ifdef __i386__
- if((ret = init_from_bios(par)))
+ ret = init_from_bios(par);
+ if (ret)
goto atyfb_setup_generic_fail;
#endif
if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
@@ -3457,7 +3543,8 @@ atyfb_setup_generic_fail:
#endif /* !__sparc__ */
-static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit atyfb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
unsigned long addr, res_start, res_size;
struct fb_info *info;
@@ -3482,10 +3569,10 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
/* Reserve space */
res_start = rp->start;
res_size = rp->end - rp->start + 1;
- if (!request_mem_region (res_start, res_size, "atyfb"))
+ if (!request_mem_region(res_start, res_size, "atyfb"))
return -EBUSY;
- /* Allocate framebuffer */
+ /* Allocate framebuffer */
info = framebuffer_alloc(sizeof(struct atyfb_par), &pdev->dev);
if (!info) {
PRINTKE("atyfb_pci_probe() can't alloc fb_info\n");
@@ -3573,7 +3660,8 @@ static int __init atyfb_atari_probe(void)
for (m64_num = 0; m64_num < mach64_count; m64_num++) {
if (!phys_vmembase[m64_num] || !phys_size[m64_num] ||
!phys_guiregbase[m64_num]) {
- PRINTKI("phys_*[%d] parameters not set => returning early. \n", m64_num);
+ PRINTKI("phys_*[%d] parameters not set => "
+ "returning early. \n", m64_num);
continue;
}
@@ -3589,8 +3677,8 @@ static int __init atyfb_atari_probe(void)
par->irq = (unsigned int) -1; /* something invalid */
/*
- * Map the video memory (physical address given) to somewhere in the
- * kernel address space.
+ * Map the video memory (physical address given)
+ * to somewhere in the kernel address space.
*/
info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]);
info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */
@@ -3661,12 +3749,12 @@ static void __devexit atyfb_remove(struct fb_info *info)
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
- mtrr_del(par->mtrr_reg, 0, 0);
- par->mtrr_reg = -1;
+ mtrr_del(par->mtrr_reg, 0, 0);
+ par->mtrr_reg = -1;
}
if (par->mtrr_aper >= 0) {
- mtrr_del(par->mtrr_aper, 0, 0);
- par->mtrr_aper = -1;
+ mtrr_del(par->mtrr_aper, 0, 0);
+ par->mtrr_aper = -1;
}
#endif
#ifndef __sparc__
@@ -3900,29 +3988,29 @@ static const struct dmi_system_id atyfb_reboot_ids[] = {
static int __init atyfb_init(void)
{
- int err1 = 1, err2 = 1;
+ int err1 = 1, err2 = 1;
#ifndef MODULE
- char *option = NULL;
+ char *option = NULL;
- if (fb_get_options("atyfb", &option))
- return -ENODEV;
- atyfb_setup(option);
+ if (fb_get_options("atyfb", &option))
+ return -ENODEV;
+ atyfb_setup(option);
#endif
#ifdef CONFIG_PCI
- err1 = pci_register_driver(&atyfb_driver);
+ err1 = pci_register_driver(&atyfb_driver);
#endif
#ifdef CONFIG_ATARI
- err2 = atyfb_atari_probe();
+ err2 = atyfb_atari_probe();
#endif
- if (err1 && err2)
- return -ENODEV;
+ if (err1 && err2)
+ return -ENODEV;
- if (dmi_check_system(atyfb_reboot_ids))
- register_reboot_notifier(&atyfb_reboot_notifier);
+ if (dmi_check_system(atyfb_reboot_ids))
+ register_reboot_notifier(&atyfb_reboot_notifier);
- return 0;
+ return 0;
}
static void __exit atyfb_exit(void)
@@ -3951,8 +4039,7 @@ MODULE_PARM_DESC(mclk, "int: override memory clock");
module_param(xclk, int, 0);
MODULE_PARM_DESC(xclk, "int: override accelerated engine clock");
module_param(comp_sync, int, 0);
-MODULE_PARM_DESC(comp_sync,
- "Set composite sync signal to low (0) or high (1)");
+MODULE_PARM_DESC(comp_sync, "Set composite sync signal to low (0) or high (1)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
#ifdef CONFIG_MTRR
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 378f27745a1..a699aab6382 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -715,8 +715,11 @@ int au1100fb_setup(char *options)
}
/* Mode option (only option that start with digit) */
else if (isdigit(this_opt[0])) {
- mode = kmalloc(strlen(this_opt) + 1, GFP_KERNEL);
- strncpy(mode, this_opt, strlen(this_opt) + 1);
+ mode = kstrdup(this_opt, GFP_KERNEL);
+ if (!mode) {
+ print_err("memory allocation failed");
+ return -ENOMEM;
+ }
}
/* Unsupported option */
else {
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index f8a4bb20f41..2211a852af9 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -639,3 +639,4 @@ module_exit(corgi_lcd_exit);
MODULE_DESCRIPTION("LCD and backlight driver for SHARP C7x0/Cxx00");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:corgi-lcd");
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 93bb4340cc6..701a1081e19 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -154,34 +154,38 @@ static int da903x_backlight_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int da903x_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int da903x_backlight_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct backlight_device *bl = platform_get_drvdata(pdev);
return da903x_backlight_set(bl, 0);
}
-static int da903x_backlight_resume(struct platform_device *pdev)
+static int da903x_backlight_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct backlight_device *bl = platform_get_drvdata(pdev);
backlight_update_status(bl);
return 0;
}
-#else
-#define da903x_backlight_suspend NULL
-#define da903x_backlight_resume NULL
+
+static struct dev_pm_ops da903x_backlight_pm_ops = {
+ .suspend = da903x_backlight_suspend,
+ .resume = da903x_backlight_resume,
+};
#endif
static struct platform_driver da903x_backlight_driver = {
.driver = {
.name = "da903x-backlight",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &da903x_backlight_pm_ops,
+#endif
},
.probe = da903x_backlight_probe,
.remove = da903x_backlight_remove,
- .suspend = da903x_backlight_suspend,
- .resume = da903x_backlight_resume,
};
static int __init da903x_backlight_init(void)
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 2eb206bf73e..4631ca8fa4a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -328,3 +328,4 @@ module_exit(ltv350qv_exit);
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_DESCRIPTION("Samsung LTV350QV LCD Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ltv350qv");
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 51422fc4f60..bbfb502add6 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -472,3 +472,4 @@ module_exit(tdo24m_exit);
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_DESCRIPTION("Driver for Toppoly TDO24M LCD Panel");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tdo24m");
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index b7fbc75a62f..50ec17dfc51 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -300,4 +300,4 @@ module_exit(tosa_lcd_exit);
MODULE_AUTHOR("Dmitry Baryshkov");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA");
-
+MODULE_ALIAS("spi:tosa-lcd");
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 8e653b8a6f1..b49063c831e 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -280,5 +280,4 @@ module_exit(vgg2432a4_exit);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
MODULE_LICENSE("GPL v2");
-
-
+MODULE_ALIAS("spi:VGG2432A4");
diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c
index df03f3776dc..79e5f40e648 100644
--- a/drivers/video/cfbcopyarea.c
+++ b/drivers/video/cfbcopyarea.c
@@ -114,7 +114,7 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
d0 >>= right;
} else if (src_idx+n <= bits) {
// Single source word
- d0 <<= left;;
+ d0 <<= left;
} else {
// 2 source words
d1 = FB_READL(src + 1);
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index 69864b1b3f9..6b7c8fbc549 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -25,7 +25,7 @@ static inline void update_attr(u8 *dst, u8 *src, int attribute,
struct vc_data *vc)
{
int i, offset = (vc->vc_font.height < 10) ? 1 : 2;
- int width = (vc->vc_font.width + 7) >> 3;
+ int width = DIV_ROUND_UP(vc->vc_font.width, 8);
unsigned int cellsize = vc->vc_font.height * width;
u8 c;
@@ -144,7 +144,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
int fg, int bg)
{
struct fb_image image;
- u32 width = (vc->vc_font.width + 7)/8;
+ u32 width = DIV_ROUND_UP(vc->vc_font.width, 8);
u32 cellsize = width * vc->vc_font.height;
u32 maxcnt = info->pixmap.size/cellsize;
u32 scan_align = info->pixmap.scan_align - 1;
@@ -173,7 +173,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
cnt = count;
image.width = vc->vc_font.width * cnt;
- pitch = ((image.width + 7) >> 3) + scan_align;
+ pitch = DIV_ROUND_UP(image.width, 8) + scan_align;
pitch &= ~scan_align;
size = pitch * image.height + buf_align;
size &= ~buf_align;
@@ -239,7 +239,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
- int w = (vc->vc_font.width + 7) >> 3, c;
+ int w = DIV_ROUND_UP(vc->vc_font.width, 8), c;
int y = real_y(ops->p, vc->vc_y);
int attribute, use_sw = (vc->vc_cursor_type & 0x10);
int err = 1;
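
The bitblit conversions above replace open-coded "(width + 7) >> 3" and "(width + 7) / 8" with DIV_ROUND_UP(width, 8), which states the intent (bytes needed per row of a width-pixel glyph) instead of a bit trick. DIV_ROUND_UP comes from <linux/kernel.h> and is, to the best of my knowledge, defined as:

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* e.g. a 12-pixel-wide font row needs DIV_ROUND_UP(12, 8) == 2 bytes,
 * exactly what (12 + 7) >> 3 used to compute */
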
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3a44695b9c0..5a686cea23f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -114,6 +114,7 @@ static int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
static int fbcon_has_exited;
static int primary_device = -1;
+static int fbcon_has_console_bind;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
static int map_override;
@@ -544,6 +545,8 @@ static int fbcon_takeover(int show_logo)
con2fb_map[i] = -1;
}
info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
}
return err;
@@ -725,7 +728,7 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
int oldidx, int found)
{
struct fbcon_ops *ops = oldinfo->fbcon_par;
- int err = 0;
+ int err = 0, ret;
if (oldinfo->fbops->fb_release &&
oldinfo->fbops->fb_release(oldinfo, 0)) {
@@ -752,8 +755,14 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
newinfo in an undefined state. Thus, a call to
fb_set_par() may be needed for the newinfo.
*/
- if (newinfo->fbops->fb_set_par)
- newinfo->fbops->fb_set_par(newinfo);
+ if (newinfo->fbops->fb_set_par) {
+ ret = newinfo->fbops->fb_set_par(newinfo);
+
+ if (ret)
+ printk(KERN_ERR "con2fb_release_oldinfo: "
+ "detected unhandled fb_set_par error, "
+ "error code %d\n", ret);
+ }
}
return err;
@@ -763,11 +772,18 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
int unit, int show_logo)
{
struct fbcon_ops *ops = info->fbcon_par;
+ int ret;
ops->currcon = fg_console;
- if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT))
- info->fbops->fb_set_par(info);
+ if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT)) {
+ ret = info->fbops->fb_set_par(info);
+
+ if (ret)
+ printk(KERN_ERR "con2fb_init_display: detected "
+ "unhandled fb_set_par error, "
+ "error code %d\n", ret);
+ }
ops->flags |= FBCON_FLAGS_INIT;
ops->graphics = 0;
@@ -1006,7 +1022,7 @@ static void fbcon_init(struct vc_data *vc, int init)
struct vc_data *svc = *default_mode;
struct display *t, *p = &fb_display[vc->vc_num];
int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256;
- int cap;
+ int cap, ret;
if (info_idx == -1 || info == NULL)
return;
@@ -1092,8 +1108,15 @@ static void fbcon_init(struct vc_data *vc, int init)
*/
if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
if (info->fbops->fb_set_par &&
- !(ops->flags & FBCON_FLAGS_INIT))
- info->fbops->fb_set_par(info);
+ !(ops->flags & FBCON_FLAGS_INIT)) {
+ ret = info->fbops->fb_set_par(info);
+
+ if (ret)
+ printk(KERN_ERR "fbcon_init: detected "
+ "unhandled fb_set_par error, "
+ "error code %d\n", ret);
+ }
+
ops->flags |= FBCON_FLAGS_INIT;
}
@@ -2119,7 +2142,7 @@ static int fbcon_switch(struct vc_data *vc)
struct fbcon_ops *ops;
struct display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var;
- int i, prev_console, charcnt = 256;
+ int i, ret, prev_console, charcnt = 256;
info = registered_fb[con2fb_map[vc->vc_num]];
ops = info->fbcon_par;
@@ -2174,8 +2197,14 @@ static int fbcon_switch(struct vc_data *vc)
if (old_info != NULL && (old_info != info ||
info->flags & FBINFO_MISC_ALWAYS_SETPAR)) {
- if (info->fbops->fb_set_par)
- info->fbops->fb_set_par(info);
+ if (info->fbops->fb_set_par) {
+ ret = info->fbops->fb_set_par(info);
+
+ if (ret)
+ printk(KERN_ERR "fbcon_switch: detected "
+ "unhandled fb_set_par error, "
+ "error code %d\n", ret);
+ }
if (old_info != info)
fbcon_del_cursor_timer(old_info);
@@ -2923,6 +2952,10 @@ static int fbcon_unbind(void)
ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
fbcon_is_default);
+
+ if (!ret)
+ fbcon_has_console_bind = 0;
+
return ret;
}
#else
@@ -2936,6 +2969,9 @@ static int fbcon_fb_unbind(int idx)
{
int i, new_idx = -1, ret = 0;
+ if (!fbcon_has_console_bind)
+ return 0;
+
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
con2fb_map[i] != -1) {
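
Two independent fbcon changes run through the hunks above: every fb_set_par() call site now checks and logs the return value instead of ignoring it, and a new fbcon_has_console_bind flag records whether fbcon ever bound to the console so fbcon_fb_unbind() can return early when it never did. A minimal sketch of the error-reporting idiom used at each call site (using __func__ rather than the hard-coded function names in the patch):

        if (info->fbops->fb_set_par) {
                int ret = info->fbops->fb_set_par(info);

                if (ret)
                        printk(KERN_ERR "%s: detected unhandled fb_set_par error, "
                               "error code %d\n", __func__, ret);
        }
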
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index d31b203bf65..3772433c49d 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -216,7 +216,7 @@ static void newport_get_screensize(void)
}
newport_xsize = newport_ysize = 0;
- for (i = 0; linetable[i + 1] && (i < sizeof(linetable)); i += 2) {
+ for (i = 0; i < ARRAY_SIZE(linetable) - 1 && linetable[i + 1]; i += 2) {
cols = 0;
newport_vc2_set(npregs, VC2_IREG_RADDR, linetable[i]);
npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM |
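
The newport_con fix above corrects the loop bound: sizeof(linetable) is the size in bytes rather than the number of entries, and the old test also dereferenced linetable[i + 1] before verifying the index was in range. Counting with ARRAY_SIZE() and checking the index first avoids both problems. A compressed illustration with a made-up table and a placeholder handle_pair() helper:

        static const unsigned short linetable[] = { 0x10, 0x01, 0x20, 0x02, 0, 0 };
        unsigned int i;

        /* walk (address, data) pairs; stop at a zero data word or the end of the table */
        for (i = 0; i < ARRAY_SIZE(linetable) - 1 && linetable[i + 1]; i += 2)
                handle_pair(linetable[i], linetable[i + 1]);
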
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 74e96cf83b7..da55ccaf4d5 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -589,12 +589,14 @@ static void vgacon_init(struct vc_data *c, int init)
static void vgacon_deinit(struct vc_data *c)
{
- /* When closing the last console, reset video origin */
- if (!--vgacon_uni_pagedir[1]) {
+ /* When closing the active console, reset video origin */
+ if (CON_IS_VISIBLE(c)) {
c->vc_visible_origin = vga_vram_base;
vga_set_mem_top(c);
- con_free_unimap(c);
}
+
+ if (!--vgacon_uni_pagedir[1])
+ con_free_unimap(c);
c->vc_uni_pagedir_loc = &c->vc_uni_pagedir;
con_set_default_unimap(c);
}
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
new file mode 100644
index 00000000000..42e1005e291
--- /dev/null
+++ b/drivers/video/da8xx-fb.c
@@ -0,0 +1,890 @@
+/*
+ * Copyright (C) 2008-2009 MontaVista Software Inc.
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * Based on the LCD driver for TI Avalanche processors written by
+ * Ajay Singh and Shalom Hai.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <video/da8xx-fb.h>
+
+#define DRIVER_NAME "da8xx_lcdc"
+
+/* LCD Status Register */
+#define LCD_END_OF_FRAME0 BIT(8)
+#define LCD_FIFO_UNDERFLOW BIT(5)
+#define LCD_SYNC_LOST BIT(2)
+
+/* LCD DMA Control Register */
+#define LCD_DMA_BURST_SIZE(x) ((x) << 4)
+#define LCD_DMA_BURST_1 0x0
+#define LCD_DMA_BURST_2 0x1
+#define LCD_DMA_BURST_4 0x2
+#define LCD_DMA_BURST_8 0x3
+#define LCD_DMA_BURST_16 0x4
+#define LCD_END_OF_FRAME_INT_ENA BIT(2)
+#define LCD_DUAL_FRAME_BUFFER_ENABLE BIT(0)
+
+/* LCD Control Register */
+#define LCD_CLK_DIVISOR(x) ((x) << 8)
+#define LCD_RASTER_MODE 0x01
+
+/* LCD Raster Control Register */
+#define LCD_PALETTE_LOAD_MODE(x) ((x) << 20)
+#define PALETTE_AND_DATA 0x00
+#define PALETTE_ONLY 0x01
+
+#define LCD_MONO_8BIT_MODE BIT(9)
+#define LCD_RASTER_ORDER BIT(8)
+#define LCD_TFT_MODE BIT(7)
+#define LCD_UNDERFLOW_INT_ENA BIT(6)
+#define LCD_MONOCHROME_MODE BIT(1)
+#define LCD_RASTER_ENABLE BIT(0)
+#define LCD_TFT_ALT_ENABLE BIT(23)
+#define LCD_STN_565_ENABLE BIT(24)
+
+/* LCD Raster Timing 2 Register */
+#define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
+#define LCD_AC_BIAS_FREQUENCY(x) ((x) << 8)
+#define LCD_SYNC_CTRL BIT(25)
+#define LCD_SYNC_EDGE BIT(24)
+#define LCD_INVERT_PIXEL_CLOCK BIT(22)
+#define LCD_INVERT_LINE_CLOCK BIT(21)
+#define LCD_INVERT_FRAME_CLOCK BIT(20)
+
+/* LCD Block */
+#define LCD_CTRL_REG 0x4
+#define LCD_STAT_REG 0x8
+#define LCD_RASTER_CTRL_REG 0x28
+#define LCD_RASTER_TIMING_0_REG 0x2C
+#define LCD_RASTER_TIMING_1_REG 0x30
+#define LCD_RASTER_TIMING_2_REG 0x34
+#define LCD_DMA_CTRL_REG 0x40
+#define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44
+#define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48
+
+#define WSI_TIMEOUT 50
+#define PALETTE_SIZE 256
+#define LEFT_MARGIN 64
+#define RIGHT_MARGIN 64
+#define UPPER_MARGIN 32
+#define LOWER_MARGIN 32
+
+static resource_size_t da8xx_fb_reg_base;
+static struct resource *lcdc_regs;
+
+static inline unsigned int lcdc_read(unsigned int addr)
+{
+ return (unsigned int)__raw_readl(da8xx_fb_reg_base + (addr));
+}
+
+static inline void lcdc_write(unsigned int val, unsigned int addr)
+{
+ __raw_writel(val, da8xx_fb_reg_base + (addr));
+}
+
+struct da8xx_fb_par {
+ resource_size_t p_palette_base;
+ unsigned char *v_palette_base;
+ struct clk *lcdc_clk;
+ int irq;
+ unsigned short pseudo_palette[16];
+ unsigned int databuf_sz;
+ unsigned int palette_sz;
+};
+
+/* Variable Screen Information */
+static struct fb_var_screeninfo da8xx_fb_var __devinitdata = {
+ .xoffset = 0,
+ .yoffset = 0,
+ .transp = {0, 0, 0},
+ .nonstd = 0,
+ .activate = 0,
+ .height = -1,
+ .width = -1,
+ .pixclock = 46666, /* 46us - AUO display */
+ .accel_flags = 0,
+ .left_margin = LEFT_MARGIN,
+ .right_margin = RIGHT_MARGIN,
+ .upper_margin = UPPER_MARGIN,
+ .lower_margin = LOWER_MARGIN,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED
+};
+
+static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = {
+ .id = "DA8xx FB Drv",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .type_aux = 0,
+ .visual = FB_VISUAL_PSEUDOCOLOR,
+ .xpanstep = 1,
+ .ypanstep = 1,
+ .ywrapstep = 1,
+ .accel = FB_ACCEL_NONE
+};
+
+struct da8xx_panel {
+ const char name[25]; /* Full name <vendor>_<model> */
+ unsigned short width;
+ unsigned short height;
+ int hfp; /* Horizontal front porch */
+ int hbp; /* Horizontal back porch */
+ int hsw; /* Horizontal Sync Pulse Width */
+ int vfp; /* Vertical front porch */
+ int vbp; /* Vertical back porch */
+ int vsw; /* Vertical Sync Pulse Width */
+ int pxl_clk; /* Pixel clock */
+ unsigned char invert_pxl_clk; /* Invert Pixel clock */
+};
+
+static struct da8xx_panel known_lcd_panels[] = {
+ /* Sharp LCD035Q3DG01 */
+ [0] = {
+ .name = "Sharp_LCD035Q3DG01",
+ .width = 320,
+ .height = 240,
+ .hfp = 8,
+ .hbp = 6,
+ .hsw = 0,
+ .vfp = 2,
+ .vbp = 2,
+ .vsw = 0,
+ .pxl_clk = 0x10,
+ .invert_pxl_clk = 1,
+ },
+ /* Sharp LK043T1DG01 */
+ [1] = {
+ .name = "Sharp_LK043T1DG01",
+ .width = 480,
+ .height = 272,
+ .hfp = 2,
+ .hbp = 2,
+ .hsw = 41,
+ .vfp = 2,
+ .vbp = 2,
+ .vsw = 10,
+ .pxl_clk = 0x12,
+ .invert_pxl_clk = 0,
+ },
+};
+
+/* Disable the Raster Engine of the LCD Controller */
+static void lcd_disable_raster(struct da8xx_fb_par *par)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ if (reg & LCD_RASTER_ENABLE)
+ lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+}
+
+static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
+{
+ u32 tmp = par->p_palette_base + par->databuf_sz - 4;
+ u32 reg;
+
+ /* Update the databuf in the hw. */
+ lcdc_write(par->p_palette_base, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(tmp, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+
+ /* Start the DMA. */
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ reg &= ~(3 << 20);
+ if (load_mode == LOAD_DATA)
+ reg |= LCD_PALETTE_LOAD_MODE(PALETTE_AND_DATA);
+ else if (load_mode == LOAD_PALETTE)
+ reg |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
+
+ lcdc_write(reg, LCD_RASTER_CTRL_REG);
+}
+
+/* Configure the Burst Size of DMA */
+static int lcd_cfg_dma(int burst_size)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_DMA_CTRL_REG) & 0x00000001;
+ switch (burst_size) {
+ case 1:
+ reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_1);
+ break;
+ case 2:
+ reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_2);
+ break;
+ case 4:
+ reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_4);
+ break;
+ case 8:
+ reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_8);
+ break;
+ case 16:
+ reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ lcdc_write(reg, LCD_DMA_CTRL_REG);
+
+ return 0;
+}
+
+static void lcd_cfg_ac_bias(int period, int transitions_per_int)
+{
+ u32 reg;
+
+ /* Set the AC Bias Period and Number of Transitions per Interrupt */
+ reg = lcdc_read(LCD_RASTER_TIMING_2_REG) & 0xFFF00000;
+ reg |= LCD_AC_BIAS_FREQUENCY(period) |
+ LCD_AC_BIAS_TRANSITIONS_PER_INT(transitions_per_int);
+ lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
+}
+
+static void lcd_cfg_horizontal_sync(int back_porch, int pulse_width,
+ int front_porch)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0xf;
+ reg |= ((back_porch & 0xff) << 24)
+ | ((front_porch & 0xff) << 16)
+ | ((pulse_width & 0x3f) << 10);
+ lcdc_write(reg, LCD_RASTER_TIMING_0_REG);
+}
+
+static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
+ int front_porch)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_RASTER_TIMING_1_REG) & 0x3ff;
+ reg |= ((back_porch & 0xff) << 24)
+ | ((front_porch & 0xff) << 16)
+ | ((pulse_width & 0x3f) << 10);
+ lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
+}
+
+static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(LCD_TFT_MODE |
+ LCD_MONO_8BIT_MODE |
+ LCD_MONOCHROME_MODE);
+
+ switch (cfg->p_disp_panel->panel_shade) {
+ case MONOCHROME:
+ reg |= LCD_MONOCHROME_MODE;
+ if (cfg->mono_8bit_mode)
+ reg |= LCD_MONO_8BIT_MODE;
+ break;
+ case COLOR_ACTIVE:
+ reg |= LCD_TFT_MODE;
+ if (cfg->tft_alt_mode)
+ reg |= LCD_TFT_ALT_ENABLE;
+ break;
+
+ case COLOR_PASSIVE:
+ if (cfg->stn_565_mode)
+ reg |= LCD_STN_565_ENABLE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* enable additional interrupts here */
+ reg |= LCD_UNDERFLOW_INT_ENA;
+
+ lcdc_write(reg, LCD_RASTER_CTRL_REG);
+
+ reg = lcdc_read(LCD_RASTER_TIMING_2_REG);
+
+ if (cfg->sync_ctrl)
+ reg |= LCD_SYNC_CTRL;
+ else
+ reg &= ~LCD_SYNC_CTRL;
+
+ if (cfg->sync_edge)
+ reg |= LCD_SYNC_EDGE;
+ else
+ reg &= ~LCD_SYNC_EDGE;
+
+ if (cfg->invert_line_clock)
+ reg |= LCD_INVERT_LINE_CLOCK;
+ else
+ reg &= ~LCD_INVERT_LINE_CLOCK;
+
+ if (cfg->invert_frm_clock)
+ reg |= LCD_INVERT_FRAME_CLOCK;
+ else
+ reg &= ~LCD_INVERT_FRAME_CLOCK;
+
+ lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
+
+ return 0;
+}
+
+static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
+ u32 bpp, u32 raster_order)
+{
+ u32 bpl, reg;
+
+ /* Disable Dual Frame Buffer. */
+ reg = lcdc_read(LCD_DMA_CTRL_REG);
+ lcdc_write(reg & ~LCD_DUAL_FRAME_BUFFER_ENABLE,
+ LCD_DMA_CTRL_REG);
+ /* Set the Panel Width */
+ /* Pixels per line = (PPL + 1)*16 */
+ /* 0x3F in bits 4..9 gives max horizontal resolution = 1024 pixels */
+ width &= 0x3f0;
+ reg = lcdc_read(LCD_RASTER_TIMING_0_REG);
+ reg &= 0xfffffc00;
+ reg |= ((width >> 4) - 1) << 4;
+ lcdc_write(reg, LCD_RASTER_TIMING_0_REG);
+
+ /* Set the Panel Height */
+ reg = lcdc_read(LCD_RASTER_TIMING_1_REG);
+ reg = ((height - 1) & 0x3ff) | (reg & 0xfffffc00);
+ lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
+
+ /* Set the Raster Order of the Frame Buffer */
+ reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8);
+ if (raster_order)
+ reg |= LCD_RASTER_ORDER;
+ lcdc_write(reg, LCD_RASTER_CTRL_REG);
+
+ switch (bpp) {
+ case 1:
+ case 2:
+ case 4:
+ case 16:
+ par->palette_sz = 16 * 2;
+ break;
+
+ case 8:
+ par->palette_sz = 256 * 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ bpl = width * bpp / 8;
+ par->databuf_sz = height * bpl + par->palette_sz;
+
+ return 0;
+}
+
+static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct da8xx_fb_par *par = info->par;
+ unsigned short *palette = (unsigned short *)par->v_palette_base;
+ u_short pal;
+
+ if (regno > 255)
+ return 1;
+
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ return 1;
+
+ if (info->var.bits_per_pixel == 8) {
+ red >>= 4;
+ green >>= 8;
+ blue >>= 12;
+
+ pal = (red & 0x0f00);
+ pal |= (green & 0x00f0);
+ pal |= (blue & 0x000f);
+
+ palette[regno] = pal;
+
+ } else if ((info->var.bits_per_pixel == 16) && regno < 16) {
+ red >>= (16 - info->var.red.length);
+ red <<= info->var.red.offset;
+
+ green >>= (16 - info->var.green.length);
+ green <<= info->var.green.offset;
+
+ blue >>= (16 - info->var.blue.length);
+ blue <<= info->var.blue.offset;
+
+ par->pseudo_palette[regno] = red | green | blue;
+
+ palette[0] = 0x4000;
+ }
+
+ return 0;
+}
+
+static void lcd_reset(struct da8xx_fb_par *par)
+{
+ /* Disable the Raster if previously Enabled */
+ if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE)
+ lcd_disable_raster(par);
+
+ /* DMA has to be disabled */
+ lcdc_write(0, LCD_DMA_CTRL_REG);
+ lcdc_write(0, LCD_RASTER_CTRL_REG);
+}
+
+static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
+ struct da8xx_panel *panel)
+{
+ u32 bpp;
+ int ret = 0;
+
+ lcd_reset(par);
+
+ /* Configure the LCD clock divisor. */
+ lcdc_write(LCD_CLK_DIVISOR(panel->pxl_clk) |
+ (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
+
+ if (panel->invert_pxl_clk)
+ lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
+ LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
+ else
+ lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) &
+ ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
+
+ /* Configure the DMA burst size. */
+ ret = lcd_cfg_dma(cfg->dma_burst_sz);
+ if (ret < 0)
+ return ret;
+
+ /* Configure the AC bias properties. */
+ lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt);
+
+ /* Configure the vertical and horizontal sync properties. */
+ lcd_cfg_vertical_sync(panel->vbp, panel->vsw, panel->vfp);
+ lcd_cfg_horizontal_sync(panel->hbp, panel->hsw, panel->hfp);
+
+ /* Configure the display */
+ ret = lcd_cfg_display(cfg);
+ if (ret < 0)
+ return ret;
+
+ if (QVGA != cfg->p_disp_panel->panel_type)
+ return -EINVAL;
+
+ if (cfg->bpp <= cfg->p_disp_panel->max_bpp &&
+ cfg->bpp >= cfg->p_disp_panel->min_bpp)
+ bpp = cfg->bpp;
+ else
+ bpp = cfg->p_disp_panel->max_bpp;
+ if (bpp == 12)
+ bpp = 16;
+ ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->width,
+ (unsigned int)panel->height, bpp,
+ cfg->raster_order);
+ if (ret < 0)
+ return ret;
+
+ /* Configure FDD */
+ lcdc_write((lcdc_read(LCD_RASTER_CTRL_REG) & 0xfff00fff) |
+ (cfg->fdd << 12), LCD_RASTER_CTRL_REG);
+
+ return 0;
+}
+
+static irqreturn_t lcdc_irq_handler(int irq, void *arg)
+{
+ u32 stat = lcdc_read(LCD_STAT_REG);
+ u32 reg;
+
+ if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcdc_write(stat, LCD_STAT_REG);
+ lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ } else
+ lcdc_write(stat, LCD_STAT_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ int err = 0;
+
+ switch (var->bits_per_pixel) {
+ case 1:
+ case 8:
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 0;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 4:
+ var->red.offset = 0;
+ var->red.length = 4;
+ var->green.offset = 0;
+ var->green.length = 4;
+ var->blue.offset = 0;
+ var->blue.length = 4;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 16: /* RGB 565 */
+ var->red.offset = 0;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 11;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+ return err;
+}
+
+static int __devexit fb_remove(struct platform_device *dev)
+{
+ struct fb_info *info = dev_get_drvdata(&dev->dev);
+
+ if (info) {
+ struct da8xx_fb_par *par = info->par;
+
+ if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE)
+ lcd_disable_raster(par);
+ lcdc_write(0, LCD_RASTER_CTRL_REG);
+
+ /* disable DMA */
+ lcdc_write(0, LCD_DMA_CTRL_REG);
+
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
+ info->screen_base,
+ info->fix.smem_start);
+ free_irq(par->irq, par);
+ clk_disable(par->lcdc_clk);
+ clk_put(par->lcdc_clk);
+ framebuffer_release(info);
+ iounmap((void __iomem *)da8xx_fb_reg_base);
+ release_mem_region(lcdc_regs->start, resource_size(lcdc_regs));
+
+ }
+ return 0;
+}
+
+static int fb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ struct lcd_sync_arg sync_arg;
+
+ switch (cmd) {
+ case FBIOGET_CONTRAST:
+ case FBIOPUT_CONTRAST:
+ case FBIGET_BRIGHTNESS:
+ case FBIPUT_BRIGHTNESS:
+ case FBIGET_COLOR:
+ case FBIPUT_COLOR:
+ return -ENOTTY;
+ case FBIPUT_HSYNC:
+ if (copy_from_user(&sync_arg, (char *)arg,
+ sizeof(struct lcd_sync_arg)))
+ return -EFAULT;
+ lcd_cfg_horizontal_sync(sync_arg.back_porch,
+ sync_arg.pulse_width,
+ sync_arg.front_porch);
+ break;
+ case FBIPUT_VSYNC:
+ if (copy_from_user(&sync_arg, (char *)arg,
+ sizeof(struct lcd_sync_arg)))
+ return -EFAULT;
+ lcd_cfg_vertical_sync(sync_arg.back_porch,
+ sync_arg.pulse_width,
+ sync_arg.front_porch);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct fb_ops da8xx_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = fb_check_var,
+ .fb_setcolreg = fb_setcolreg,
+ .fb_ioctl = fb_ioctl,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int __init fb_probe(struct platform_device *device)
+{
+ struct da8xx_lcdc_platform_data *fb_pdata =
+ device->dev.platform_data;
+ struct lcd_ctrl_config *lcd_cfg;
+ struct da8xx_panel *lcdc_info;
+ struct fb_info *da8xx_fb_info;
+ struct clk *fb_clk = NULL;
+ struct da8xx_fb_par *par;
+ resource_size_t len;
+ int ret, i;
+
+ if (fb_pdata == NULL) {
+ dev_err(&device->dev, "Can not get platform data\n");
+ return -ENOENT;
+ }
+
+ lcdc_regs = platform_get_resource(device, IORESOURCE_MEM, 0);
+ if (!lcdc_regs) {
+ dev_err(&device->dev,
+ "Can not get memory resource for LCD controller\n");
+ return -ENOENT;
+ }
+
+ len = resource_size(lcdc_regs);
+
+ lcdc_regs = request_mem_region(lcdc_regs->start, len, lcdc_regs->name);
+ if (!lcdc_regs)
+ return -EBUSY;
+
+ da8xx_fb_reg_base = (resource_size_t)ioremap(lcdc_regs->start, len);
+ if (!da8xx_fb_reg_base) {
+ ret = -EBUSY;
+ goto err_request_mem;
+ }
+
+ fb_clk = clk_get(&device->dev, NULL);
+ if (IS_ERR(fb_clk)) {
+ dev_err(&device->dev, "Can not get device clock\n");
+ ret = -ENODEV;
+ goto err_ioremap;
+ }
+ ret = clk_enable(fb_clk);
+ if (ret)
+ goto err_clk_put;
+
+ for (i = 0, lcdc_info = known_lcd_panels;
+ i < ARRAY_SIZE(known_lcd_panels);
+ i++, lcdc_info++) {
+ if (strcmp(fb_pdata->type, lcdc_info->name) == 0)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(known_lcd_panels)) {
+ dev_err(&device->dev, "GLCD: No valid panel found\n");
+ ret = -ENODEV;
+ goto err_clk_disable;
+ } else
+ dev_info(&device->dev, "GLCD: Found %s panel\n",
+ fb_pdata->type);
+
+ lcd_cfg = (struct lcd_ctrl_config *)fb_pdata->controller_data;
+
+ da8xx_fb_info = framebuffer_alloc(sizeof(struct da8xx_fb_par),
+ &device->dev);
+ if (!da8xx_fb_info) {
+ dev_dbg(&device->dev, "Memory allocation failed for fb_info\n");
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ par = da8xx_fb_info->par;
+
+ if (lcd_init(par, lcd_cfg, lcdc_info) < 0) {
+ dev_err(&device->dev, "lcd_init failed\n");
+ ret = -EFAULT;
+ goto err_release_fb;
+ }
+
+ /* allocate frame buffer */
+ da8xx_fb_info->screen_base = dma_alloc_coherent(NULL,
+ par->databuf_sz + PAGE_SIZE,
+ (resource_size_t *)
+ &da8xx_fb_info->fix.smem_start,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!da8xx_fb_info->screen_base) {
+ dev_err(&device->dev,
+ "GLCD: kmalloc for frame buffer failed\n");
+ ret = -EINVAL;
+ goto err_release_fb;
+ }
+
+ /* move palette base pointer by (PAGE_SIZE - palette_sz) bytes */
+ par->v_palette_base = da8xx_fb_info->screen_base +
+ (PAGE_SIZE - par->palette_sz);
+ par->p_palette_base = da8xx_fb_info->fix.smem_start +
+ (PAGE_SIZE - par->palette_sz);
+
+ /* the rest of the frame buffer is pixel data */
+ da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz;
+ da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
+ da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
+
+ par->lcdc_clk = fb_clk;
+
+ par->irq = platform_get_irq(device, 0);
+ if (par->irq < 0) {
+ ret = -ENOENT;
+ goto err_release_fb_mem;
+ }
+
+ ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
+ if (ret)
+ goto err_release_fb_mem;
+
+ /* Initialize par */
+ da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
+
+ da8xx_fb_var.xres = lcdc_info->width;
+ da8xx_fb_var.xres_virtual = lcdc_info->width;
+
+ da8xx_fb_var.yres = lcdc_info->height;
+ da8xx_fb_var.yres_virtual = lcdc_info->height;
+
+ da8xx_fb_var.grayscale =
+ lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0;
+ da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp;
+
+ da8xx_fb_var.hsync_len = lcdc_info->hsw;
+ da8xx_fb_var.vsync_len = lcdc_info->vsw;
+
+ /* Initialize fbinfo */
+ da8xx_fb_info->flags = FBINFO_FLAG_DEFAULT;
+ da8xx_fb_info->fix = da8xx_fb_fix;
+ da8xx_fb_info->var = da8xx_fb_var;
+ da8xx_fb_info->fbops = &da8xx_fb_ops;
+ da8xx_fb_info->pseudo_palette = par->pseudo_palette;
+
+ ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
+ if (ret)
+ goto err_free_irq;
+
+ /* First palette_sz byte of the frame buffer is the palette */
+ da8xx_fb_info->cmap.len = par->palette_sz;
+
+ /* Flush the buffer to the screen. */
+ lcd_blit(LOAD_DATA, par);
+
+ /* initialize var_screeninfo */
+ da8xx_fb_var.activate = FB_ACTIVATE_FORCE;
+ fb_set_var(da8xx_fb_info, &da8xx_fb_var);
+
+ dev_set_drvdata(&device->dev, da8xx_fb_info);
+ /* Register the Frame Buffer */
+ if (register_framebuffer(da8xx_fb_info) < 0) {
+ dev_err(&device->dev,
+ "GLCD: Frame Buffer Registration Failed!\n");
+ ret = -EINVAL;
+ goto err_dealloc_cmap;
+ }
+
+ /* enable raster engine */
+ lcdc_write(lcdc_read(LCD_RASTER_CTRL_REG) |
+ LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+ return 0;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&da8xx_fb_info->cmap);
+
+err_free_irq:
+ free_irq(par->irq, par);
+
+err_release_fb_mem:
+ dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
+ da8xx_fb_info->screen_base,
+ da8xx_fb_info->fix.smem_start);
+
+err_release_fb:
+ framebuffer_release(da8xx_fb_info);
+
+err_clk_disable:
+ clk_disable(fb_clk);
+
+err_clk_put:
+ clk_put(fb_clk);
+
+err_ioremap:
+ iounmap((void __iomem *)da8xx_fb_reg_base);
+
+err_request_mem:
+ release_mem_region(lcdc_regs->start, len);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int fb_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return -EBUSY;
+}
+static int fb_resume(struct platform_device *dev)
+{
+ return -EBUSY;
+}
+#else
+#define fb_suspend NULL
+#define fb_resume NULL
+#endif
+
+static struct platform_driver da8xx_fb_driver = {
+ .probe = fb_probe,
+ .remove = fb_remove,
+ .suspend = fb_suspend,
+ .resume = fb_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da8xx_fb_init(void)
+{
+ return platform_driver_register(&da8xx_fb_driver);
+}
+
+static void __exit da8xx_fb_cleanup(void)
+{
+ platform_driver_unregister(&da8xx_fb_driver);
+}
+
+module_init(da8xx_fb_init);
+module_exit(da8xx_fb_cleanup);
+
+MODULE_DESCRIPTION("Framebuffer driver for TI da8xx/omap-l1xx");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
new file mode 100644
index 00000000000..bd9d46f9529
--- /dev/null
+++ b/drivers/video/ep93xx-fb.c
@@ -0,0 +1,646 @@
+/*
+ * linux/drivers/video/ep93xx-fb.c
+ *
+ * Framebuffer support for the EP93xx series.
+ *
+ * Copyright (C) 2007 Bluewater Systems Ltd
+ * Author: Ryan Mallon <ryan@bluewatersys.com>
+ *
+ * Copyright (c) 2009 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on the Cirrus Logic ep93xxfb driver, and various other ep93xxfb
+ * drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/fb.h>
+
+#include <mach/fb.h>
+
+/* Vertical Frame Timing Registers */
+#define EP93XXFB_VLINES_TOTAL 0x0000 /* SW locked */
+#define EP93XXFB_VSYNC 0x0004 /* SW locked */
+#define EP93XXFB_VACTIVE 0x0008 /* SW locked */
+#define EP93XXFB_VBLANK 0x0228 /* SW locked */
+#define EP93XXFB_VCLK 0x000c /* SW locked */
+
+/* Horizontal Frame Timing Registers */
+#define EP93XXFB_HCLKS_TOTAL 0x0010 /* SW locked */
+#define EP93XXFB_HSYNC 0x0014 /* SW locked */
+#define EP93XXFB_HACTIVE 0x0018 /* SW locked */
+#define EP93XXFB_HBLANK 0x022c /* SW locked */
+#define EP93XXFB_HCLK 0x001c /* SW locked */
+
+/* Frame Buffer Memory Configuration Registers */
+#define EP93XXFB_SCREEN_PAGE 0x0028
+#define EP93XXFB_SCREEN_HPAGE 0x002c
+#define EP93XXFB_SCREEN_LINES 0x0030
+#define EP93XXFB_LINE_LENGTH 0x0034
+#define EP93XXFB_VLINE_STEP 0x0038
+#define EP93XXFB_LINE_CARRY 0x003c /* SW locked */
+#define EP93XXFB_EOL_OFFSET 0x0230
+
+/* Other Video Registers */
+#define EP93XXFB_BRIGHTNESS 0x0020
+#define EP93XXFB_ATTRIBS 0x0024 /* SW locked */
+#define EP93XXFB_SWLOCK 0x007c /* SW locked */
+#define EP93XXFB_AC_RATE 0x0214
+#define EP93XXFB_FIFO_LEVEL 0x0234
+#define EP93XXFB_PIXELMODE 0x0054
+#define EP93XXFB_PIXELMODE_32BPP (0x7 << 0)
+#define EP93XXFB_PIXELMODE_24BPP (0x6 << 0)
+#define EP93XXFB_PIXELMODE_16BPP (0x4 << 0)
+#define EP93XXFB_PIXELMODE_8BPP (0x2 << 0)
+#define EP93XXFB_PIXELMODE_SHIFT_1P_24B (0x0 << 3)
+#define EP93XXFB_PIXELMODE_SHIFT_1P_18B (0x1 << 3)
+#define EP93XXFB_PIXELMODE_COLOR_LUT (0x0 << 10)
+#define EP93XXFB_PIXELMODE_COLOR_888 (0x4 << 10)
+#define EP93XXFB_PIXELMODE_COLOR_555 (0x5 << 10)
+#define EP93XXFB_PARL_IF_OUT 0x0058
+#define EP93XXFB_PARL_IF_IN 0x005c
+
+/* Blink Control Registers */
+#define EP93XXFB_BLINK_RATE 0x0040
+#define EP93XXFB_BLINK_MASK 0x0044
+#define EP93XXFB_BLINK_PATTRN 0x0048
+#define EP93XXFB_PATTRN_MASK 0x004c
+#define EP93XXFB_BKGRND_OFFSET 0x0050
+
+/* Hardware Cursor Registers */
+#define EP93XXFB_CURSOR_ADR_START 0x0060
+#define EP93XXFB_CURSOR_ADR_RESET 0x0064
+#define EP93XXFB_CURSOR_SIZE 0x0068
+#define EP93XXFB_CURSOR_COLOR1 0x006c
+#define EP93XXFB_CURSOR_COLOR2 0x0070
+#define EP93XXFB_CURSOR_BLINK_COLOR1 0x021c
+#define EP93XXFB_CURSOR_BLINK_COLOR2 0x0220
+#define EP93XXFB_CURSOR_XY_LOC 0x0074
+#define EP93XXFB_CURSOR_DSCAN_HY_LOC 0x0078
+#define EP93XXFB_CURSOR_BLINK_RATE_CTRL 0x0224
+
+/* LUT Registers */
+#define EP93XXFB_GRY_SCL_LUTR 0x0080
+#define EP93XXFB_GRY_SCL_LUTG 0x0280
+#define EP93XXFB_GRY_SCL_LUTB 0x0300
+#define EP93XXFB_LUT_SW_CONTROL 0x0218
+#define EP93XXFB_LUT_SW_CONTROL_SWTCH (1 << 0)
+#define EP93XXFB_LUT_SW_CONTROL_SSTAT (1 << 1)
+#define EP93XXFB_COLOR_LUT 0x0400
+
+/* Video Signature Registers */
+#define EP93XXFB_VID_SIG_RSLT_VAL 0x0200
+#define EP93XXFB_VID_SIG_CTRL 0x0204
+#define EP93XXFB_VSIG 0x0208
+#define EP93XXFB_HSIG 0x020c
+#define EP93XXFB_SIG_CLR_STR 0x0210
+
+/* Minimum / Maximum resolutions supported */
+#define EP93XXFB_MIN_XRES 64
+#define EP93XXFB_MIN_YRES 64
+#define EP93XXFB_MAX_XRES 1024
+#define EP93XXFB_MAX_YRES 768
+
+struct ep93xx_fbi {
+ struct ep93xxfb_mach_info *mach_info;
+ struct clk *clk;
+ struct resource *res;
+ void __iomem *mmio_base;
+ unsigned int pseudo_palette[256];
+};
+
+static int check_screenpage_bug = 1;
+module_param(check_screenpage_bug, int, 0644);
+MODULE_PARM_DESC(check_screenpage_bug,
+ "Check for bit 27 screen page bug. Default = 1");
+
+static inline unsigned int ep93xxfb_readl(struct ep93xx_fbi *fbi,
+ unsigned int off)
+{
+ return __raw_readl(fbi->mmio_base + off);
+}
+
+static inline void ep93xxfb_writel(struct ep93xx_fbi *fbi,
+ unsigned int val, unsigned int off)
+{
+ __raw_writel(val, fbi->mmio_base + off);
+}
+
+/*
+ * Write to one of the locked raster registers.
+ */
+static inline void ep93xxfb_out_locked(struct ep93xx_fbi *fbi,
+ unsigned int val, unsigned int reg)
+{
+ /*
+ * We don't need a lock or delay here since the raster register
+ * block will remain unlocked until the next access.
+ */
+ ep93xxfb_writel(fbi, 0xaa, EP93XXFB_SWLOCK);
+ ep93xxfb_writel(fbi, val, reg);
+}
+
+static void ep93xxfb_set_video_attribs(struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+ unsigned int attribs;
+
+ attribs = EP93XXFB_ENABLE;
+ attribs |= fbi->mach_info->flags;
+ ep93xxfb_out_locked(fbi, attribs, EP93XXFB_ATTRIBS);
+}
+
+static int ep93xxfb_set_pixelmode(struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+ unsigned int val;
+
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ val = EP93XXFB_PIXELMODE_8BPP | EP93XXFB_PIXELMODE_COLOR_LUT |
+ EP93XXFB_PIXELMODE_SHIFT_1P_18B;
+
+ info->var.red.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.offset = 0;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+
+ case 16:
+ val = EP93XXFB_PIXELMODE_16BPP | EP93XXFB_PIXELMODE_COLOR_555 |
+ EP93XXFB_PIXELMODE_SHIFT_1P_18B;
+
+ info->var.red.offset = 11;
+ info->var.red.length = 5;
+ info->var.green.offset = 5;
+ info->var.green.length = 6;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 5;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+
+ case 24:
+ val = EP93XXFB_PIXELMODE_24BPP | EP93XXFB_PIXELMODE_COLOR_888 |
+ EP93XXFB_PIXELMODE_SHIFT_1P_24B;
+
+ info->var.red.offset = 16;
+ info->var.red.length = 8;
+ info->var.green.offset = 8;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+
+ case 32:
+ val = EP93XXFB_PIXELMODE_32BPP | EP93XXFB_PIXELMODE_COLOR_888 |
+ EP93XXFB_PIXELMODE_SHIFT_1P_24B;
+
+ info->var.red.offset = 16;
+ info->var.red.length = 8;
+ info->var.green.offset = 8;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ep93xxfb_writel(fbi, val, EP93XXFB_PIXELMODE);
+ return 0;
+}
+
+static void ep93xxfb_set_timing(struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+ unsigned int vlines_total, hclks_total, start, stop;
+
+ vlines_total = info->var.yres + info->var.upper_margin +
+ info->var.lower_margin + info->var.vsync_len - 1;
+
+ hclks_total = info->var.xres + info->var.left_margin +
+ info->var.right_margin + info->var.hsync_len - 1;
+
+ ep93xxfb_out_locked(fbi, vlines_total, EP93XXFB_VLINES_TOTAL);
+ ep93xxfb_out_locked(fbi, hclks_total, EP93XXFB_HCLKS_TOTAL);
+
+ start = vlines_total;
+ stop = vlines_total - info->var.vsync_len;
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VSYNC);
+
+ start = vlines_total - info->var.vsync_len - info->var.upper_margin;
+ stop = info->var.lower_margin - 1;
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VBLANK);
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VACTIVE);
+
+ start = vlines_total;
+ stop = vlines_total + 1;
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_VCLK);
+
+ start = hclks_total;
+ stop = hclks_total - info->var.hsync_len;
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HSYNC);
+
+ start = hclks_total - info->var.hsync_len - info->var.left_margin;
+ stop = info->var.right_margin - 1;
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HBLANK);
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HACTIVE);
+
+ start = hclks_total;
+ stop = hclks_total;
+ ep93xxfb_out_locked(fbi, start | (stop << 16), EP93XXFB_HCLK);
+
+ ep93xxfb_out_locked(fbi, 0x0, EP93XXFB_LINE_CARRY);
+}
+
+static int ep93xxfb_set_par(struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+
+ clk_set_rate(fbi->clk, 1000 * PICOS2KHZ(info->var.pixclock));
+
+ ep93xxfb_set_timing(info);
+
+ info->fix.line_length = info->var.xres_virtual *
+ info->var.bits_per_pixel / 8;
+
+ ep93xxfb_writel(fbi, info->fix.smem_start, EP93XXFB_SCREEN_PAGE);
+ ep93xxfb_writel(fbi, info->var.yres - 1, EP93XXFB_SCREEN_LINES);
+ ep93xxfb_writel(fbi, ((info->var.xres * info->var.bits_per_pixel)
+ / 32) - 1, EP93XXFB_LINE_LENGTH);
+ ep93xxfb_writel(fbi, info->fix.line_length / 4, EP93XXFB_VLINE_STEP);
+ ep93xxfb_set_video_attribs(info);
+ return 0;
+}
+
+static int ep93xxfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ int err;
+
+ err = ep93xxfb_set_pixelmode(info);
+ if (err)
+ return err;
+
+ var->xres = max_t(unsigned int, var->xres, EP93XXFB_MIN_XRES);
+ var->xres = min_t(unsigned int, var->xres, EP93XXFB_MAX_XRES);
+ var->xres_virtual = max(var->xres_virtual, var->xres);
+
+ var->yres = max_t(unsigned int, var->yres, EP93XXFB_MIN_YRES);
+ var->yres = min_t(unsigned int, var->yres, EP93XXFB_MAX_YRES);
+ var->yres_virtual = max(var->yres_virtual, var->yres);
+
+ return 0;
+}
+
+static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
+
+ if (offset < info->fix.smem_len) {
+ return dma_mmap_writecombine(info->dev, vma, info->screen_base,
+ info->fix.smem_start,
+ info->fix.smem_len);
+ }
+
+ return -EINVAL;
+}
+
+static int ep93xxfb_blank(int blank_mode, struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+ unsigned int attribs = ep93xxfb_readl(fbi, EP93XXFB_ATTRIBS);
+
+ if (blank_mode) {
+ if (fbi->mach_info->blank)
+ fbi->mach_info->blank(blank_mode, info);
+ ep93xxfb_out_locked(fbi, attribs & ~EP93XXFB_ENABLE,
+ EP93XXFB_ATTRIBS);
+ clk_disable(fbi->clk);
+ } else {
+ clk_enable(fbi->clk);
+ ep93xxfb_out_locked(fbi, attribs | EP93XXFB_ENABLE,
+ EP93XXFB_ATTRIBS);
+ if (fbi->mach_info->blank)
+ fbi->mach_info->blank(blank_mode, info);
+ }
+
+ return 0;
+}
+
+static inline int ep93xxfb_convert_color(int val, int width)
+{
+ return ((val << width) + 0x7fff - val) >> 16;
+}
+
+static int ep93xxfb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+ unsigned int *pal = info->pseudo_palette;
+ unsigned int ctrl, i, rgb, lut_current, lut_stat;
+
+ switch (info->fix.visual) {
+ case FB_VISUAL_PSEUDOCOLOR:
+ rgb = ((red & 0xff00) << 8) | (green & 0xff00) |
+ ((blue & 0xff00) >> 8);
+
+ pal[regno] = rgb;
+ ep93xxfb_writel(fbi, rgb, (EP93XXFB_COLOR_LUT + (regno << 2)));
+ ctrl = ep93xxfb_readl(fbi, EP93XXFB_LUT_SW_CONTROL);
+ lut_stat = !!(ctrl & EP93XXFB_LUT_SW_CONTROL_SSTAT);
+ lut_current = !!(ctrl & EP93XXFB_LUT_SW_CONTROL_SWTCH);
+
+ if (lut_stat == lut_current) {
+ for (i = 0; i < 256; i++) {
+ ep93xxfb_writel(fbi, pal[i],
+ EP93XXFB_COLOR_LUT + (i << 2));
+ }
+
+ ep93xxfb_writel(fbi,
+ ctrl ^ EP93XXFB_LUT_SW_CONTROL_SWTCH,
+ EP93XXFB_LUT_SW_CONTROL);
+ }
+ break;
+
+ case FB_VISUAL_TRUECOLOR:
+ if (regno > 16)
+ return 1;
+
+ red = ep93xxfb_convert_color(red, info->var.red.length);
+ green = ep93xxfb_convert_color(green, info->var.green.length);
+ blue = ep93xxfb_convert_color(blue, info->var.blue.length);
+ transp = ep93xxfb_convert_color(transp,
+ info->var.transp.length);
+
+ pal[regno] = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+ break;
+
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct fb_ops ep93xxfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = ep93xxfb_check_var,
+ .fb_set_par = ep93xxfb_set_par,
+ .fb_blank = ep93xxfb_blank,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_setcolreg = ep93xxfb_setcolreg,
+ .fb_mmap = ep93xxfb_mmap,
+};
+
+static int __init ep93xxfb_calc_fbsize(struct ep93xxfb_mach_info *mach_info)
+{
+ int i, fb_size = 0;
+
+ if (mach_info->num_modes == EP93XXFB_USE_MODEDB) {
+ fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES *
+ mach_info->bpp / 8;
+ } else {
+ for (i = 0; i < mach_info->num_modes; i++) {
+ const struct fb_videomode *mode;
+ int size;
+
+ mode = &mach_info->modes[i];
+ size = mode->xres * mode->yres * mach_info->bpp / 8;
+ if (size > fb_size)
+ fb_size = size;
+ }
+ }
+
+ return fb_size;
+}
+
+static int __init ep93xxfb_alloc_videomem(struct fb_info *info)
+{
+ struct ep93xx_fbi *fbi = info->par;
+ char __iomem *virt_addr;
+ dma_addr_t phys_addr;
+ unsigned int fb_size;
+
+ fb_size = ep93xxfb_calc_fbsize(fbi->mach_info);
+ virt_addr = dma_alloc_writecombine(info->dev, fb_size,
+ &phys_addr, GFP_KERNEL);
+ if (!virt_addr)
+ return -ENOMEM;
+
+ /*
+ * There is a bug in the ep93xx framebuffer which causes problems
+ * if bit 27 of the physical address is set.
+ * See: http://marc.info/?l=linux-arm-kernel&m=110061245502000&w=2
+ * There does not seem to be any official errata for this, but I
+ * have confirmed the problem exists on my hardware (ep9315) at
+ * least.
+ */
+ if (check_screenpage_bug && phys_addr & (1 << 27)) {
+ dev_err(info->dev, "ep93xx framebuffer bug. phys addr (0x%x) "
+ "has bit 27 set: cannot init framebuffer\n",
+ phys_addr);
+
+ dma_free_coherent(info->dev, fb_size, virt_addr, phys_addr);
+ return -ENOMEM;
+ }
+
+ info->fix.smem_start = phys_addr;
+ info->fix.smem_len = fb_size;
+ info->screen_base = virt_addr;
+
+ return 0;
+}
+
+static void ep93xxfb_dealloc_videomem(struct fb_info *info)
+{
+ if (info->screen_base)
+ dma_free_coherent(info->dev, info->fix.smem_len,
+ info->screen_base, info->fix.smem_start);
+}
+
+static int __init ep93xxfb_probe(struct platform_device *pdev)
+{
+ struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
+ struct fb_info *info;
+ struct ep93xx_fbi *fbi;
+ struct resource *res;
+ char *video_mode;
+ int err;
+
+ if (!mach_info)
+ return -EINVAL;
+
+ info = framebuffer_alloc(sizeof(struct ep93xx_fbi), &pdev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = &pdev->dev;
+ platform_set_drvdata(pdev, info);
+ fbi = info->par;
+ fbi->mach_info = mach_info;
+
+ err = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (err)
+ goto failed;
+
+ err = ep93xxfb_alloc_videomem(info);
+ if (err)
+ goto failed;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENXIO;
+ goto failed;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ err = -EBUSY;
+ goto failed;
+ }
+
+ fbi->res = res;
+ fbi->mmio_base = ioremap(res->start, resource_size(res));
+ if (!fbi->mmio_base) {
+ err = -ENXIO;
+ goto failed;
+ }
+
+ strcpy(info->fix.id, pdev->name);
+ info->fbops = &ep93xxfb_ops;
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.vmode = FB_VMODE_NONINTERLACED;
+ info->flags = FBINFO_DEFAULT;
+ info->node = -1;
+ info->state = FBINFO_STATE_RUNNING;
+ info->pseudo_palette = &fbi->pseudo_palette;
+
+ fb_get_options("ep93xx-fb", &video_mode);
+ err = fb_find_mode(&info->var, info, video_mode,
+ fbi->mach_info->modes, fbi->mach_info->num_modes,
+ fbi->mach_info->default_mode, fbi->mach_info->bpp);
+ if (err == 0) {
+ dev_err(info->dev, "No suitable video mode found\n");
+ err = -EINVAL;
+ goto failed;
+ }
+
+ if (mach_info->setup) {
+ err = mach_info->setup(pdev);
+ if (err)
+ return err;
+ }
+
+ err = ep93xxfb_check_var(&info->var, info);
+ if (err)
+ goto failed;
+
+ fbi->clk = clk_get(info->dev, NULL);
+ if (IS_ERR(fbi->clk)) {
+ err = PTR_ERR(fbi->clk);
+ fbi->clk = NULL;
+ goto failed;
+ }
+
+ ep93xxfb_set_par(info);
+ clk_enable(fbi->clk);
+
+ err = register_framebuffer(info);
+ if (err)
+ goto failed;
+
+ dev_info(info->dev, "registered. Mode = %dx%d-%d\n",
+ info->var.xres, info->var.yres, info->var.bits_per_pixel);
+ return 0;
+
+failed:
+ if (fbi->clk)
+ clk_put(fbi->clk);
+ if (fbi->mmio_base)
+ iounmap(fbi->mmio_base);
+ if (fbi->res)
+ release_mem_region(fbi->res->start, resource_size(fbi->res));
+ ep93xxfb_dealloc_videomem(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ if (fbi->mach_info->teardown)
+ fbi->mach_info->teardown(pdev);
+ kfree(info);
+ platform_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static int ep93xxfb_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct ep93xx_fbi *fbi = info->par;
+
+ unregister_framebuffer(info);
+ clk_disable(fbi->clk);
+ clk_put(fbi->clk);
+ iounmap(fbi->mmio_base);
+ release_mem_region(fbi->res->start, resource_size(fbi->res));
+ ep93xxfb_dealloc_videomem(info);
+ fb_dealloc_cmap(&info->cmap);
+
+ if (fbi->mach_info->teardown)
+ fbi->mach_info->teardown(pdev);
+
+ kfree(info);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ep93xxfb_driver = {
+ .probe = ep93xxfb_probe,
+ .remove = ep93xxfb_remove,
+ .driver = {
+ .name = "ep93xx-fb",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __devinit ep93xxfb_init(void)
+{
+ return platform_driver_register(&ep93xxfb_driver);
+}
+
+static void __exit ep93xxfb_exit(void)
+{
+ platform_driver_unregister(&ep93xxfb_driver);
+}
+
+module_init(ep93xxfb_init);
+module_exit(ep93xxfb_exit);
+
+MODULE_DESCRIPTION("EP93XX Framebuffer Driver");
+MODULE_ALIAS("platform:ep93xx-fb");
+MODULE_AUTHOR("Ryan Mallon <ryan&bluewatersys.com>, "
+ "H Hartley Sweeten <hsweeten@visionengravers.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a85c818be94..a1f2e7ce730 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -871,8 +871,8 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
err = -EINVAL;
if (err || !info->fbops->fb_pan_display ||
- var->yoffset + yres > info->var.yres_virtual ||
- var->xoffset + info->var.xres > info->var.xres_virtual)
+ var->yoffset > info->var.yres_virtual - yres ||
+ var->xoffset > info->var.xres_virtual - info->var.xres)
return -EINVAL;
if ((err = info->fbops->fb_pan_display(var, info)))
@@ -954,6 +954,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
goto done;
if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
+ struct fb_var_screeninfo old_var;
struct fb_videomode mode;
if (info->fbops->fb_get_caps) {
@@ -963,10 +964,20 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
goto done;
}
+ old_var = info->var;
info->var = *var;
- if (info->fbops->fb_set_par)
- info->fbops->fb_set_par(info);
+ if (info->fbops->fb_set_par) {
+ ret = info->fbops->fb_set_par(info);
+
+ if (ret) {
+ info->var = old_var;
+ printk(KERN_WARNING "detected "
+ "fb_set_par error, "
+ "error code: %d\n", ret);
+ goto done;
+ }
+ }
fb_pan_display(info, &info->var);
fb_set_cmap(&info->cmap, info);
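
The fbmem changes above do two things: fb_pan_display() now writes its bounds checks as subtractions so a huge xoffset/yoffset cannot wrap the addition, and fb_set_var() snapshots info->var before calling fb_set_par() and restores it when the driver reports failure, so a rejected mode no longer leaves info->var describing state the hardware never entered. That rollback only helps if drivers actually return a negative errno from fb_set_par(); a sketch of the expected driver-side behaviour with hypothetical foofb_* helpers:

static int foofb_set_par(struct fb_info *info)
{
        struct foofb_par *par = info->par;

        if (!foofb_mode_supported(par, &info->var))
                return -EINVAL;         /* fb_set_var() will restore the previous var */

        foofb_program_timings(par, &info->var);
        return 0;
}
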
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 30ae3022f63..66358fa825f 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -710,7 +710,7 @@ static int __init imxfb_probe(struct platform_device *pdev)
fbi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);;
+ ret = PTR_ERR(fbi->clk);
dev_err(&pdev->dev, "unable to get clock: %d\n", ret);
goto failed_getclock;
}
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index d42346e7fdd..09f6e045d5b 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -25,16 +25,19 @@ static inline unsigned int g450_f2vco(unsigned char p, unsigned int fin) {
return (p & 0x40) ? fin : fin << ((p & 3) + 1);
}
-static unsigned int g450_mnp2vco(CPMINFO unsigned int mnp) {
+static unsigned int g450_mnp2vco(const struct matrox_fb_info *minfo,
+ unsigned int mnp)
+{
unsigned int m, n;
m = ((mnp >> 16) & 0x0FF) + 1;
n = ((mnp >> 7) & 0x1FE) + 4;
- return (ACCESS_FBINFO(features).pll.ref_freq * n + (m >> 1)) / m;
+ return (minfo->features.pll.ref_freq * n + (m >> 1)) / m;
}
-unsigned int g450_mnp2f(CPMINFO unsigned int mnp) {
- return g450_vco2f(mnp, g450_mnp2vco(PMINFO mnp));
+unsigned int g450_mnp2f(const struct matrox_fb_info *minfo, unsigned int mnp)
+{
+ return g450_vco2f(mnp, g450_mnp2vco(minfo, mnp));
}
static inline unsigned int pll_freq_delta(unsigned int f1, unsigned int f2) {
@@ -49,7 +52,10 @@ static inline unsigned int pll_freq_delta(unsigned int f1, unsigned int f2) {
#define NO_MORE_MNP 0x01FFFFFF
#define G450_MNP_FREQBITS (0xFFFFFF43) /* do not mask high byte so we'll catch NO_MORE_MNP */
-static unsigned int g450_nextpll(CPMINFO const struct matrox_pll_limits* pi, unsigned int* fvco, unsigned int mnp) {
+static unsigned int g450_nextpll(const struct matrox_fb_info *minfo,
+ const struct matrox_pll_limits *pi,
+ unsigned int *fvco, unsigned int mnp)
+{
unsigned int m, n, p;
unsigned int tvco = *fvco;
@@ -90,12 +96,15 @@ static unsigned int g450_nextpll(CPMINFO const struct matrox_pll_limits* pi, uns
} else {
m--;
}
- n = ((tvco * (m+1) + ACCESS_FBINFO(features).pll.ref_freq) / (ACCESS_FBINFO(features).pll.ref_freq * 2)) - 2;
+ n = ((tvco * (m+1) + minfo->features.pll.ref_freq) / (minfo->features.pll.ref_freq * 2)) - 2;
} while (n < 0x03 || n > 0x7A);
return (m << 16) | (n << 8) | p;
}
-static unsigned int g450_firstpll(CPMINFO const struct matrox_pll_limits* pi, unsigned int* vco, unsigned int fout) {
+static unsigned int g450_firstpll(const struct matrox_fb_info *minfo,
+ const struct matrox_pll_limits *pi,
+ unsigned int *vco, unsigned int fout)
+{
unsigned int p;
unsigned int vcomax;
@@ -121,88 +130,94 @@ static unsigned int g450_firstpll(CPMINFO const struct matrox_pll_limits* pi, un
}
*vco = tvco;
}
- return g450_nextpll(PMINFO pi, vco, 0xFF0000 | p);
+ return g450_nextpll(minfo, pi, vco, 0xFF0000 | p);
}
-static inline unsigned int g450_setpll(CPMINFO unsigned int mnp, unsigned int pll) {
+static inline unsigned int g450_setpll(const struct matrox_fb_info *minfo,
+ unsigned int mnp, unsigned int pll)
+{
switch (pll) {
case M_PIXEL_PLL_A:
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLAM, mnp >> 16);
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLAN, mnp >> 8);
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLAP, mnp);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLAM, mnp >> 16);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLAN, mnp >> 8);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLAP, mnp);
return M1064_XPIXPLLSTAT;
case M_PIXEL_PLL_B:
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLBM, mnp >> 16);
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLBN, mnp >> 8);
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLBP, mnp);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLBM, mnp >> 16);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLBN, mnp >> 8);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLBP, mnp);
return M1064_XPIXPLLSTAT;
case M_PIXEL_PLL_C:
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLCM, mnp >> 16);
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLCN, mnp >> 8);
- matroxfb_DAC_out(PMINFO M1064_XPIXPLLCP, mnp);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLCM, mnp >> 16);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLCN, mnp >> 8);
+ matroxfb_DAC_out(minfo, M1064_XPIXPLLCP, mnp);
return M1064_XPIXPLLSTAT;
case M_SYSTEM_PLL:
- matroxfb_DAC_out(PMINFO DAC1064_XSYSPLLM, mnp >> 16);
- matroxfb_DAC_out(PMINFO DAC1064_XSYSPLLN, mnp >> 8);
- matroxfb_DAC_out(PMINFO DAC1064_XSYSPLLP, mnp);
+ matroxfb_DAC_out(minfo, DAC1064_XSYSPLLM, mnp >> 16);
+ matroxfb_DAC_out(minfo, DAC1064_XSYSPLLN, mnp >> 8);
+ matroxfb_DAC_out(minfo, DAC1064_XSYSPLLP, mnp);
return DAC1064_XSYSPLLSTAT;
case M_VIDEO_PLL:
- matroxfb_DAC_out(PMINFO M1064_XVIDPLLM, mnp >> 16);
- matroxfb_DAC_out(PMINFO M1064_XVIDPLLN, mnp >> 8);
- matroxfb_DAC_out(PMINFO M1064_XVIDPLLP, mnp);
+ matroxfb_DAC_out(minfo, M1064_XVIDPLLM, mnp >> 16);
+ matroxfb_DAC_out(minfo, M1064_XVIDPLLN, mnp >> 8);
+ matroxfb_DAC_out(minfo, M1064_XVIDPLLP, mnp);
return M1064_XVIDPLLSTAT;
}
return 0;
}
-static inline unsigned int g450_cmppll(CPMINFO unsigned int mnp, unsigned int pll) {
+static inline unsigned int g450_cmppll(const struct matrox_fb_info *minfo,
+ unsigned int mnp, unsigned int pll)
+{
unsigned char m = mnp >> 16;
unsigned char n = mnp >> 8;
unsigned char p = mnp;
switch (pll) {
case M_PIXEL_PLL_A:
- return (matroxfb_DAC_in(PMINFO M1064_XPIXPLLAM) != m ||
- matroxfb_DAC_in(PMINFO M1064_XPIXPLLAN) != n ||
- matroxfb_DAC_in(PMINFO M1064_XPIXPLLAP) != p);
+ return (matroxfb_DAC_in(minfo, M1064_XPIXPLLAM) != m ||
+ matroxfb_DAC_in(minfo, M1064_XPIXPLLAN) != n ||
+ matroxfb_DAC_in(minfo, M1064_XPIXPLLAP) != p);
case M_PIXEL_PLL_B:
- return (matroxfb_DAC_in(PMINFO M1064_XPIXPLLBM) != m ||
- matroxfb_DAC_in(PMINFO M1064_XPIXPLLBN) != n ||
- matroxfb_DAC_in(PMINFO M1064_XPIXPLLBP) != p);
+ return (matroxfb_DAC_in(minfo, M1064_XPIXPLLBM) != m ||
+ matroxfb_DAC_in(minfo, M1064_XPIXPLLBN) != n ||
+ matroxfb_DAC_in(minfo, M1064_XPIXPLLBP) != p);
case M_PIXEL_PLL_C:
- return (matroxfb_DAC_in(PMINFO M1064_XPIXPLLCM) != m ||
- matroxfb_DAC_in(PMINFO M1064_XPIXPLLCN) != n ||
- matroxfb_DAC_in(PMINFO M1064_XPIXPLLCP) != p);
+ return (matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) != m ||
+ matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) != n ||
+ matroxfb_DAC_in(minfo, M1064_XPIXPLLCP) != p);
case M_SYSTEM_PLL:
- return (matroxfb_DAC_in(PMINFO DAC1064_XSYSPLLM) != m ||
- matroxfb_DAC_in(PMINFO DAC1064_XSYSPLLN) != n ||
- matroxfb_DAC_in(PMINFO DAC1064_XSYSPLLP) != p);
+ return (matroxfb_DAC_in(minfo, DAC1064_XSYSPLLM) != m ||
+ matroxfb_DAC_in(minfo, DAC1064_XSYSPLLN) != n ||
+ matroxfb_DAC_in(minfo, DAC1064_XSYSPLLP) != p);
case M_VIDEO_PLL:
- return (matroxfb_DAC_in(PMINFO M1064_XVIDPLLM) != m ||
- matroxfb_DAC_in(PMINFO M1064_XVIDPLLN) != n ||
- matroxfb_DAC_in(PMINFO M1064_XVIDPLLP) != p);
+ return (matroxfb_DAC_in(minfo, M1064_XVIDPLLM) != m ||
+ matroxfb_DAC_in(minfo, M1064_XVIDPLLN) != n ||
+ matroxfb_DAC_in(minfo, M1064_XVIDPLLP) != p);
}
return 1;
}
-static inline int g450_isplllocked(CPMINFO unsigned int regidx) {
+static inline int g450_isplllocked(const struct matrox_fb_info *minfo,
+ unsigned int regidx)
+{
unsigned int j;
for (j = 0; j < 1000; j++) {
- if (matroxfb_DAC_in(PMINFO regidx) & 0x40) {
+ if (matroxfb_DAC_in(minfo, regidx) & 0x40) {
unsigned int r = 0;
int i;
for (i = 0; i < 100; i++) {
- r += matroxfb_DAC_in(PMINFO regidx) & 0x40;
+ r += matroxfb_DAC_in(minfo, regidx) & 0x40;
}
return r >= (90 * 0x40);
}
@@ -211,8 +226,10 @@ static inline int g450_isplllocked(CPMINFO unsigned int regidx) {
return 0;
}
-static int g450_testpll(CPMINFO unsigned int mnp, unsigned int pll) {
- return g450_isplllocked(PMINFO g450_setpll(PMINFO mnp, pll));
+static int g450_testpll(const struct matrox_fb_info *minfo, unsigned int mnp,
+ unsigned int pll)
+{
+ return g450_isplllocked(minfo, g450_setpll(minfo, mnp, pll));
}
static void updatehwstate_clk(struct matrox_hw_state* hw, unsigned int mnp, unsigned int pll) {
@@ -225,13 +242,19 @@ static void updatehwstate_clk(struct matrox_hw_state* hw, unsigned int mnp, unsi
}
}
-void matroxfb_g450_setpll_cond(WPMINFO unsigned int mnp, unsigned int pll) {
- if (g450_cmppll(PMINFO mnp, pll)) {
- g450_setpll(PMINFO mnp, pll);
+void matroxfb_g450_setpll_cond(struct matrox_fb_info *minfo, unsigned int mnp,
+ unsigned int pll)
+{
+ if (g450_cmppll(minfo, mnp, pll)) {
+ g450_setpll(minfo, mnp, pll);
}
}
-static inline unsigned int g450_findworkingpll(WPMINFO unsigned int pll, unsigned int* mnparray, unsigned int mnpcount) {
+static inline unsigned int g450_findworkingpll(struct matrox_fb_info *minfo,
+ unsigned int pll,
+ unsigned int *mnparray,
+ unsigned int mnpcount)
+{
unsigned int found = 0;
unsigned int idx;
unsigned int mnpfound = mnparray[0];
@@ -255,22 +278,22 @@ static inline unsigned int g450_findworkingpll(WPMINFO unsigned int pll, unsigne
while (sptr >= sarray) {
unsigned int mnp = *sptr--;
- if (g450_testpll(PMINFO mnp - 0x0300, pll) &&
- g450_testpll(PMINFO mnp + 0x0300, pll) &&
- g450_testpll(PMINFO mnp - 0x0200, pll) &&
- g450_testpll(PMINFO mnp + 0x0200, pll) &&
- g450_testpll(PMINFO mnp - 0x0100, pll) &&
- g450_testpll(PMINFO mnp + 0x0100, pll)) {
- if (g450_testpll(PMINFO mnp, pll)) {
+ if (g450_testpll(minfo, mnp - 0x0300, pll) &&
+ g450_testpll(minfo, mnp + 0x0300, pll) &&
+ g450_testpll(minfo, mnp - 0x0200, pll) &&
+ g450_testpll(minfo, mnp + 0x0200, pll) &&
+ g450_testpll(minfo, mnp - 0x0100, pll) &&
+ g450_testpll(minfo, mnp + 0x0100, pll)) {
+ if (g450_testpll(minfo, mnp, pll)) {
return mnp;
}
- } else if (!found && g450_testpll(PMINFO mnp, pll)) {
+ } else if (!found && g450_testpll(minfo, mnp, pll)) {
mnpfound = mnp;
found = 1;
}
}
}
- g450_setpll(PMINFO mnpfound, pll);
+ g450_setpll(minfo, mnpfound, pll);
return mnpfound;
}
@@ -283,7 +306,9 @@ static void g450_addcache(struct matrox_pll_cache* ci, unsigned int mnp_key, uns
ci->data[0].mnp_value = mnp_value;
}
-static int g450_checkcache(WPMINFO struct matrox_pll_cache* ci, unsigned int mnp_key) {
+static int g450_checkcache(struct matrox_fb_info *minfo,
+ struct matrox_pll_cache *ci, unsigned int mnp_key)
+{
unsigned int i;
mnp_key &= G450_MNP_FREQBITS;
@@ -303,8 +328,10 @@ static int g450_checkcache(WPMINFO struct matrox_pll_cache* ci, unsigned int mnp
return NO_MORE_MNP;
}
-static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
- unsigned int* mnparray, unsigned int* deltaarray) {
+static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
+ unsigned int pll, unsigned int *mnparray,
+ unsigned int *deltaarray)
+{
unsigned int mnpcount;
unsigned int pixel_vco;
const struct matrox_pll_limits* pi;
@@ -321,19 +348,19 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
matroxfb_DAC_lock_irqsave(flags);
- xpwrctrl = matroxfb_DAC_in(PMINFO M1064_XPWRCTRL);
- matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN);
+ xpwrctrl = matroxfb_DAC_in(minfo, M1064_XPWRCTRL);
+ matroxfb_DAC_out(minfo, M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN);
mga_outb(M_SEQ_INDEX, M_SEQ1);
mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF);
- tmp = matroxfb_DAC_in(PMINFO M1064_XPIXCLKCTRL);
+ tmp = matroxfb_DAC_in(minfo, M1064_XPIXCLKCTRL);
tmp |= M1064_XPIXCLKCTRL_DIS;
if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) {
tmp |= M1064_XPIXCLKCTRL_PLL_UP;
}
- matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp);
+ matroxfb_DAC_out(minfo, M1064_XPIXCLKCTRL, tmp);
/* DVI PLL preferred for frequencies up to
panel link max, standard PLL otherwise */
- if (fout >= MINFO->max_pixel_clock_panellink)
+ if (fout >= minfo->max_pixel_clock_panellink)
tmp = 0;
else tmp =
M1064_XDVICLKCTRL_DVIDATAPATHSEL |
@@ -341,8 +368,8 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
M1064_XDVICLKCTRL_C1DVICLKEN |
M1064_XDVICLKCTRL_DVILOOPCTL |
M1064_XDVICLKCTRL_P1LOOPBWDTCTL;
- matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL,tmp);
- matroxfb_DAC_out(PMINFO M1064_XPWRCTRL,
+ matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp);
+ matroxfb_DAC_out(minfo, M1064_XPWRCTRL,
xpwrctrl);
matroxfb_DAC_unlock_irqrestore(flags);
@@ -363,20 +390,20 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
}
mga_outb(M_MISC_REG, misc);
}
- pi = &ACCESS_FBINFO(limits.pixel);
- ci = &ACCESS_FBINFO(cache.pixel);
+ pi = &minfo->limits.pixel;
+ ci = &minfo->cache.pixel;
break;
case M_SYSTEM_PLL:
{
u_int32_t opt;
- pci_read_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, &opt);
+ pci_read_config_dword(minfo->pcidev, PCI_OPTION_REG, &opt);
if (!(opt & 0x20)) {
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, opt | 0x20);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, opt | 0x20);
}
}
- pi = &ACCESS_FBINFO(limits.system);
- ci = &ACCESS_FBINFO(cache.system);
+ pi = &minfo->limits.system;
+ ci = &minfo->cache.system;
break;
case M_VIDEO_PLL:
{
@@ -385,18 +412,18 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
unsigned long flags;
matroxfb_DAC_lock_irqsave(flags);
- tmp = matroxfb_DAC_in(PMINFO M1064_XPWRCTRL);
+ tmp = matroxfb_DAC_in(minfo, M1064_XPWRCTRL);
if (!(tmp & 2)) {
- matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, tmp | 2);
+ matroxfb_DAC_out(minfo, M1064_XPWRCTRL, tmp | 2);
}
- mnp = matroxfb_DAC_in(PMINFO M1064_XPIXPLLCM) << 16;
- mnp |= matroxfb_DAC_in(PMINFO M1064_XPIXPLLCN) << 8;
- pixel_vco = g450_mnp2vco(PMINFO mnp);
+ mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16;
+ mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8;
+ pixel_vco = g450_mnp2vco(minfo, mnp);
matroxfb_DAC_unlock_irqrestore(flags);
}
- pi = &ACCESS_FBINFO(limits.video);
- ci = &ACCESS_FBINFO(cache.video);
+ pi = &minfo->limits.video;
+ ci = &minfo->cache.video;
break;
default:
return -EINVAL;
@@ -407,12 +434,12 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
unsigned int mnp;
unsigned int xvco;
- for(mnp = g450_firstpll(PMINFO pi, &xvco, fout); mnp != NO_MORE_MNP; mnp = g450_nextpll(PMINFO pi, &xvco, mnp)) {
+ for (mnp = g450_firstpll(minfo, pi, &xvco, fout); mnp != NO_MORE_MNP; mnp = g450_nextpll(minfo, pi, &xvco, mnp)) {
unsigned int idx;
unsigned int vco;
unsigned int delta;
- vco = g450_mnp2vco(PMINFO mnp);
+ vco = g450_mnp2vco(minfo, mnp);
#if 0
if (pll == M_VIDEO_PLL) {
unsigned int big, small;
@@ -444,7 +471,7 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
* (freqs near VCOmin aren't as stable)
*/
if (delta == deltaarray[idx-1]
- && vco != g450_mnp2vco(PMINFO mnparray[idx-1])
+ && vco != g450_mnp2vco(minfo, mnparray[idx-1])
&& vco < (pi->vcomin * 17 / 16)) {
break;
}
@@ -468,14 +495,14 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
unsigned int mnp;
matroxfb_DAC_lock_irqsave(flags);
- mnp = g450_checkcache(PMINFO ci, mnparray[0]);
+ mnp = g450_checkcache(minfo, ci, mnparray[0]);
if (mnp != NO_MORE_MNP) {
- matroxfb_g450_setpll_cond(PMINFO mnp, pll);
+ matroxfb_g450_setpll_cond(minfo, mnp, pll);
} else {
- mnp = g450_findworkingpll(PMINFO pll, mnparray, mnpcount);
+ mnp = g450_findworkingpll(minfo, pll, mnparray, mnpcount);
g450_addcache(ci, mnparray[0], mnp);
}
- updatehwstate_clk(&ACCESS_FBINFO(hw), mnp, pll);
+ updatehwstate_clk(&minfo->hw, mnp, pll);
matroxfb_DAC_unlock_irqrestore(flags);
return mnp;
}
@@ -485,14 +512,16 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
* Currently there is 5(p) * 10(m) = 50 possible values. */
#define MNP_TABLE_SIZE 64
-int matroxfb_g450_setclk(WPMINFO unsigned int fout, unsigned int pll) {
+int matroxfb_g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
+ unsigned int pll)
+{
unsigned int* arr;
arr = kmalloc(sizeof(*arr) * MNP_TABLE_SIZE * 2, GFP_KERNEL);
if (arr) {
int r;
- r = __g450_setclk(PMINFO fout, pll, arr, arr + MNP_TABLE_SIZE);
+ r = __g450_setclk(minfo, fout, pll, arr, arr + MNP_TABLE_SIZE);
kfree(arr);
return r;
}
diff --git a/drivers/video/matrox/g450_pll.h b/drivers/video/matrox/g450_pll.h
index c17ed74501e..aac615d1844 100644
--- a/drivers/video/matrox/g450_pll.h
+++ b/drivers/video/matrox/g450_pll.h
@@ -3,8 +3,10 @@
#include "matroxfb_base.h"
-int matroxfb_g450_setclk(WPMINFO unsigned int fout, unsigned int pll);
-unsigned int g450_mnp2f(CPMINFO unsigned int mnp);
-void matroxfb_g450_setpll_cond(WPMINFO unsigned int mnp, unsigned int pll);
+int matroxfb_g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
+ unsigned int pll);
+unsigned int g450_mnp2f(const struct matrox_fb_info *minfo, unsigned int mnp);
+void matroxfb_g450_setpll_cond(struct matrox_fb_info *minfo, unsigned int mnp,
+ unsigned int pll);
#endif /* __G450_PLL_H__ */
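The hunks above and below are all the same mechanical change: the CPMINFO/WPMINFO/PMINFO helper macros used to hide the struct matrox_fb_info pointer (and ACCESS_FBINFO(x) hid the minfo->x dereference), and the patch spells that pointer out as an ordinary first parameter. A minimal standalone sketch of the before/after calling convention, assuming the multihead definitions of these macros — the real definitions live in matroxfb_base.h, which is not part of this excerpt, and dac_out/0x4c are stand-ins for the real matroxfb helpers and register indices:

	/* Illustration only, not driver code: how the PMINFO-style macros
	 * turn into an explicit parameter. */
	#include <stdio.h>

	struct matrox_fb_info { int dummy; };

	/* assumed multihead-style definitions */
	#define PMINFO minfo,
	#define WPMINFO struct matrox_fb_info *minfo,

	static void dac_out(struct matrox_fb_info *minfo, int reg, int val)
	{
		printf("write reg 0x%02x = 0x%02x (minfo=%p)\n", reg, val, (void *)minfo);
	}

	/* old style: the pointer is hidden behind the macros */
	static void old_style(WPMINFO int mnp)
	{
		dac_out(PMINFO 0x4c, mnp >> 16);
	}

	/* new style after the patch: the pointer is an ordinary parameter */
	static void new_style(struct matrox_fb_info *minfo, int mnp)
	{
		dac_out(minfo, 0x4c, mnp >> 16);
	}

	int main(void)
	{
		struct matrox_fb_info info = { 0 };

		old_style(&info, 0x123456);
		new_style(&info, 0x123456);
		return 0;
	}

After preprocessing, old_style() and new_style() are identical; the patch simply makes that expansion explicit in the source, which is why every hunk is a one-for-one substitution with no behavioural change.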
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index c14e3e2212b..f3728ab262f 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -41,7 +41,7 @@ static int matroxfb_read_gpio(struct matrox_fb_info* minfo) {
int v;
matroxfb_DAC_lock_irqsave(flags);
- v = matroxfb_DAC_in(PMINFO DAC_XGENIODATA);
+ v = matroxfb_DAC_in(minfo, DAC_XGENIODATA);
matroxfb_DAC_unlock_irqrestore(flags);
return v;
}
@@ -51,10 +51,10 @@ static void matroxfb_set_gpio(struct matrox_fb_info* minfo, int mask, int val) {
int v;
matroxfb_DAC_lock_irqsave(flags);
- v = (matroxfb_DAC_in(PMINFO DAC_XGENIOCTRL) & mask) | val;
- matroxfb_DAC_out(PMINFO DAC_XGENIOCTRL, v);
+ v = (matroxfb_DAC_in(minfo, DAC_XGENIOCTRL) & mask) | val;
+ matroxfb_DAC_out(minfo, DAC_XGENIOCTRL, v);
/* We must reset GENIODATA very often... XFree plays with this register */
- matroxfb_DAC_out(PMINFO DAC_XGENIODATA, 0x00);
+ matroxfb_DAC_out(minfo, DAC_XGENIODATA, 0x00);
matroxfb_DAC_unlock_irqrestore(flags);
}
@@ -112,7 +112,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
i2c_set_adapdata(&b->adapter, b);
b->adapter.class = class;
b->adapter.algo_data = &b->bac;
- b->adapter.dev.parent = &ACCESS_FBINFO(pcidev)->dev;
+ b->adapter.dev.parent = &minfo->pcidev->dev;
b->bac = matrox_i2c_algo_template;
b->bac.data = b;
err = i2c_bit_add_bus(&b->adapter);
@@ -149,11 +149,11 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
return NULL;
matroxfb_DAC_lock_irqsave(flags);
- matroxfb_DAC_out(PMINFO DAC_XGENIODATA, 0xFF);
- matroxfb_DAC_out(PMINFO DAC_XGENIOCTRL, 0x00);
+ matroxfb_DAC_out(minfo, DAC_XGENIODATA, 0xFF);
+ matroxfb_DAC_out(minfo, DAC_XGENIOCTRL, 0x00);
matroxfb_DAC_unlock_irqrestore(flags);
- switch (ACCESS_FBINFO(chip)) {
+ switch (minfo->chip) {
case MGA_2064:
case MGA_2164:
err = i2c_bus_reg(&m2info->ddc1, minfo,
@@ -168,7 +168,7 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
}
if (err)
goto fail_ddc1;
- if (ACCESS_FBINFO(devflags.dualhead)) {
+ if (minfo->devflags.dualhead) {
err = i2c_bus_reg(&m2info->ddc2, minfo,
DDC2_DATA, DDC2_CLK,
"DDC:fb%u #1", I2C_CLASS_DDC);
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
index a74e5da17aa..f9fa0fd0029 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/matrox/matroxfb_DAC1064.c
@@ -33,7 +33,11 @@
#define DAC1064_OPT_MDIV2 0x00
#define DAC1064_OPT_RESERVED 0x10
-static void DAC1064_calcclock(CPMINFO unsigned int freq, unsigned int fmax, unsigned int* in, unsigned int* feed, unsigned int* post) {
+static void DAC1064_calcclock(const struct matrox_fb_info *minfo,
+ unsigned int freq, unsigned int fmax,
+ unsigned int *in, unsigned int *feed,
+ unsigned int *post)
+{
unsigned int fvco;
unsigned int p;
@@ -41,7 +45,7 @@ static void DAC1064_calcclock(CPMINFO unsigned int freq, unsigned int fmax, unsi
/* only for devices older than G450 */
- fvco = PLL_calcclock(PMINFO freq, fmax, in, feed, &p);
+ fvco = PLL_calcclock(minfo, freq, fmax, in, feed, &p);
p = (1 << p) - 1;
if (fvco <= 100000)
@@ -80,32 +84,35 @@ static const unsigned char MGA1064_DAC[] = {
0x00,
0x00, 0x00, 0xFF, 0xFF};
-static void DAC1064_setpclk(WPMINFO unsigned long fout) {
+static void DAC1064_setpclk(struct matrox_fb_info *minfo, unsigned long fout)
+{
unsigned int m, n, p;
DBG(__func__)
- DAC1064_calcclock(PMINFO fout, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
- ACCESS_FBINFO(hw).DACclk[0] = m;
- ACCESS_FBINFO(hw).DACclk[1] = n;
- ACCESS_FBINFO(hw).DACclk[2] = p;
+ DAC1064_calcclock(minfo, fout, minfo->max_pixel_clock, &m, &n, &p);
+ minfo->hw.DACclk[0] = m;
+ minfo->hw.DACclk[1] = n;
+ minfo->hw.DACclk[2] = p;
}
-static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
+static void DAC1064_setmclk(struct matrox_fb_info *minfo, int oscinfo,
+ unsigned long fmem)
+{
u_int32_t mx;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- if (ACCESS_FBINFO(devflags.noinit)) {
+ if (minfo->devflags.noinit) {
/* read MCLK and give up... */
- hw->DACclk[3] = inDAC1064(PMINFO DAC1064_XSYSPLLM);
- hw->DACclk[4] = inDAC1064(PMINFO DAC1064_XSYSPLLN);
- hw->DACclk[5] = inDAC1064(PMINFO DAC1064_XSYSPLLP);
+ hw->DACclk[3] = inDAC1064(minfo, DAC1064_XSYSPLLM);
+ hw->DACclk[4] = inDAC1064(minfo, DAC1064_XSYSPLLN);
+ hw->DACclk[5] = inDAC1064(minfo, DAC1064_XSYSPLLP);
return;
}
mx = hw->MXoptionReg | 0x00000004;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, mx);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
mx &= ~0x000000BB;
if (oscinfo & DAC1064_OPT_GDIV1)
mx |= 0x00000008;
@@ -120,9 +127,9 @@ static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
/* powerup system PLL, select PCI clock */
mx |= 0x00000020;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, mx);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
mx &= ~0x00000004;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, mx);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
/* !!! you must not access device if MCLK is not running !!!
Doing so cause immediate PCI lockup :-( Maybe they should
@@ -131,12 +138,12 @@ static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
perfect... */
/* (bit 2 of PCI_OPTION_REG must be 0... and bits 0,1 must not
select PLL... because of PLL can be stopped at this time) */
- DAC1064_calcclock(PMINFO fmem, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
- outDAC1064(PMINFO DAC1064_XSYSPLLM, hw->DACclk[3] = m);
- outDAC1064(PMINFO DAC1064_XSYSPLLN, hw->DACclk[4] = n);
- outDAC1064(PMINFO DAC1064_XSYSPLLP, hw->DACclk[5] = p);
+ DAC1064_calcclock(minfo, fmem, minfo->max_pixel_clock, &m, &n, &p);
+ outDAC1064(minfo, DAC1064_XSYSPLLM, hw->DACclk[3] = m);
+ outDAC1064(minfo, DAC1064_XSYSPLLN, hw->DACclk[4] = n);
+ outDAC1064(minfo, DAC1064_XSYSPLLP, hw->DACclk[5] = p);
for (clk = 65536; clk; --clk) {
- if (inDAC1064(PMINFO DAC1064_XSYSPLLSTAT) & 0x40)
+ if (inDAC1064(minfo, DAC1064_XSYSPLLSTAT) & 0x40)
break;
}
if (!clk)
@@ -147,29 +154,30 @@ static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
/* select specified system clock source */
mx |= oscinfo & DAC1064_OPT_SCLK_MASK;
}
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, mx);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
mx &= ~0x00000004;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, mx);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
hw->MXoptionReg = mx;
}
#ifdef CONFIG_FB_MATROX_G
-static void g450_set_plls(WPMINFO2) {
+static void g450_set_plls(struct matrox_fb_info *minfo)
+{
u_int32_t c2_ctl;
unsigned int pxc;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
int pixelmnp;
int videomnp;
c2_ctl = hw->crtc2.ctl & ~0x4007; /* Clear PLL + enable for CRTC2 */
c2_ctl |= 0x0001; /* Enable CRTC2 */
hw->DACreg[POS1064_XPWRCTRL] &= ~0x02; /* Stop VIDEO PLL */
- pixelmnp = ACCESS_FBINFO(crtc1).mnp;
- videomnp = ACCESS_FBINFO(crtc2).mnp;
+ pixelmnp = minfo->crtc1.mnp;
+ videomnp = minfo->crtc2.mnp;
if (videomnp < 0) {
c2_ctl &= ~0x0001; /* Disable CRTC2 */
hw->DACreg[POS1064_XPWRCTRL] &= ~0x10; /* Powerdown CRTC2 */
- } else if (ACCESS_FBINFO(crtc2).pixclock == ACCESS_FBINFO(features).pll.ref_freq) {
+ } else if (minfo->crtc2.pixclock == minfo->features.pll.ref_freq) {
c2_ctl |= 0x4002; /* Use reference directly */
} else if (videomnp == pixelmnp) {
c2_ctl |= 0x0004; /* Use pixel PLL */
@@ -184,27 +192,27 @@ static void g450_set_plls(WPMINFO2) {
c2_ctl |= 0x0006; /* Use video PLL */
hw->DACreg[POS1064_XPWRCTRL] |= 0x02;
- outDAC1064(PMINFO M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
- matroxfb_g450_setpll_cond(PMINFO videomnp, M_VIDEO_PLL);
+ outDAC1064(minfo, M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
+ matroxfb_g450_setpll_cond(minfo, videomnp, M_VIDEO_PLL);
}
hw->DACreg[POS1064_XPIXCLKCTRL] &= ~M1064_XPIXCLKCTRL_PLL_UP;
if (pixelmnp >= 0) {
hw->DACreg[POS1064_XPIXCLKCTRL] |= M1064_XPIXCLKCTRL_PLL_UP;
- outDAC1064(PMINFO M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
- matroxfb_g450_setpll_cond(PMINFO pixelmnp, M_PIXEL_PLL_C);
+ outDAC1064(minfo, M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
+ matroxfb_g450_setpll_cond(minfo, pixelmnp, M_PIXEL_PLL_C);
}
if (c2_ctl != hw->crtc2.ctl) {
hw->crtc2.ctl = c2_ctl;
mga_outl(0x3C10, c2_ctl);
}
- pxc = ACCESS_FBINFO(crtc1).pixclock;
- if (pxc == 0 || ACCESS_FBINFO(outputs[2]).src == MATROXFB_SRC_CRTC2) {
- pxc = ACCESS_FBINFO(crtc2).pixclock;
+ pxc = minfo->crtc1.pixclock;
+ if (pxc == 0 || minfo->outputs[2].src == MATROXFB_SRC_CRTC2) {
+ pxc = minfo->crtc2.pixclock;
}
- if (ACCESS_FBINFO(chip) == MGA_G550) {
+ if (minfo->chip == MGA_G550) {
if (pxc < 45000) {
hw->DACreg[POS1064_XPANMODE] = 0x00; /* 0-50 */
} else if (pxc < 55000) {
@@ -245,18 +253,19 @@ static void g450_set_plls(WPMINFO2) {
}
#endif
-void DAC1064_global_init(WPMINFO2) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+void DAC1064_global_init(struct matrox_fb_info *minfo)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
hw->DACreg[POS1064_XMISCCTRL] &= M1064_XMISCCTRL_DAC_WIDTHMASK;
hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_LUT_EN;
hw->DACreg[POS1064_XPIXCLKCTRL] = M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_PLL;
#ifdef CONFIG_FB_MATROX_G
- if (ACCESS_FBINFO(devflags.g450dac)) {
+ if (minfo->devflags.g450dac) {
hw->DACreg[POS1064_XPWRCTRL] = 0x1F; /* powerup everything */
hw->DACreg[POS1064_XOUTPUTCONN] = 0x00; /* disable outputs */
hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_DAC_EN;
- switch (ACCESS_FBINFO(outputs[0]).src) {
+ switch (minfo->outputs[0].src) {
case MATROXFB_SRC_CRTC1:
case MATROXFB_SRC_CRTC2:
hw->DACreg[POS1064_XOUTPUTCONN] |= 0x01; /* enable output; CRTC1/2 selection is in CRTC2 ctl */
@@ -265,12 +274,12 @@ void DAC1064_global_init(WPMINFO2) {
hw->DACreg[POS1064_XMISCCTRL] &= ~M1064_XMISCCTRL_DAC_EN;
break;
}
- switch (ACCESS_FBINFO(outputs[1]).src) {
+ switch (minfo->outputs[1].src) {
case MATROXFB_SRC_CRTC1:
hw->DACreg[POS1064_XOUTPUTCONN] |= 0x04;
break;
case MATROXFB_SRC_CRTC2:
- if (ACCESS_FBINFO(outputs[1]).mode == MATROXFB_OUTPUT_MODE_MONITOR) {
+ if (minfo->outputs[1].mode == MATROXFB_OUTPUT_MODE_MONITOR) {
hw->DACreg[POS1064_XOUTPUTCONN] |= 0x08;
} else {
hw->DACreg[POS1064_XOUTPUTCONN] |= 0x0C;
@@ -280,7 +289,7 @@ void DAC1064_global_init(WPMINFO2) {
hw->DACreg[POS1064_XPWRCTRL] &= ~0x01; /* Poweroff DAC2 */
break;
}
- switch (ACCESS_FBINFO(outputs[2]).src) {
+ switch (minfo->outputs[2].src) {
case MATROXFB_SRC_CRTC1:
hw->DACreg[POS1064_XOUTPUTCONN] |= 0x20;
break;
@@ -299,55 +308,57 @@ void DAC1064_global_init(WPMINFO2) {
break;
}
/* Now set timming related variables... */
- g450_set_plls(PMINFO2);
+ g450_set_plls(minfo);
} else
#endif
{
- if (ACCESS_FBINFO(outputs[1]).src == MATROXFB_SRC_CRTC1) {
+ if (minfo->outputs[1].src == MATROXFB_SRC_CRTC1) {
hw->DACreg[POS1064_XPIXCLKCTRL] = M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_EXT;
hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_MAFC | G400_XMISCCTRL_VDO_MAFC12;
- } else if (ACCESS_FBINFO(outputs[1]).src == MATROXFB_SRC_CRTC2) {
+ } else if (minfo->outputs[1].src == MATROXFB_SRC_CRTC2) {
hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_MAFC | G400_XMISCCTRL_VDO_C2_MAFC12;
- } else if (ACCESS_FBINFO(outputs[2]).src == MATROXFB_SRC_CRTC1)
+ } else if (minfo->outputs[2].src == MATROXFB_SRC_CRTC1)
hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_PANELLINK | G400_XMISCCTRL_VDO_MAFC12;
else
hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_DIS;
- if (ACCESS_FBINFO(outputs[0]).src != MATROXFB_SRC_NONE)
+ if (minfo->outputs[0].src != MATROXFB_SRC_NONE)
hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_DAC_EN;
}
}
-void DAC1064_global_restore(WPMINFO2) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
-
- outDAC1064(PMINFO M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
- outDAC1064(PMINFO M1064_XMISCCTRL, hw->DACreg[POS1064_XMISCCTRL]);
- if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG400) {
- outDAC1064(PMINFO 0x20, 0x04);
- outDAC1064(PMINFO 0x1F, ACCESS_FBINFO(devflags.dfp_type));
- if (ACCESS_FBINFO(devflags.g450dac)) {
- outDAC1064(PMINFO M1064_XSYNCCTRL, 0xCC);
- outDAC1064(PMINFO M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
- outDAC1064(PMINFO M1064_XPANMODE, hw->DACreg[POS1064_XPANMODE]);
- outDAC1064(PMINFO M1064_XOUTPUTCONN, hw->DACreg[POS1064_XOUTPUTCONN]);
+void DAC1064_global_restore(struct matrox_fb_info *minfo)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
+
+ outDAC1064(minfo, M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
+ outDAC1064(minfo, M1064_XMISCCTRL, hw->DACreg[POS1064_XMISCCTRL]);
+ if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400) {
+ outDAC1064(minfo, 0x20, 0x04);
+ outDAC1064(minfo, 0x1F, minfo->devflags.dfp_type);
+ if (minfo->devflags.g450dac) {
+ outDAC1064(minfo, M1064_XSYNCCTRL, 0xCC);
+ outDAC1064(minfo, M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
+ outDAC1064(minfo, M1064_XPANMODE, hw->DACreg[POS1064_XPANMODE]);
+ outDAC1064(minfo, M1064_XOUTPUTCONN, hw->DACreg[POS1064_XOUTPUTCONN]);
}
}
}
-static int DAC1064_init_1(WPMINFO struct my_timming* m) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+static int DAC1064_init_1(struct matrox_fb_info *minfo, struct my_timming *m)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
memcpy(hw->DACreg, MGA1064_DAC, sizeof(MGA1064_DAC_regs));
- switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
+ switch (minfo->fbcon.var.bits_per_pixel) {
/* case 4: not supported by MGA1064 DAC */
case 8:
hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_8BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
break;
case 16:
- if (ACCESS_FBINFO(fbcon).var.green.length == 5)
+ if (minfo->fbcon.var.green.length == 5)
hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_15BPP_1BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
else
hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_16BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
@@ -361,22 +372,23 @@ static int DAC1064_init_1(WPMINFO struct my_timming* m) {
default:
return 1; /* unsupported depth */
}
- hw->DACreg[POS1064_XVREFCTRL] = ACCESS_FBINFO(features.DAC1064.xvrefctrl);
+ hw->DACreg[POS1064_XVREFCTRL] = minfo->features.DAC1064.xvrefctrl;
hw->DACreg[POS1064_XGENCTRL] &= ~M1064_XGENCTRL_SYNC_ON_GREEN_MASK;
hw->DACreg[POS1064_XGENCTRL] |= (m->sync & FB_SYNC_ON_GREEN)?M1064_XGENCTRL_SYNC_ON_GREEN:M1064_XGENCTRL_NO_SYNC_ON_GREEN;
hw->DACreg[POS1064_XCURADDL] = 0;
hw->DACreg[POS1064_XCURADDH] = 0;
- DAC1064_global_init(PMINFO2);
+ DAC1064_global_init(minfo);
return 0;
}
-static int DAC1064_init_2(WPMINFO struct my_timming* m) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+static int DAC1064_init_2(struct matrox_fb_info *minfo, struct my_timming *m)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- if (ACCESS_FBINFO(fbcon).var.bits_per_pixel > 16) { /* 256 entries */
+ if (minfo->fbcon.var.bits_per_pixel > 16) { /* 256 entries */
int i;
for (i = 0; i < 256; i++) {
@@ -384,8 +396,8 @@ static int DAC1064_init_2(WPMINFO struct my_timming* m) {
hw->DACpal[i * 3 + 1] = i;
hw->DACpal[i * 3 + 2] = i;
}
- } else if (ACCESS_FBINFO(fbcon).var.bits_per_pixel > 8) {
- if (ACCESS_FBINFO(fbcon).var.green.length == 5) { /* 0..31, 128..159 */
+ } else if (minfo->fbcon.var.bits_per_pixel > 8) {
+ if (minfo->fbcon.var.green.length == 5) { /* 0..31, 128..159 */
int i;
for (i = 0; i < 32; i++) {
@@ -413,8 +425,9 @@ static int DAC1064_init_2(WPMINFO struct my_timming* m) {
return 0;
}
-static void DAC1064_restore_1(WPMINFO2) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+static void DAC1064_restore_1(struct matrox_fb_info *minfo)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
CRITFLAGS
@@ -422,28 +435,29 @@ static void DAC1064_restore_1(WPMINFO2) {
CRITBEGIN
- if ((inDAC1064(PMINFO DAC1064_XSYSPLLM) != hw->DACclk[3]) ||
- (inDAC1064(PMINFO DAC1064_XSYSPLLN) != hw->DACclk[4]) ||
- (inDAC1064(PMINFO DAC1064_XSYSPLLP) != hw->DACclk[5])) {
- outDAC1064(PMINFO DAC1064_XSYSPLLM, hw->DACclk[3]);
- outDAC1064(PMINFO DAC1064_XSYSPLLN, hw->DACclk[4]);
- outDAC1064(PMINFO DAC1064_XSYSPLLP, hw->DACclk[5]);
+ if ((inDAC1064(minfo, DAC1064_XSYSPLLM) != hw->DACclk[3]) ||
+ (inDAC1064(minfo, DAC1064_XSYSPLLN) != hw->DACclk[4]) ||
+ (inDAC1064(minfo, DAC1064_XSYSPLLP) != hw->DACclk[5])) {
+ outDAC1064(minfo, DAC1064_XSYSPLLM, hw->DACclk[3]);
+ outDAC1064(minfo, DAC1064_XSYSPLLN, hw->DACclk[4]);
+ outDAC1064(minfo, DAC1064_XSYSPLLP, hw->DACclk[5]);
}
{
unsigned int i;
for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) {
if ((i != POS1064_XPIXCLKCTRL) && (i != POS1064_XMISCCTRL))
- outDAC1064(PMINFO MGA1064_DAC_regs[i], hw->DACreg[i]);
+ outDAC1064(minfo, MGA1064_DAC_regs[i], hw->DACreg[i]);
}
}
- DAC1064_global_restore(PMINFO2);
+ DAC1064_global_restore(minfo);
CRITEND
};
-static void DAC1064_restore_2(WPMINFO2) {
+static void DAC1064_restore_2(struct matrox_fb_info *minfo)
+{
#ifdef DEBUG
unsigned int i;
#endif
@@ -453,12 +467,12 @@ static void DAC1064_restore_2(WPMINFO2) {
#ifdef DEBUG
dprintk(KERN_DEBUG "DAC1064regs ");
for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) {
- dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], ACCESS_FBINFO(hw).DACreg[i]);
+ dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], minfo->hw.DACreg[i]);
if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... ");
}
dprintk(KERN_DEBUG "DAC1064clk ");
for (i = 0; i < 6; i++)
- dprintk("C%02X=%02X ", i, ACCESS_FBINFO(hw).DACclk[i]);
+ dprintk("C%02X=%02X ", i, minfo->hw.DACclk[i]);
dprintk("\n");
#endif
}
@@ -470,14 +484,14 @@ static int m1064_compute(void* out, struct my_timming* m) {
int tmout;
CRITFLAGS
- DAC1064_setpclk(PMINFO m->pixclock);
+ DAC1064_setpclk(minfo, m->pixclock);
CRITBEGIN
for (i = 0; i < 3; i++)
- outDAC1064(PMINFO M1064_XPIXPLLCM + i, ACCESS_FBINFO(hw).DACclk[i]);
+ outDAC1064(minfo, M1064_XPIXPLLCM + i, minfo->hw.DACclk[i]);
for (tmout = 500000; tmout; tmout--) {
- if (inDAC1064(PMINFO M1064_XPIXPLLSTAT) & 0x40)
+ if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
break;
udelay(10);
};
@@ -500,9 +514,9 @@ static struct matrox_altout m1064 = {
static int g450_compute(void* out, struct my_timming* m) {
#define minfo ((struct matrox_fb_info*)out)
if (m->mnp < 0) {
- m->mnp = matroxfb_g450_setclk(PMINFO m->pixclock, (m->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
+ m->mnp = matroxfb_g450_setclk(minfo, m->pixclock, (m->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
if (m->mnp >= 0) {
- m->pixclock = g450_mnp2f(PMINFO m->mnp);
+ m->pixclock = g450_mnp2f(minfo, m->mnp);
}
}
#undef minfo
@@ -518,13 +532,14 @@ static struct matrox_altout g450out = {
#endif /* NEED_DAC1064 */
#ifdef CONFIG_FB_MATROX_MYSTIQUE
-static int MGA1064_init(WPMINFO struct my_timming* m) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+static int MGA1064_init(struct matrox_fb_info *minfo, struct my_timming *m)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- if (DAC1064_init_1(PMINFO m)) return 1;
- if (matroxfb_vgaHWinit(PMINFO m)) return 1;
+ if (DAC1064_init_1(minfo, m)) return 1;
+ if (matroxfb_vgaHWinit(minfo, m)) return 1;
hw->MiscOutReg = 0xCB;
if (m->sync & FB_SYNC_HOR_HIGH_ACT)
@@ -534,20 +549,21 @@ static int MGA1064_init(WPMINFO struct my_timming* m) {
if (m->sync & FB_SYNC_COMP_HIGH_ACT) /* should be only FB_SYNC_COMP */
hw->CRTCEXT[3] |= 0x40;
- if (DAC1064_init_2(PMINFO m)) return 1;
+ if (DAC1064_init_2(minfo, m)) return 1;
return 0;
}
#endif
#ifdef CONFIG_FB_MATROX_G
-static int MGAG100_init(WPMINFO struct my_timming* m) {
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+static int MGAG100_init(struct matrox_fb_info *minfo, struct my_timming *m)
+{
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- if (DAC1064_init_1(PMINFO m)) return 1;
+ if (DAC1064_init_1(minfo, m)) return 1;
hw->MXoptionReg &= ~0x2000;
- if (matroxfb_vgaHWinit(PMINFO m)) return 1;
+ if (matroxfb_vgaHWinit(minfo, m)) return 1;
hw->MiscOutReg = 0xEF;
if (m->sync & FB_SYNC_HOR_HIGH_ACT)
@@ -557,27 +573,28 @@ static int MGAG100_init(WPMINFO struct my_timming* m) {
if (m->sync & FB_SYNC_COMP_HIGH_ACT) /* should be only FB_SYNC_COMP */
hw->CRTCEXT[3] |= 0x40;
- if (DAC1064_init_2(PMINFO m)) return 1;
+ if (DAC1064_init_2(minfo, m)) return 1;
return 0;
}
#endif /* G */
#ifdef CONFIG_FB_MATROX_MYSTIQUE
-static void MGA1064_ramdac_init(WPMINFO2) {
+static void MGA1064_ramdac_init(struct matrox_fb_info *minfo)
+{
DBG(__func__)
- /* ACCESS_FBINFO(features.DAC1064.vco_freq_min) = 120000; */
- ACCESS_FBINFO(features.pll.vco_freq_min) = 62000;
- ACCESS_FBINFO(features.pll.ref_freq) = 14318;
- ACCESS_FBINFO(features.pll.feed_div_min) = 100;
- ACCESS_FBINFO(features.pll.feed_div_max) = 127;
- ACCESS_FBINFO(features.pll.in_div_min) = 1;
- ACCESS_FBINFO(features.pll.in_div_max) = 31;
- ACCESS_FBINFO(features.pll.post_shift_max) = 3;
- ACCESS_FBINFO(features.DAC1064.xvrefctrl) = DAC1064_XVREFCTRL_EXTERNAL;
+ /* minfo->features.DAC1064.vco_freq_min = 120000; */
+ minfo->features.pll.vco_freq_min = 62000;
+ minfo->features.pll.ref_freq = 14318;
+ minfo->features.pll.feed_div_min = 100;
+ minfo->features.pll.feed_div_max = 127;
+ minfo->features.pll.in_div_min = 1;
+ minfo->features.pll.in_div_max = 31;
+ minfo->features.pll.post_shift_max = 3;
+ minfo->features.DAC1064.xvrefctrl = DAC1064_XVREFCTRL_EXTERNAL;
/* maybe cmdline MCLK= ?, doc says gclk=44MHz, mclk=66MHz... it was 55/83 with old values */
- DAC1064_setmclk(PMINFO DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV3 | DAC1064_OPT_SCLK_PLL, 133333);
+ DAC1064_setmclk(minfo, DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV3 | DAC1064_OPT_SCLK_PLL, 133333);
}
#endif
@@ -589,23 +606,25 @@ static int x7AF4 = 0x10; /* flags, maybe 0x10 = SDRAM, 0x00 = SGRAM??? */
static int def50 = 0; /* reg50, & 0x0F, & 0x3000 (only 0x0000, 0x1000, 0x2000 (0x3000 disallowed and treated as 0) */
#endif
-static void MGAG100_progPixClock(CPMINFO int flags, int m, int n, int p) {
+static void MGAG100_progPixClock(const struct matrox_fb_info *minfo, int flags,
+ int m, int n, int p)
+{
int reg;
int selClk;
int clk;
DBG(__func__)
- outDAC1064(PMINFO M1064_XPIXCLKCTRL, inDAC1064(PMINFO M1064_XPIXCLKCTRL) | M1064_XPIXCLKCTRL_DIS |
+ outDAC1064(minfo, M1064_XPIXCLKCTRL, inDAC1064(minfo, M1064_XPIXCLKCTRL) | M1064_XPIXCLKCTRL_DIS |
M1064_XPIXCLKCTRL_PLL_UP);
switch (flags & 3) {
case 0: reg = M1064_XPIXPLLAM; break;
case 1: reg = M1064_XPIXPLLBM; break;
default: reg = M1064_XPIXPLLCM; break;
}
- outDAC1064(PMINFO reg++, m);
- outDAC1064(PMINFO reg++, n);
- outDAC1064(PMINFO reg, p);
+ outDAC1064(minfo, reg++, m);
+ outDAC1064(minfo, reg++, n);
+ outDAC1064(minfo, reg, p);
selClk = mga_inb(M_MISC_REG_READ) & ~0xC;
/* there should be flags & 0x03 & case 0/1/else */
/* and we should first select source and after that we should wait for PLL */
@@ -617,61 +636,64 @@ static void MGAG100_progPixClock(CPMINFO int flags, int m, int n, int p) {
}
mga_outb(M_MISC_REG, selClk);
for (clk = 500000; clk; clk--) {
- if (inDAC1064(PMINFO M1064_XPIXPLLSTAT) & 0x40)
+ if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
break;
udelay(10);
};
if (!clk)
printk(KERN_ERR "matroxfb: Pixel PLL%c not locked after usual time\n", (reg-M1064_XPIXPLLAM-2)/4 + 'A');
- selClk = inDAC1064(PMINFO M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_SRC_MASK;
+ selClk = inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_SRC_MASK;
switch (flags & 0x0C) {
case 0x00: selClk |= M1064_XPIXCLKCTRL_SRC_PCI; break;
case 0x04: selClk |= M1064_XPIXCLKCTRL_SRC_PLL; break;
default: selClk |= M1064_XPIXCLKCTRL_SRC_EXT; break;
}
- outDAC1064(PMINFO M1064_XPIXCLKCTRL, selClk);
- outDAC1064(PMINFO M1064_XPIXCLKCTRL, inDAC1064(PMINFO M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_DIS);
+ outDAC1064(minfo, M1064_XPIXCLKCTRL, selClk);
+ outDAC1064(minfo, M1064_XPIXCLKCTRL, inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_DIS);
}
-static void MGAG100_setPixClock(CPMINFO int flags, int freq) {
+static void MGAG100_setPixClock(const struct matrox_fb_info *minfo, int flags,
+ int freq)
+{
unsigned int m, n, p;
DBG(__func__)
- DAC1064_calcclock(PMINFO freq, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
- MGAG100_progPixClock(PMINFO flags, m, n, p);
+ DAC1064_calcclock(minfo, freq, minfo->max_pixel_clock, &m, &n, &p);
+ MGAG100_progPixClock(minfo, flags, m, n, p);
}
#endif
#ifdef CONFIG_FB_MATROX_MYSTIQUE
-static int MGA1064_preinit(WPMINFO2) {
+static int MGA1064_preinit(struct matrox_fb_info *minfo)
+{
static const int vxres_mystique[] = { 512, 640, 768, 800, 832, 960,
1024, 1152, 1280, 1600, 1664, 1920,
2048, 0};
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- /* ACCESS_FBINFO(capable.cfb4) = 0; ... preinitialized by 0 */
- ACCESS_FBINFO(capable.text) = 1;
- ACCESS_FBINFO(capable.vxres) = vxres_mystique;
+ /* minfo->capable.cfb4 = 0; ... preinitialized by 0 */
+ minfo->capable.text = 1;
+ minfo->capable.vxres = vxres_mystique;
- ACCESS_FBINFO(outputs[0]).output = &m1064;
- ACCESS_FBINFO(outputs[0]).src = ACCESS_FBINFO(outputs[0]).default_src;
- ACCESS_FBINFO(outputs[0]).data = MINFO;
- ACCESS_FBINFO(outputs[0]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ minfo->outputs[0].output = &m1064;
+ minfo->outputs[0].src = minfo->outputs[0].default_src;
+ minfo->outputs[0].data = minfo;
+ minfo->outputs[0].mode = MATROXFB_OUTPUT_MODE_MONITOR;
- if (ACCESS_FBINFO(devflags.noinit))
+ if (minfo->devflags.noinit)
return 0; /* do not modify settings */
hw->MXoptionReg &= 0xC0000100;
hw->MXoptionReg |= 0x00094E20;
- if (ACCESS_FBINFO(devflags.novga))
+ if (minfo->devflags.novga)
hw->MXoptionReg &= ~0x00000100;
- if (ACCESS_FBINFO(devflags.nobios))
+ if (minfo->devflags.nobios)
hw->MXoptionReg &= ~0x40000000;
- if (ACCESS_FBINFO(devflags.nopciretry))
+ if (minfo->devflags.nopciretry)
hw->MXoptionReg |= 0x20000000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
mga_setr(M_SEQ_INDEX, 0x01, 0x20);
mga_outl(M_CTLWTST, 0x00000000);
udelay(200);
@@ -681,101 +703,105 @@ static int MGA1064_preinit(WPMINFO2) {
return 0;
}
-static void MGA1064_reset(WPMINFO2) {
+static void MGA1064_reset(struct matrox_fb_info *minfo)
+{
DBG(__func__);
- MGA1064_ramdac_init(PMINFO2);
+ MGA1064_ramdac_init(minfo);
}
#endif
#ifdef CONFIG_FB_MATROX_G
-static void g450_mclk_init(WPMINFO2) {
+static void g450_mclk_init(struct matrox_fb_info *minfo)
+{
/* switch all clocks to PCI source */
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg | 4);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION3_REG, ACCESS_FBINFO(values).reg.opt3 & ~0x00300C03);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
-
- if (((ACCESS_FBINFO(values).reg.opt3 & 0x000003) == 0x000003) ||
- ((ACCESS_FBINFO(values).reg.opt3 & 0x000C00) == 0x000C00) ||
- ((ACCESS_FBINFO(values).reg.opt3 & 0x300000) == 0x300000)) {
- matroxfb_g450_setclk(PMINFO ACCESS_FBINFO(values.pll.video), M_VIDEO_PLL);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg | 4);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION3_REG, minfo->values.reg.opt3 & ~0x00300C03);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
+
+ if (((minfo->values.reg.opt3 & 0x000003) == 0x000003) ||
+ ((minfo->values.reg.opt3 & 0x000C00) == 0x000C00) ||
+ ((minfo->values.reg.opt3 & 0x300000) == 0x300000)) {
+ matroxfb_g450_setclk(minfo, minfo->values.pll.video, M_VIDEO_PLL);
} else {
unsigned long flags;
unsigned int pwr;
matroxfb_DAC_lock_irqsave(flags);
- pwr = inDAC1064(PMINFO M1064_XPWRCTRL) & ~0x02;
- outDAC1064(PMINFO M1064_XPWRCTRL, pwr);
+ pwr = inDAC1064(minfo, M1064_XPWRCTRL) & ~0x02;
+ outDAC1064(minfo, M1064_XPWRCTRL, pwr);
matroxfb_DAC_unlock_irqrestore(flags);
}
- matroxfb_g450_setclk(PMINFO ACCESS_FBINFO(values.pll.system), M_SYSTEM_PLL);
+ matroxfb_g450_setclk(minfo, minfo->values.pll.system, M_SYSTEM_PLL);
/* switch clocks to their real PLL source(s) */
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg | 4);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION3_REG, ACCESS_FBINFO(values).reg.opt3);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg | 4);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION3_REG, minfo->values.reg.opt3);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
}
-static void g450_memory_init(WPMINFO2) {
+static void g450_memory_init(struct matrox_fb_info *minfo)
+{
/* disable memory refresh */
- ACCESS_FBINFO(hw).MXoptionReg &= ~0x001F8000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
+ minfo->hw.MXoptionReg &= ~0x001F8000;
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
/* set memory interface parameters */
- ACCESS_FBINFO(hw).MXoptionReg &= ~0x00207E00;
- ACCESS_FBINFO(hw).MXoptionReg |= 0x00207E00 & ACCESS_FBINFO(values).reg.opt;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, ACCESS_FBINFO(values).reg.opt2);
+ minfo->hw.MXoptionReg &= ~0x00207E00;
+ minfo->hw.MXoptionReg |= 0x00207E00 & minfo->values.reg.opt;
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, minfo->values.reg.opt2);
- mga_outl(M_CTLWTST, ACCESS_FBINFO(values).reg.mctlwtst);
+ mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
/* first set up memory interface with disabled memory interface clocks */
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_MEMMISC_REG, ACCESS_FBINFO(values).reg.memmisc & ~0x80000000U);
- mga_outl(M_MEMRDBK, ACCESS_FBINFO(values).reg.memrdbk);
- mga_outl(M_MACCESS, ACCESS_FBINFO(values).reg.maccess);
+ pci_write_config_dword(minfo->pcidev, PCI_MEMMISC_REG, minfo->values.reg.memmisc & ~0x80000000U);
+ mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
+ mga_outl(M_MACCESS, minfo->values.reg.maccess);
/* start memory clocks */
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_MEMMISC_REG, ACCESS_FBINFO(values).reg.memmisc | 0x80000000U);
+ pci_write_config_dword(minfo->pcidev, PCI_MEMMISC_REG, minfo->values.reg.memmisc | 0x80000000U);
udelay(200);
- if (ACCESS_FBINFO(values).memory.ddr && (!ACCESS_FBINFO(values).memory.emrswen || !ACCESS_FBINFO(values).memory.dll)) {
- mga_outl(M_MEMRDBK, ACCESS_FBINFO(values).reg.memrdbk & ~0x1000);
+ if (minfo->values.memory.ddr && (!minfo->values.memory.emrswen || !minfo->values.memory.dll)) {
+ mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk & ~0x1000);
}
- mga_outl(M_MACCESS, ACCESS_FBINFO(values).reg.maccess | 0x8000);
+ mga_outl(M_MACCESS, minfo->values.reg.maccess | 0x8000);
udelay(200);
- ACCESS_FBINFO(hw).MXoptionReg |= 0x001F8000 & ACCESS_FBINFO(values).reg.opt;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
+ minfo->hw.MXoptionReg |= 0x001F8000 & minfo->values.reg.opt;
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
/* value is written to memory chips only if old != new */
mga_outl(M_PLNWT, 0);
mga_outl(M_PLNWT, ~0);
- if (ACCESS_FBINFO(values).reg.mctlwtst != ACCESS_FBINFO(values).reg.mctlwtst_core) {
- mga_outl(M_CTLWTST, ACCESS_FBINFO(values).reg.mctlwtst_core);
+ if (minfo->values.reg.mctlwtst != minfo->values.reg.mctlwtst_core) {
+ mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst_core);
}
}
-static void g450_preinit(WPMINFO2) {
+static void g450_preinit(struct matrox_fb_info *minfo)
+{
u_int32_t c2ctl;
u_int8_t curctl;
u_int8_t c1ctl;
- /* ACCESS_FBINFO(hw).MXoptionReg = minfo->values.reg.opt; */
- ACCESS_FBINFO(hw).MXoptionReg &= 0xC0000100;
- ACCESS_FBINFO(hw).MXoptionReg |= 0x00000020;
- if (ACCESS_FBINFO(devflags.novga))
- ACCESS_FBINFO(hw).MXoptionReg &= ~0x00000100;
- if (ACCESS_FBINFO(devflags.nobios))
- ACCESS_FBINFO(hw).MXoptionReg &= ~0x40000000;
- if (ACCESS_FBINFO(devflags.nopciretry))
- ACCESS_FBINFO(hw).MXoptionReg |= 0x20000000;
- ACCESS_FBINFO(hw).MXoptionReg |= ACCESS_FBINFO(values).reg.opt & 0x03400040;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
+ /* minfo->hw.MXoptionReg = minfo->values.reg.opt; */
+ minfo->hw.MXoptionReg &= 0xC0000100;
+ minfo->hw.MXoptionReg |= 0x00000020;
+ if (minfo->devflags.novga)
+ minfo->hw.MXoptionReg &= ~0x00000100;
+ if (minfo->devflags.nobios)
+ minfo->hw.MXoptionReg &= ~0x40000000;
+ if (minfo->devflags.nopciretry)
+ minfo->hw.MXoptionReg |= 0x20000000;
+ minfo->hw.MXoptionReg |= minfo->values.reg.opt & 0x03400040;
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
/* Init system clocks */
@@ -783,24 +809,24 @@ static void g450_preinit(WPMINFO2) {
c2ctl = mga_inl(M_C2CTL);
mga_outl(M_C2CTL, c2ctl & ~1);
/* stop cursor */
- curctl = inDAC1064(PMINFO M1064_XCURCTRL);
- outDAC1064(PMINFO M1064_XCURCTRL, 0);
+ curctl = inDAC1064(minfo, M1064_XCURCTRL);
+ outDAC1064(minfo, M1064_XCURCTRL, 0);
/* stop crtc1 */
c1ctl = mga_readr(M_SEQ_INDEX, 1);
mga_setr(M_SEQ_INDEX, 1, c1ctl | 0x20);
- g450_mclk_init(PMINFO2);
- g450_memory_init(PMINFO2);
+ g450_mclk_init(minfo);
+ g450_memory_init(minfo);
/* set legacy VGA clock sources for DOSEmu or VMware... */
- matroxfb_g450_setclk(PMINFO 25175, M_PIXEL_PLL_A);
- matroxfb_g450_setclk(PMINFO 28322, M_PIXEL_PLL_B);
+ matroxfb_g450_setclk(minfo, 25175, M_PIXEL_PLL_A);
+ matroxfb_g450_setclk(minfo, 28322, M_PIXEL_PLL_B);
/* restore crtc1 */
mga_setr(M_SEQ_INDEX, 1, c1ctl);
/* restore cursor */
- outDAC1064(PMINFO M1064_XCURCTRL, curctl);
+ outDAC1064(minfo, M1064_XCURCTRL, curctl);
/* restore crtc2 */
mga_outl(M_C2CTL, c2ctl);
@@ -808,11 +834,12 @@ static void g450_preinit(WPMINFO2) {
return;
}
-static int MGAG100_preinit(WPMINFO2) {
+static int MGAG100_preinit(struct matrox_fb_info *minfo)
+{
static const int vxres_g100[] = { 512, 640, 768, 800, 832, 960,
1024, 1152, 1280, 1600, 1664, 1920,
2048, 0};
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
u_int32_t reg50;
#if 0
@@ -822,68 +849,68 @@ static int MGAG100_preinit(WPMINFO2) {
DBG(__func__)
/* there are some instabilities if in_div > 19 && vco < 61000 */
- if (ACCESS_FBINFO(devflags.g450dac)) {
- ACCESS_FBINFO(features.pll.vco_freq_min) = 130000; /* my sample: >118 */
+ if (minfo->devflags.g450dac) {
+ minfo->features.pll.vco_freq_min = 130000; /* my sample: >118 */
} else {
- ACCESS_FBINFO(features.pll.vco_freq_min) = 62000;
+ minfo->features.pll.vco_freq_min = 62000;
}
- if (!ACCESS_FBINFO(features.pll.ref_freq)) {
- ACCESS_FBINFO(features.pll.ref_freq) = 27000;
+ if (!minfo->features.pll.ref_freq) {
+ minfo->features.pll.ref_freq = 27000;
}
- ACCESS_FBINFO(features.pll.feed_div_min) = 7;
- ACCESS_FBINFO(features.pll.feed_div_max) = 127;
- ACCESS_FBINFO(features.pll.in_div_min) = 1;
- ACCESS_FBINFO(features.pll.in_div_max) = 31;
- ACCESS_FBINFO(features.pll.post_shift_max) = 3;
- ACCESS_FBINFO(features.DAC1064.xvrefctrl) = DAC1064_XVREFCTRL_G100_DEFAULT;
- /* ACCESS_FBINFO(capable.cfb4) = 0; ... preinitialized by 0 */
- ACCESS_FBINFO(capable.text) = 1;
- ACCESS_FBINFO(capable.vxres) = vxres_g100;
- ACCESS_FBINFO(capable.plnwt) = ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG100
- ? ACCESS_FBINFO(devflags.sgram) : 1;
+ minfo->features.pll.feed_div_min = 7;
+ minfo->features.pll.feed_div_max = 127;
+ minfo->features.pll.in_div_min = 1;
+ minfo->features.pll.in_div_max = 31;
+ minfo->features.pll.post_shift_max = 3;
+ minfo->features.DAC1064.xvrefctrl = DAC1064_XVREFCTRL_G100_DEFAULT;
+ /* minfo->capable.cfb4 = 0; ... preinitialized by 0 */
+ minfo->capable.text = 1;
+ minfo->capable.vxres = vxres_g100;
+ minfo->capable.plnwt = minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100
+ ? minfo->devflags.sgram : 1;
#ifdef CONFIG_FB_MATROX_G
- if (ACCESS_FBINFO(devflags.g450dac)) {
- ACCESS_FBINFO(outputs[0]).output = &g450out;
+ if (minfo->devflags.g450dac) {
+ minfo->outputs[0].output = &g450out;
} else
#endif
{
- ACCESS_FBINFO(outputs[0]).output = &m1064;
+ minfo->outputs[0].output = &m1064;
}
- ACCESS_FBINFO(outputs[0]).src = ACCESS_FBINFO(outputs[0]).default_src;
- ACCESS_FBINFO(outputs[0]).data = MINFO;
- ACCESS_FBINFO(outputs[0]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ minfo->outputs[0].src = minfo->outputs[0].default_src;
+ minfo->outputs[0].data = minfo;
+ minfo->outputs[0].mode = MATROXFB_OUTPUT_MODE_MONITOR;
- if (ACCESS_FBINFO(devflags.g450dac)) {
+ if (minfo->devflags.g450dac) {
/* we must do this always, BIOS does not do it for us
and accelerator dies without it */
mga_outl(0x1C0C, 0);
}
- if (ACCESS_FBINFO(devflags.noinit))
+ if (minfo->devflags.noinit)
return 0;
- if (ACCESS_FBINFO(devflags.g450dac)) {
- g450_preinit(PMINFO2);
+ if (minfo->devflags.g450dac) {
+ g450_preinit(minfo);
return 0;
}
hw->MXoptionReg &= 0xC0000100;
hw->MXoptionReg |= 0x00000020;
- if (ACCESS_FBINFO(devflags.novga))
+ if (minfo->devflags.novga)
hw->MXoptionReg &= ~0x00000100;
- if (ACCESS_FBINFO(devflags.nobios))
+ if (minfo->devflags.nobios)
hw->MXoptionReg &= ~0x40000000;
- if (ACCESS_FBINFO(devflags.nopciretry))
+ if (minfo->devflags.nopciretry)
hw->MXoptionReg |= 0x20000000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
- DAC1064_setmclk(PMINFO DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV3 | DAC1064_OPT_SCLK_PCI, 133333);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
+ DAC1064_setmclk(minfo, DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV3 | DAC1064_OPT_SCLK_PCI, 133333);
- if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG100) {
- pci_read_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, &reg50);
+ if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100) {
+ pci_read_config_dword(minfo->pcidev, PCI_OPTION2_REG, &reg50);
reg50 &= ~0x3000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, reg50);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
hw->MXoptionReg |= 0x1080;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
- mga_outl(M_CTLWTST, ACCESS_FBINFO(values).reg.mctlwtst);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
+ mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
udelay(100);
mga_outb(0x1C05, 0x00);
mga_outb(0x1C05, 0x80);
@@ -893,68 +920,69 @@ static int MGAG100_preinit(WPMINFO2) {
udelay(100);
reg50 &= ~0xFF;
reg50 |= 0x07;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, reg50);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
/* it should help with G100 */
mga_outb(M_GRAPHICS_INDEX, 6);
mga_outb(M_GRAPHICS_DATA, (mga_inb(M_GRAPHICS_DATA) & 3) | 4);
mga_setr(M_EXTVGA_INDEX, 0x03, 0x81);
mga_setr(M_EXTVGA_INDEX, 0x04, 0x00);
- mga_writeb(ACCESS_FBINFO(video.vbase), 0x0000, 0xAA);
- mga_writeb(ACCESS_FBINFO(video.vbase), 0x0800, 0x55);
- mga_writeb(ACCESS_FBINFO(video.vbase), 0x4000, 0x55);
+ mga_writeb(minfo->video.vbase, 0x0000, 0xAA);
+ mga_writeb(minfo->video.vbase, 0x0800, 0x55);
+ mga_writeb(minfo->video.vbase, 0x4000, 0x55);
#if 0
- if (mga_readb(ACCESS_FBINFO(video.vbase), 0x0000) != 0xAA) {
+ if (mga_readb(minfo->video.vbase, 0x0000) != 0xAA) {
hw->MXoptionReg &= ~0x1000;
}
#endif
hw->MXoptionReg |= 0x00078020;
- } else if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG200) {
- pci_read_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, &reg50);
+ } else if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG200) {
+ pci_read_config_dword(minfo->pcidev, PCI_OPTION2_REG, &reg50);
reg50 &= ~0x3000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, reg50);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
- if (ACCESS_FBINFO(devflags.memtype) == -1)
- hw->MXoptionReg |= ACCESS_FBINFO(values).reg.opt & 0x1C00;
+ if (minfo->devflags.memtype == -1)
+ hw->MXoptionReg |= minfo->values.reg.opt & 0x1C00;
else
- hw->MXoptionReg |= (ACCESS_FBINFO(devflags.memtype) & 7) << 10;
- if (ACCESS_FBINFO(devflags.sgram))
+ hw->MXoptionReg |= (minfo->devflags.memtype & 7) << 10;
+ if (minfo->devflags.sgram)
hw->MXoptionReg |= 0x4000;
- mga_outl(M_CTLWTST, ACCESS_FBINFO(values).reg.mctlwtst);
- mga_outl(M_MEMRDBK, ACCESS_FBINFO(values).reg.memrdbk);
+ mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
+ mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
udelay(200);
mga_outl(M_MACCESS, 0x00000000);
mga_outl(M_MACCESS, 0x00008000);
udelay(100);
- mga_outw(M_MEMRDBK, ACCESS_FBINFO(values).reg.memrdbk);
+ mga_outw(M_MEMRDBK, minfo->values.reg.memrdbk);
hw->MXoptionReg |= 0x00078020;
} else {
- pci_read_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, &reg50);
+ pci_read_config_dword(minfo->pcidev, PCI_OPTION2_REG, &reg50);
reg50 &= ~0x00000100;
reg50 |= 0x00000000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION2_REG, reg50);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
- if (ACCESS_FBINFO(devflags.memtype) == -1)
- hw->MXoptionReg |= ACCESS_FBINFO(values).reg.opt & 0x1C00;
+ if (minfo->devflags.memtype == -1)
+ hw->MXoptionReg |= minfo->values.reg.opt & 0x1C00;
else
- hw->MXoptionReg |= (ACCESS_FBINFO(devflags.memtype) & 7) << 10;
- if (ACCESS_FBINFO(devflags.sgram))
+ hw->MXoptionReg |= (minfo->devflags.memtype & 7) << 10;
+ if (minfo->devflags.sgram)
hw->MXoptionReg |= 0x4000;
- mga_outl(M_CTLWTST, ACCESS_FBINFO(values).reg.mctlwtst);
- mga_outl(M_MEMRDBK, ACCESS_FBINFO(values).reg.memrdbk);
+ mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
+ mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
udelay(200);
mga_outl(M_MACCESS, 0x00000000);
mga_outl(M_MACCESS, 0x00008000);
udelay(100);
- mga_outl(M_MEMRDBK, ACCESS_FBINFO(values).reg.memrdbk);
+ mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
hw->MXoptionReg |= 0x00040020;
}
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
return 0;
}
-static void MGAG100_reset(WPMINFO2) {
+static void MGAG100_reset(struct matrox_fb_info *minfo)
+{
u_int8_t b;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
@@ -964,54 +992,55 @@ static void MGAG100_reset(WPMINFO2) {
find 1014/22 (IBM/82351); /* if found and bridging Matrox, do some strange stuff */
pci_read_config_byte(ibm, PCI_SECONDARY_BUS, &b);
- if (b == ACCESS_FBINFO(pcidev)->bus->number) {
+ if (b == minfo->pcidev->bus->number) {
pci_write_config_byte(ibm, PCI_COMMAND+1, 0); /* disable back-to-back & SERR */
pci_write_config_byte(ibm, 0x41, 0xF4); /* ??? */
pci_write_config_byte(ibm, PCI_IO_BASE, 0xF0); /* ??? */
pci_write_config_byte(ibm, PCI_IO_LIMIT, 0x00); /* ??? */
}
#endif
- if (!ACCESS_FBINFO(devflags.noinit)) {
+ if (!minfo->devflags.noinit) {
if (x7AF4 & 8) {
hw->MXoptionReg |= 0x40; /* FIXME... */
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
}
mga_setr(M_EXTVGA_INDEX, 0x06, 0x00);
}
}
- if (ACCESS_FBINFO(devflags.g450dac)) {
+ if (minfo->devflags.g450dac) {
/* either leave MCLK as is... or they were set in preinit */
- hw->DACclk[3] = inDAC1064(PMINFO DAC1064_XSYSPLLM);
- hw->DACclk[4] = inDAC1064(PMINFO DAC1064_XSYSPLLN);
- hw->DACclk[5] = inDAC1064(PMINFO DAC1064_XSYSPLLP);
+ hw->DACclk[3] = inDAC1064(minfo, DAC1064_XSYSPLLM);
+ hw->DACclk[4] = inDAC1064(minfo, DAC1064_XSYSPLLN);
+ hw->DACclk[5] = inDAC1064(minfo, DAC1064_XSYSPLLP);
} else {
- DAC1064_setmclk(PMINFO DAC1064_OPT_RESERVED | DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV1 | DAC1064_OPT_SCLK_PLL, 133333);
+ DAC1064_setmclk(minfo, DAC1064_OPT_RESERVED | DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV1 | DAC1064_OPT_SCLK_PLL, 133333);
}
- if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG400) {
- if (ACCESS_FBINFO(devflags.dfp_type) == -1) {
- ACCESS_FBINFO(devflags.dfp_type) = inDAC1064(PMINFO 0x1F);
+ if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400) {
+ if (minfo->devflags.dfp_type == -1) {
+ minfo->devflags.dfp_type = inDAC1064(minfo, 0x1F);
}
}
- if (ACCESS_FBINFO(devflags.noinit))
+ if (minfo->devflags.noinit)
return;
- if (ACCESS_FBINFO(devflags.g450dac)) {
+ if (minfo->devflags.g450dac) {
} else {
- MGAG100_setPixClock(PMINFO 4, 25175);
- MGAG100_setPixClock(PMINFO 5, 28322);
+ MGAG100_setPixClock(minfo, 4, 25175);
+ MGAG100_setPixClock(minfo, 5, 28322);
if (x7AF4 & 0x10) {
- b = inDAC1064(PMINFO M1064_XGENIODATA) & ~1;
- outDAC1064(PMINFO M1064_XGENIODATA, b);
- b = inDAC1064(PMINFO M1064_XGENIOCTRL) | 1;
- outDAC1064(PMINFO M1064_XGENIOCTRL, b);
+ b = inDAC1064(minfo, M1064_XGENIODATA) & ~1;
+ outDAC1064(minfo, M1064_XGENIODATA, b);
+ b = inDAC1064(minfo, M1064_XGENIOCTRL) | 1;
+ outDAC1064(minfo, M1064_XGENIOCTRL, b);
}
}
}
#endif
#ifdef CONFIG_FB_MATROX_MYSTIQUE
-static void MGA1064_restore(WPMINFO2) {
+static void MGA1064_restore(struct matrox_fb_info *minfo)
+{
int i;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
CRITFLAGS
@@ -1019,25 +1048,26 @@ static void MGA1064_restore(WPMINFO2) {
CRITBEGIN
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
mga_outb(M_IEN, 0x00);
mga_outb(M_CACHEFLUSH, 0x00);
CRITEND
- DAC1064_restore_1(PMINFO2);
- matroxfb_vgaHWrestore(PMINFO2);
- ACCESS_FBINFO(crtc1.panpos) = -1;
+ DAC1064_restore_1(minfo);
+ matroxfb_vgaHWrestore(minfo);
+ minfo->crtc1.panpos = -1;
for (i = 0; i < 6; i++)
mga_setr(M_EXTVGA_INDEX, i, hw->CRTCEXT[i]);
- DAC1064_restore_2(PMINFO2);
+ DAC1064_restore_2(minfo);
}
#endif
#ifdef CONFIG_FB_MATROX_G
-static void MGAG100_restore(WPMINFO2) {
+static void MGAG100_restore(struct matrox_fb_info *minfo)
+{
int i;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
CRITFLAGS
@@ -1045,19 +1075,17 @@ static void MGAG100_restore(WPMINFO2) {
CRITBEGIN
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
CRITEND
- DAC1064_restore_1(PMINFO2);
- matroxfb_vgaHWrestore(PMINFO2);
-#ifdef CONFIG_FB_MATROX_32MB
- if (ACCESS_FBINFO(devflags.support32MB))
+ DAC1064_restore_1(minfo);
+ matroxfb_vgaHWrestore(minfo);
+ if (minfo->devflags.support32MB)
mga_setr(M_EXTVGA_INDEX, 8, hw->CRTCEXT[8]);
-#endif
- ACCESS_FBINFO(crtc1.panpos) = -1;
+ minfo->crtc1.panpos = -1;
for (i = 0; i < 6; i++)
mga_setr(M_EXTVGA_INDEX, i, hw->CRTCEXT[i]);
- DAC1064_restore_2(PMINFO2);
+ DAC1064_restore_2(minfo);
}
#endif
diff --git a/drivers/video/matrox/matroxfb_DAC1064.h b/drivers/video/matrox/matroxfb_DAC1064.h
index 7a98ce8043d..c6ed7801efe 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.h
+++ b/drivers/video/matrox/matroxfb_DAC1064.h
@@ -11,8 +11,8 @@ extern struct matrox_switch matrox_mystique;
extern struct matrox_switch matrox_G100;
#endif
#ifdef NEED_DAC1064
-void DAC1064_global_init(WPMINFO2);
-void DAC1064_global_restore(WPMINFO2);
+void DAC1064_global_init(struct matrox_fb_info *minfo);
+void DAC1064_global_restore(struct matrox_fb_info *minfo);
#endif
#define M1064_INDEX 0x00
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index 4e825112a60..835aaaae6b9 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -279,27 +279,31 @@ static const unsigned char MGADACbpp32[] =
TVP3026_XCOLKEYCTRL_ZOOM1,
0x00, 0x00, TVP3026_XCURCTRL_DIS };
-static int Ti3026_calcclock(CPMINFO unsigned int freq, unsigned int fmax, int* in, int* feed, int* post) {
+static int Ti3026_calcclock(const struct matrox_fb_info *minfo,
+ unsigned int freq, unsigned int fmax, int *in,
+ int *feed, int *post)
+{
unsigned int fvco;
unsigned int lin, lfeed, lpost;
DBG(__func__)
- fvco = PLL_calcclock(PMINFO freq, fmax, &lin, &lfeed, &lpost);
+ fvco = PLL_calcclock(minfo, freq, fmax, &lin, &lfeed, &lpost);
fvco >>= (*post = lpost);
*in = 64 - lin;
*feed = 64 - lfeed;
return fvco;
}
-static int Ti3026_setpclk(WPMINFO int clk) {
+static int Ti3026_setpclk(struct matrox_fb_info *minfo, int clk)
+{
unsigned int f_pll;
unsigned int pixfeed, pixin, pixpost;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- f_pll = Ti3026_calcclock(PMINFO clk, ACCESS_FBINFO(max_pixel_clock), &pixin, &pixfeed, &pixpost);
+ f_pll = Ti3026_calcclock(minfo, clk, minfo->max_pixel_clock, &pixin, &pixfeed, &pixpost);
hw->DACclk[0] = pixin | 0xC0;
hw->DACclk[1] = pixfeed;
@@ -309,9 +313,9 @@ static int Ti3026_setpclk(WPMINFO int clk) {
unsigned int loopfeed, loopin, looppost, loopdiv, z;
unsigned int Bpp;
- Bpp = ACCESS_FBINFO(curr.final_bppShift);
+ Bpp = minfo->curr.final_bppShift;
- if (ACCESS_FBINFO(fbcon).var.bits_per_pixel == 24) {
+ if (minfo->fbcon.var.bits_per_pixel == 24) {
loopfeed = 3; /* set lm to any possible value */
loopin = 3 * 32 / Bpp;
} else {
@@ -330,18 +334,18 @@ static int Ti3026_setpclk(WPMINFO int clk) {
looppost = 3;
loopdiv = z/16;
}
- if (ACCESS_FBINFO(fbcon).var.bits_per_pixel == 24) {
+ if (minfo->fbcon.var.bits_per_pixel == 24) {
hw->DACclk[3] = ((65 - loopin) & 0x3F) | 0xC0;
hw->DACclk[4] = (65 - loopfeed) | 0x80;
- if (ACCESS_FBINFO(accel.ramdac_rev) > 0x20) {
- if (isInterleave(MINFO))
+ if (minfo->accel.ramdac_rev > 0x20) {
+ if (isInterleave(minfo))
hw->DACreg[POS3026_XLATCHCTRL] = TVP3026B_XLATCHCTRL_8_3;
else {
hw->DACclk[4] &= ~0xC0;
hw->DACreg[POS3026_XLATCHCTRL] = TVP3026B_XLATCHCTRL_4_3;
}
} else {
- if (isInterleave(MINFO))
+ if (isInterleave(minfo))
; /* default... */
else {
hw->DACclk[4] ^= 0xC0; /* change from 0x80 to 0x40 */
@@ -349,7 +353,7 @@ static int Ti3026_setpclk(WPMINFO int clk) {
}
}
hw->DACclk[5] = looppost | 0xF8;
- if (ACCESS_FBINFO(devflags.mga_24bpp_fix))
+ if (minfo->devflags.mga_24bpp_fix)
hw->DACclk[5] ^= 0x40;
} else {
hw->DACclk[3] = ((65 - loopin) & 0x3F) | 0xC0;
@@ -361,14 +365,15 @@ static int Ti3026_setpclk(WPMINFO int clk) {
return 0;
}
-static int Ti3026_init(WPMINFO struct my_timming* m) {
- u_int8_t muxctrl = isInterleave(MINFO) ? TVP3026_XMUXCTRL_MEMORY_64BIT : TVP3026_XMUXCTRL_MEMORY_32BIT;
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+static int Ti3026_init(struct matrox_fb_info *minfo, struct my_timming *m)
+{
+ u_int8_t muxctrl = isInterleave(minfo) ? TVP3026_XMUXCTRL_MEMORY_64BIT : TVP3026_XMUXCTRL_MEMORY_32BIT;
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg));
- switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
+ switch (minfo->fbcon.var.bits_per_pixel) {
case 4: hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_16_1; /* or _8_1, they are same */
hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR;
hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_4BIT;
@@ -383,7 +388,7 @@ static int Ti3026_init(WPMINFO struct my_timming* m) {
break;
case 16:
/* XLATCHCTRL should be _4_1 / _2_1... Why is not? (_2_1 is used everytime) */
- hw->DACreg[POS3026_XTRUECOLORCTRL] = (ACCESS_FBINFO(fbcon).var.green.length == 5)? (TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_ORGB_1555 ) : (TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_RGB_565);
+ hw->DACreg[POS3026_XTRUECOLORCTRL] = (minfo->fbcon.var.green.length == 5) ? (TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_ORGB_1555) : (TVP3026_XTRUECOLORCTRL_DIRECTCOLOR | TVP3026_XTRUECOLORCTRL_RGB_565);
hw->DACreg[POS3026_XMUXCTRL] = muxctrl | TVP3026_XMUXCTRL_PIXEL_16BIT;
hw->DACreg[POS3026_XCLKCTRL] = TVP3026_XCLKCTRL_SRC_PLL | TVP3026_XCLKCTRL_DIV2;
break;
@@ -400,7 +405,7 @@ static int Ti3026_init(WPMINFO struct my_timming* m) {
default:
return 1; /* TODO: failed */
}
- if (matroxfb_vgaHWinit(PMINFO m)) return 1;
+ if (matroxfb_vgaHWinit(minfo, m)) return 1;
/* set SYNC */
hw->MiscOutReg = 0xCB;
@@ -412,9 +417,9 @@ static int Ti3026_init(WPMINFO struct my_timming* m) {
hw->DACreg[POS3026_XGENCTRL] |= TVP3026_XGENCTRL_SYNC_ON_GREEN;
/* set DELAY */
- if (ACCESS_FBINFO(video.len) < 0x400000)
+ if (minfo->video.len < 0x400000)
hw->CRTCEXT[3] |= 0x08;
- else if (ACCESS_FBINFO(video.len) > 0x400000)
+ else if (minfo->video.len > 0x400000)
hw->CRTCEXT[3] |= 0x10;
/* set HWCURSOR */
@@ -426,14 +431,15 @@ static int Ti3026_init(WPMINFO struct my_timming* m) {
/* set interleaving */
hw->MXoptionReg &= ~0x00001000;
- if (isInterleave(MINFO)) hw->MXoptionReg |= 0x00001000;
+ if (isInterleave(minfo)) hw->MXoptionReg |= 0x00001000;
/* set DAC */
- Ti3026_setpclk(PMINFO m->pixclock);
+ Ti3026_setpclk(minfo, m->pixclock);
return 0;
}
-static void ti3026_setMCLK(WPMINFO int fout){
+static void ti3026_setMCLK(struct matrox_fb_info *minfo, int fout)
+{
unsigned int f_pll;
unsigned int pclk_m, pclk_n, pclk_p;
unsigned int mclk_m, mclk_n, mclk_p;
@@ -442,29 +448,29 @@ static void ti3026_setMCLK(WPMINFO int fout){
DBG(__func__)
- f_pll = Ti3026_calcclock(PMINFO fout, ACCESS_FBINFO(max_pixel_clock), &mclk_n, &mclk_m, &mclk_p);
+ f_pll = Ti3026_calcclock(minfo, fout, minfo->max_pixel_clock, &mclk_n, &mclk_m, &mclk_p);
/* save pclk */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFC);
- pclk_n = inTi3026(PMINFO TVP3026_XPIXPLLDATA);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFD);
- pclk_m = inTi3026(PMINFO TVP3026_XPIXPLLDATA);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFE);
- pclk_p = inTi3026(PMINFO TVP3026_XPIXPLLDATA);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFC);
+ pclk_n = inTi3026(minfo, TVP3026_XPIXPLLDATA);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFD);
+ pclk_m = inTi3026(minfo, TVP3026_XPIXPLLDATA);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFE);
+ pclk_p = inTi3026(minfo, TVP3026_XPIXPLLDATA);
/* stop pclk */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFE);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, 0x00);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFE);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, 0x00);
/* set pclk to new mclk */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFC);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, mclk_n | 0xC0);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, mclk_m);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, mclk_p | 0xB0);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFC);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, mclk_n | 0xC0);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, mclk_m);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, mclk_p | 0xB0);
/* wait for PLL to lock */
for (tmout = 500000; tmout; tmout--) {
- if (inTi3026(PMINFO TVP3026_XPIXPLLDATA) & 0x40)
+ if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40)
break;
udelay(10);
};
@@ -472,23 +478,23 @@ static void ti3026_setMCLK(WPMINFO int fout){
printk(KERN_ERR "matroxfb: Temporary pixel PLL not locked after 5 secs\n");
/* output pclk on mclk pin */
- mclk_ctl = inTi3026(PMINFO TVP3026_XMEMPLLCTRL);
- outTi3026(PMINFO TVP3026_XMEMPLLCTRL, mclk_ctl & 0xE7);
- outTi3026(PMINFO TVP3026_XMEMPLLCTRL, (mclk_ctl & 0xE7) | TVP3026_XMEMPLLCTRL_STROBEMKC4);
+ mclk_ctl = inTi3026(minfo, TVP3026_XMEMPLLCTRL);
+ outTi3026(minfo, TVP3026_XMEMPLLCTRL, mclk_ctl & 0xE7);
+ outTi3026(minfo, TVP3026_XMEMPLLCTRL, (mclk_ctl & 0xE7) | TVP3026_XMEMPLLCTRL_STROBEMKC4);
/* stop MCLK */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFB);
- outTi3026(PMINFO TVP3026_XMEMPLLDATA, 0x00);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFB);
+ outTi3026(minfo, TVP3026_XMEMPLLDATA, 0x00);
/* set mclk to new freq */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xF3);
- outTi3026(PMINFO TVP3026_XMEMPLLDATA, mclk_n | 0xC0);
- outTi3026(PMINFO TVP3026_XMEMPLLDATA, mclk_m);
- outTi3026(PMINFO TVP3026_XMEMPLLDATA, mclk_p | 0xB0);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xF3);
+ outTi3026(minfo, TVP3026_XMEMPLLDATA, mclk_n | 0xC0);
+ outTi3026(minfo, TVP3026_XMEMPLLDATA, mclk_m);
+ outTi3026(minfo, TVP3026_XMEMPLLDATA, mclk_p | 0xB0);
/* wait for PLL to lock */
for (tmout = 500000; tmout; tmout--) {
- if (inTi3026(PMINFO TVP3026_XMEMPLLDATA) & 0x40)
+ if (inTi3026(minfo, TVP3026_XMEMPLLDATA) & 0x40)
break;
udelay(10);
}
@@ -496,7 +502,7 @@ static void ti3026_setMCLK(WPMINFO int fout){
printk(KERN_ERR "matroxfb: Memory PLL not locked after 5 secs\n");
f_pll = f_pll * 333 / (10000 << mclk_p);
- if (isMilleniumII(MINFO)) {
+ if (isMilleniumII(minfo)) {
rfhcnt = (f_pll - 128) / 256;
if (rfhcnt > 15)
rfhcnt = 15;
@@ -505,26 +511,26 @@ static void ti3026_setMCLK(WPMINFO int fout){
if (rfhcnt > 15)
rfhcnt = 0;
}
- ACCESS_FBINFO(hw).MXoptionReg = (ACCESS_FBINFO(hw).MXoptionReg & ~0x000F0000) | (rfhcnt << 16);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg);
+ minfo->hw.MXoptionReg = (minfo->hw.MXoptionReg & ~0x000F0000) | (rfhcnt << 16);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
/* output MCLK to MCLK pin */
- outTi3026(PMINFO TVP3026_XMEMPLLCTRL, (mclk_ctl & 0xE7) | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL);
- outTi3026(PMINFO TVP3026_XMEMPLLCTRL, (mclk_ctl ) | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL | TVP3026_XMEMPLLCTRL_STROBEMKC4);
+ outTi3026(minfo, TVP3026_XMEMPLLCTRL, (mclk_ctl & 0xE7) | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL);
+ outTi3026(minfo, TVP3026_XMEMPLLCTRL, mclk_ctl | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL | TVP3026_XMEMPLLCTRL_STROBEMKC4);
/* stop PCLK */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFE);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, 0x00);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFE);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, 0x00);
/* restore pclk */
- outTi3026(PMINFO TVP3026_XPLLADDR, 0xFC);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, pclk_n);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, pclk_m);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, pclk_p);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0xFC);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, pclk_n);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, pclk_m);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, pclk_p);
/* wait for PLL to lock */
for (tmout = 500000; tmout; tmout--) {
- if (inTi3026(PMINFO TVP3026_XPIXPLLDATA) & 0x40)
+ if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40)
break;
udelay(10);
}
@@ -532,26 +538,27 @@ static void ti3026_setMCLK(WPMINFO int fout){
printk(KERN_ERR "matroxfb: Pixel PLL not locked after 5 secs\n");
}
-static void ti3026_ramdac_init(WPMINFO2) {
-
+static void ti3026_ramdac_init(struct matrox_fb_info *minfo)
+{
DBG(__func__)
- ACCESS_FBINFO(features.pll.vco_freq_min) = 110000;
- ACCESS_FBINFO(features.pll.ref_freq) = 114545;
- ACCESS_FBINFO(features.pll.feed_div_min) = 2;
- ACCESS_FBINFO(features.pll.feed_div_max) = 24;
- ACCESS_FBINFO(features.pll.in_div_min) = 2;
- ACCESS_FBINFO(features.pll.in_div_max) = 63;
- ACCESS_FBINFO(features.pll.post_shift_max) = 3;
- if (ACCESS_FBINFO(devflags.noinit))
+ minfo->features.pll.vco_freq_min = 110000;
+ minfo->features.pll.ref_freq = 114545;
+ minfo->features.pll.feed_div_min = 2;
+ minfo->features.pll.feed_div_max = 24;
+ minfo->features.pll.in_div_min = 2;
+ minfo->features.pll.in_div_max = 63;
+ minfo->features.pll.post_shift_max = 3;
+ if (minfo->devflags.noinit)
return;
- ti3026_setMCLK(PMINFO 60000);
+ ti3026_setMCLK(minfo, 60000);
}
-static void Ti3026_restore(WPMINFO2) {
+static void Ti3026_restore(struct matrox_fb_info *minfo)
+{
int i;
unsigned char progdac[6];
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
CRITFLAGS
DBG(__func__)
@@ -565,31 +572,31 @@ static void Ti3026_restore(WPMINFO2) {
CRITBEGIN
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
CRITEND
- matroxfb_vgaHWrestore(PMINFO2);
+ matroxfb_vgaHWrestore(minfo);
CRITBEGIN
- ACCESS_FBINFO(crtc1.panpos) = -1;
+ minfo->crtc1.panpos = -1;
for (i = 0; i < 6; i++)
mga_setr(M_EXTVGA_INDEX, i, hw->CRTCEXT[i]);
for (i = 0; i < 21; i++) {
- outTi3026(PMINFO DACseq[i], hw->DACreg[i]);
+ outTi3026(minfo, DACseq[i], hw->DACreg[i]);
}
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x00);
- progdac[0] = inTi3026(PMINFO TVP3026_XPIXPLLDATA);
- progdac[3] = inTi3026(PMINFO TVP3026_XLOOPPLLDATA);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x15);
- progdac[1] = inTi3026(PMINFO TVP3026_XPIXPLLDATA);
- progdac[4] = inTi3026(PMINFO TVP3026_XLOOPPLLDATA);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x2A);
- progdac[2] = inTi3026(PMINFO TVP3026_XPIXPLLDATA);
- progdac[5] = inTi3026(PMINFO TVP3026_XLOOPPLLDATA);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x00);
+ progdac[0] = inTi3026(minfo, TVP3026_XPIXPLLDATA);
+ progdac[3] = inTi3026(minfo, TVP3026_XLOOPPLLDATA);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x15);
+ progdac[1] = inTi3026(minfo, TVP3026_XPIXPLLDATA);
+ progdac[4] = inTi3026(minfo, TVP3026_XLOOPPLLDATA);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x2A);
+ progdac[2] = inTi3026(minfo, TVP3026_XPIXPLLDATA);
+ progdac[5] = inTi3026(minfo, TVP3026_XLOOPPLLDATA);
CRITEND
if (memcmp(hw->DACclk, progdac, 6)) {
@@ -598,20 +605,20 @@ static void Ti3026_restore(WPMINFO2) {
/* Maybe even we should call schedule() ? */
CRITBEGIN
- outTi3026(PMINFO TVP3026_XCLKCTRL, hw->DACreg[POS3026_XCLKCTRL]);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x2A);
- outTi3026(PMINFO TVP3026_XLOOPPLLDATA, 0);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, 0);
+ outTi3026(minfo, TVP3026_XCLKCTRL, hw->DACreg[POS3026_XCLKCTRL]);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x2A);
+ outTi3026(minfo, TVP3026_XLOOPPLLDATA, 0);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, 0);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x00);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x00);
for (i = 0; i < 3; i++)
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, hw->DACclk[i]);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, hw->DACclk[i]);
/* wait for PLL only if PLL clock requested (always for PowerMode, never for VGA) */
if (hw->MiscOutReg & 0x08) {
int tmout;
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x3F);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x3F);
for (tmout = 500000; tmout; --tmout) {
- if (inTi3026(PMINFO TVP3026_XPIXPLLDATA) & 0x40)
+ if (inTi3026(minfo, TVP3026_XPIXPLLDATA) & 0x40)
break;
udelay(10);
}
@@ -624,18 +631,18 @@ static void Ti3026_restore(WPMINFO2) {
dprintk(KERN_INFO "PixelPLL: %d\n", 500000-tmout);
CRITBEGIN
}
- outTi3026(PMINFO TVP3026_XMEMPLLCTRL, hw->DACreg[POS3026_XMEMPLLCTRL]);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x00);
+ outTi3026(minfo, TVP3026_XMEMPLLCTRL, hw->DACreg[POS3026_XMEMPLLCTRL]);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x00);
for (i = 3; i < 6; i++)
- outTi3026(PMINFO TVP3026_XLOOPPLLDATA, hw->DACclk[i]);
+ outTi3026(minfo, TVP3026_XLOOPPLLDATA, hw->DACclk[i]);
CRITEND
if ((hw->MiscOutReg & 0x08) && ((hw->DACclk[5] & 0x80) == 0x80)) {
int tmout;
CRITBEGIN
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x3F);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x3F);
for (tmout = 500000; tmout; --tmout) {
- if (inTi3026(PMINFO TVP3026_XLOOPPLLDATA) & 0x40)
+ if (inTi3026(minfo, TVP3026_XLOOPPLLDATA) & 0x40)
break;
udelay(10);
}
@@ -660,65 +667,66 @@ static void Ti3026_restore(WPMINFO2) {
#endif
}
-static void Ti3026_reset(WPMINFO2) {
-
+static void Ti3026_reset(struct matrox_fb_info *minfo)
+{
DBG(__func__)
- ti3026_ramdac_init(PMINFO2);
+ ti3026_ramdac_init(minfo);
}
static struct matrox_altout ti3026_output = {
.name = "Primary output",
};
-static int Ti3026_preinit(WPMINFO2) {
+static int Ti3026_preinit(struct matrox_fb_info *minfo)
+{
static const int vxres_mill2[] = { 512, 640, 768, 800, 832, 960,
1024, 1152, 1280, 1600, 1664, 1920,
2048, 0};
static const int vxres_mill1[] = { 640, 768, 800, 960,
1024, 1152, 1280, 1600, 1920,
2048, 0};
- struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state *hw = &minfo->hw;
DBG(__func__)
- ACCESS_FBINFO(millenium) = 1;
- ACCESS_FBINFO(milleniumII) = (ACCESS_FBINFO(pcidev)->device != PCI_DEVICE_ID_MATROX_MIL);
- ACCESS_FBINFO(capable.cfb4) = 1;
- ACCESS_FBINFO(capable.text) = 1; /* isMilleniumII(MINFO); */
- ACCESS_FBINFO(capable.vxres) = isMilleniumII(MINFO)?vxres_mill2:vxres_mill1;
+ minfo->millenium = 1;
+ minfo->milleniumII = (minfo->pcidev->device != PCI_DEVICE_ID_MATROX_MIL);
+ minfo->capable.cfb4 = 1;
+ minfo->capable.text = 1; /* isMilleniumII(minfo); */
+ minfo->capable.vxres = isMilleniumII(minfo) ? vxres_mill2 : vxres_mill1;
- ACCESS_FBINFO(outputs[0]).data = MINFO;
- ACCESS_FBINFO(outputs[0]).output = &ti3026_output;
- ACCESS_FBINFO(outputs[0]).src = ACCESS_FBINFO(outputs[0]).default_src;
- ACCESS_FBINFO(outputs[0]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ minfo->outputs[0].data = minfo;
+ minfo->outputs[0].output = &ti3026_output;
+ minfo->outputs[0].src = minfo->outputs[0].default_src;
+ minfo->outputs[0].mode = MATROXFB_OUTPUT_MODE_MONITOR;
- if (ACCESS_FBINFO(devflags.noinit))
+ if (minfo->devflags.noinit)
return 0;
/* preserve VGA I/O, BIOS and PPC */
hw->MXoptionReg &= 0xC0000100;
hw->MXoptionReg |= 0x002C0000;
- if (ACCESS_FBINFO(devflags.novga))
+ if (minfo->devflags.novga)
hw->MXoptionReg &= ~0x00000100;
- if (ACCESS_FBINFO(devflags.nobios))
+ if (minfo->devflags.nobios)
hw->MXoptionReg &= ~0x40000000;
- if (ACCESS_FBINFO(devflags.nopciretry))
+ if (minfo->devflags.nopciretry)
hw->MXoptionReg |= 0x20000000;
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, hw->MXoptionReg);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
- ACCESS_FBINFO(accel.ramdac_rev) = inTi3026(PMINFO TVP3026_XSILICONREV);
+ minfo->accel.ramdac_rev = inTi3026(minfo, TVP3026_XSILICONREV);
- outTi3026(PMINFO TVP3026_XCLKCTRL, TVP3026_XCLKCTRL_SRC_CLK0VGA | TVP3026_XCLKCTRL_CLKSTOPPED);
- outTi3026(PMINFO TVP3026_XTRUECOLORCTRL, TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR);
- outTi3026(PMINFO TVP3026_XMUXCTRL, TVP3026_XMUXCTRL_VGA);
+ outTi3026(minfo, TVP3026_XCLKCTRL, TVP3026_XCLKCTRL_SRC_CLK0VGA | TVP3026_XCLKCTRL_CLKSTOPPED);
+ outTi3026(minfo, TVP3026_XTRUECOLORCTRL, TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR);
+ outTi3026(minfo, TVP3026_XMUXCTRL, TVP3026_XMUXCTRL_VGA);
- outTi3026(PMINFO TVP3026_XPLLADDR, 0x2A);
- outTi3026(PMINFO TVP3026_XLOOPPLLDATA, 0x00);
- outTi3026(PMINFO TVP3026_XPIXPLLDATA, 0x00);
+ outTi3026(minfo, TVP3026_XPLLADDR, 0x2A);
+ outTi3026(minfo, TVP3026_XLOOPPLLDATA, 0x00);
+ outTi3026(minfo, TVP3026_XPIXPLLDATA, 0x00);
mga_outb(M_MISC_REG, 0x67);
- outTi3026(PMINFO TVP3026_XMEMPLLCTRL, TVP3026_XMEMPLLCTRL_STROBEMKC4 | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL);
+ outTi3026(minfo, TVP3026_XMEMPLLCTRL, TVP3026_XMEMPLLCTRL_STROBEMKC4 | TVP3026_XMEMPLLCTRL_MCLK_MCLKPLL);
mga_outl(M_RESET, 1);
udelay(250);
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index 9c3aeee1cc4..8335a6fe303 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -81,7 +81,7 @@
#include "matroxfb_Ti3026.h"
#include "matroxfb_misc.h"
-#define curr_ydstorg(x) ACCESS_FBINFO2(x, curr.ydstorg.pixels)
+#define curr_ydstorg(x) ((x)->curr.ydstorg.pixels)
#define mga_ydstlen(y,l) mga_outl(M_YDSTLEN | M_EXEC, ((y) << 16) | (l))
@@ -107,7 +107,8 @@ static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* imag
static void matroxfb_cfb4_fillrect(struct fb_info* info, const struct fb_fillrect* rect);
static void matroxfb_cfb4_copyarea(struct fb_info* info, const struct fb_copyarea* area);
-void matrox_cfbX_init(WPMINFO2) {
+void matrox_cfbX_init(struct matrox_fb_info *minfo)
+{
u_int32_t maccess;
u_int32_t mpitch;
u_int32_t mopmode;
@@ -115,59 +116,59 @@ void matrox_cfbX_init(WPMINFO2) {
DBG(__func__)
- mpitch = ACCESS_FBINFO(fbcon).var.xres_virtual;
+ mpitch = minfo->fbcon.var.xres_virtual;
- ACCESS_FBINFO(fbops).fb_copyarea = cfb_copyarea;
- ACCESS_FBINFO(fbops).fb_fillrect = cfb_fillrect;
- ACCESS_FBINFO(fbops).fb_imageblit = cfb_imageblit;
- ACCESS_FBINFO(fbops).fb_cursor = NULL;
+ minfo->fbops.fb_copyarea = cfb_copyarea;
+ minfo->fbops.fb_fillrect = cfb_fillrect;
+ minfo->fbops.fb_imageblit = cfb_imageblit;
+ minfo->fbops.fb_cursor = NULL;
- accel = (ACCESS_FBINFO(fbcon).var.accel_flags & FB_ACCELF_TEXT) == FB_ACCELF_TEXT;
+ accel = (minfo->fbcon.var.accel_flags & FB_ACCELF_TEXT) == FB_ACCELF_TEXT;
- switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
+ switch (minfo->fbcon.var.bits_per_pixel) {
case 4: maccess = 0x00000000; /* accelerate as 8bpp video */
mpitch = (mpitch >> 1) | 0x8000; /* disable linearization */
mopmode = M_OPMODE_4BPP;
- matrox_cfb4_pal(ACCESS_FBINFO(cmap));
+ matrox_cfb4_pal(minfo->cmap);
if (accel && !(mpitch & 1)) {
- ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_cfb4_copyarea;
- ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_cfb4_fillrect;
+ minfo->fbops.fb_copyarea = matroxfb_cfb4_copyarea;
+ minfo->fbops.fb_fillrect = matroxfb_cfb4_fillrect;
}
break;
case 8: maccess = 0x00000000;
mopmode = M_OPMODE_8BPP;
- matrox_cfb8_pal(ACCESS_FBINFO(cmap));
+ matrox_cfb8_pal(minfo->cmap);
if (accel) {
- ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
- ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
- ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
+ minfo->fbops.fb_copyarea = matroxfb_copyarea;
+ minfo->fbops.fb_fillrect = matroxfb_fillrect;
+ minfo->fbops.fb_imageblit = matroxfb_imageblit;
}
break;
- case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5)
+ case 16: if (minfo->fbcon.var.green.length == 5)
maccess = 0xC0000001;
else
maccess = 0x40000001;
mopmode = M_OPMODE_16BPP;
if (accel) {
- ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
- ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
- ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
+ minfo->fbops.fb_copyarea = matroxfb_copyarea;
+ minfo->fbops.fb_fillrect = matroxfb_fillrect;
+ minfo->fbops.fb_imageblit = matroxfb_imageblit;
}
break;
case 24: maccess = 0x00000003;
mopmode = M_OPMODE_24BPP;
if (accel) {
- ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
- ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
- ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
+ minfo->fbops.fb_copyarea = matroxfb_copyarea;
+ minfo->fbops.fb_fillrect = matroxfb_fillrect;
+ minfo->fbops.fb_imageblit = matroxfb_imageblit;
}
break;
case 32: maccess = 0x00000002;
mopmode = M_OPMODE_32BPP;
if (accel) {
- ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
- ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
- ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
+ minfo->fbops.fb_copyarea = matroxfb_copyarea;
+ minfo->fbops.fb_fillrect = matroxfb_fillrect;
+ minfo->fbops.fb_imageblit = matroxfb_imageblit;
}
break;
default: maccess = 0x00000000;
@@ -176,10 +177,10 @@ void matrox_cfbX_init(WPMINFO2) {
}
mga_fifo(8);
mga_outl(M_PITCH, mpitch);
- mga_outl(M_YDSTORG, curr_ydstorg(MINFO));
- if (ACCESS_FBINFO(capable.plnwt))
+ mga_outl(M_YDSTORG, curr_ydstorg(minfo));
+ if (minfo->capable.plnwt)
mga_outl(M_PLNWT, -1);
- if (ACCESS_FBINFO(capable.srcorg)) {
+ if (minfo->capable.srcorg) {
mga_outl(M_SRCORG, 0);
mga_outl(M_DSTORG, 0);
}
@@ -188,14 +189,16 @@ void matrox_cfbX_init(WPMINFO2) {
mga_outl(M_YTOP, 0);
mga_outl(M_YBOT, 0x01FFFFFF);
mga_outl(M_MACCESS, maccess);
- ACCESS_FBINFO(accel.m_dwg_rect) = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO;
- if (isMilleniumII(MINFO)) ACCESS_FBINFO(accel.m_dwg_rect) |= M_DWG_TRANSC;
- ACCESS_FBINFO(accel.m_opmode) = mopmode;
+ minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO;
+ if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC;
+ minfo->accel.m_opmode = mopmode;
}
EXPORT_SYMBOL(matrox_cfbX_init);
-static void matrox_accel_bmove(WPMINFO int vxres, int sy, int sx, int dy, int dx, int height, int width) {
+static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
int start, end;
CRITFLAGS
@@ -209,7 +212,7 @@ static void matrox_accel_bmove(WPMINFO int vxres, int sy, int sx, int dy, int dx
M_DWG_BFCOL | M_DWG_REPLACE);
mga_outl(M_AR5, vxres);
width--;
- start = sy*vxres+sx+curr_ydstorg(MINFO);
+ start = sy*vxres+sx+curr_ydstorg(minfo);
end = start+width;
} else {
mga_fifo(3);
@@ -217,7 +220,7 @@ static void matrox_accel_bmove(WPMINFO int vxres, int sy, int sx, int dy, int dx
mga_outl(M_SGN, 5);
mga_outl(M_AR5, -vxres);
width--;
- end = (sy+height-1)*vxres+sx+curr_ydstorg(MINFO);
+ end = (sy+height-1)*vxres+sx+curr_ydstorg(minfo);
start = end+width;
dy += height-1;
}
@@ -231,7 +234,10 @@ static void matrox_accel_bmove(WPMINFO int vxres, int sy, int sx, int dy, int dx
CRITEND
}
-static void matrox_accel_bmove_lin(WPMINFO int vxres, int sy, int sx, int dy, int dx, int height, int width) {
+static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ int sy, int sx, int dy, int dx, int height,
+ int width)
+{
int start, end;
CRITFLAGS
@@ -245,7 +251,7 @@ static void matrox_accel_bmove_lin(WPMINFO int vxres, int sy, int sx, int dy, in
M_DWG_BFCOL | M_DWG_REPLACE);
mga_outl(M_AR5, vxres);
width--;
- start = sy*vxres+sx+curr_ydstorg(MINFO);
+ start = sy*vxres+sx+curr_ydstorg(minfo);
end = start+width;
} else {
mga_fifo(3);
@@ -253,7 +259,7 @@ static void matrox_accel_bmove_lin(WPMINFO int vxres, int sy, int sx, int dy, in
mga_outl(M_SGN, 5);
mga_outl(M_AR5, -vxres);
width--;
- end = (sy+height-1)*vxres+sx+curr_ydstorg(MINFO);
+ end = (sy+height-1)*vxres+sx+curr_ydstorg(minfo);
start = end+width;
dy += height-1;
}
@@ -269,22 +275,23 @@ static void matrox_accel_bmove_lin(WPMINFO int vxres, int sy, int sx, int dy, in
}
static void matroxfb_cfb4_copyarea(struct fb_info* info, const struct fb_copyarea* area) {
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
if ((area->sx | area->dx | area->width) & 1)
cfb_copyarea(info, area);
else
- matrox_accel_bmove_lin(PMINFO ACCESS_FBINFO(fbcon.var.xres_virtual) >> 1, area->sy, area->sx >> 1, area->dy, area->dx >> 1, area->height, area->width >> 1);
+ matrox_accel_bmove_lin(minfo, minfo->fbcon.var.xres_virtual >> 1, area->sy, area->sx >> 1, area->dy, area->dx >> 1, area->height, area->width >> 1);
}
static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area) {
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
- matrox_accel_bmove(PMINFO ACCESS_FBINFO(fbcon.var.xres_virtual), area->sy, area->sx, area->dy, area->dx, area->height, area->width);
+ matrox_accel_bmove(minfo, minfo->fbcon.var.xres_virtual, area->sy, area->sx, area->dy, area->dx, area->height, area->width);
}
-static void matroxfb_accel_clear(WPMINFO u_int32_t color, int sy, int sx, int height,
- int width) {
+static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color,
+ int sy, int sx, int height, int width)
+{
CRITFLAGS
DBG(__func__)
@@ -292,7 +299,7 @@ static void matroxfb_accel_clear(WPMINFO u_int32_t color, int sy, int sx, int he
CRITBEGIN
mga_fifo(5);
- mga_outl(M_DWGCTL, ACCESS_FBINFO(accel.m_dwg_rect) | M_DWG_REPLACE);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE);
mga_outl(M_FCOL, color);
mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
mga_ydstlen(sy, height);
@@ -302,16 +309,18 @@ static void matroxfb_accel_clear(WPMINFO u_int32_t color, int sy, int sx, int he
}
static void matroxfb_fillrect(struct fb_info* info, const struct fb_fillrect* rect) {
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
switch (rect->rop) {
case ROP_COPY:
- matroxfb_accel_clear(PMINFO ((u_int32_t*)info->pseudo_palette)[rect->color], rect->dy, rect->dx, rect->height, rect->width);
+ matroxfb_accel_clear(minfo, ((u_int32_t *)info->pseudo_palette)[rect->color], rect->dy, rect->dx, rect->height, rect->width);
break;
}
}
-static void matroxfb_cfb4_clear(WPMINFO u_int32_t bgx, int sy, int sx, int height, int width) {
+static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx,
+ int sy, int sx, int height, int width)
+{
int whattodo;
CRITFLAGS
@@ -333,16 +342,16 @@ static void matroxfb_cfb4_clear(WPMINFO u_int32_t bgx, int sy, int sx, int heigh
sx >>= 1;
if (width) {
mga_fifo(5);
- mga_outl(M_DWGCTL, ACCESS_FBINFO(accel.m_dwg_rect) | M_DWG_REPLACE2);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2);
mga_outl(M_FCOL, bgx);
mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
- mga_outl(M_YDST, sy * ACCESS_FBINFO(fbcon).var.xres_virtual >> 6);
+ mga_outl(M_YDST, sy * minfo->fbcon.var.xres_virtual >> 6);
mga_outl(M_LEN | M_EXEC, height);
WaitTillIdle();
}
if (whattodo) {
- u_int32_t step = ACCESS_FBINFO(fbcon).var.xres_virtual >> 1;
- vaddr_t vbase = ACCESS_FBINFO(video.vbase);
+ u_int32_t step = minfo->fbcon.var.xres_virtual >> 1;
+ vaddr_t vbase = minfo->video.vbase;
if (whattodo & 1) {
unsigned int uaddr = sy * step + sx - 1;
u_int32_t loop;
@@ -367,17 +376,19 @@ static void matroxfb_cfb4_clear(WPMINFO u_int32_t bgx, int sy, int sx, int heigh
}
static void matroxfb_cfb4_fillrect(struct fb_info* info, const struct fb_fillrect* rect) {
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
switch (rect->rop) {
case ROP_COPY:
- matroxfb_cfb4_clear(PMINFO ((u_int32_t*)info->pseudo_palette)[rect->color], rect->dy, rect->dx, rect->height, rect->width);
+ matroxfb_cfb4_clear(minfo, ((u_int32_t *)info->pseudo_palette)[rect->color], rect->dy, rect->dx, rect->height, rect->width);
break;
}
}
-static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
- const u_int8_t* chardata, int width, int height, int yy, int xx) {
+static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+ u_int32_t bgx, const u_int8_t *chardata,
+ int width, int height, int yy, int xx)
+{
u_int32_t step;
u_int32_t ydstlen;
u_int32_t xlen;
@@ -412,7 +423,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
mga_outl(M_FCOL, fgx);
mga_outl(M_BCOL, bgx);
fxbndry = ((xx + width - 1) << 16) | xx;
- mmio = ACCESS_FBINFO(mmio.vbase);
+ mmio = minfo->mmio.vbase;
mga_fifo(6);
mga_writel(mmio, M_FXBNDRY, fxbndry);
@@ -467,7 +478,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* image) {
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG_HEAVY(__func__);
@@ -476,7 +487,7 @@ static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* imag
fgx = ((u_int32_t*)info->pseudo_palette)[image->fg_color];
bgx = ((u_int32_t*)info->pseudo_palette)[image->bg_color];
- matroxfb_1bpp_imageblit(PMINFO fgx, bgx, image->data, image->width, image->height, image->dy, image->dx);
+ matroxfb_1bpp_imageblit(minfo, fgx, bgx, image->data, image->width, image->height, image->dy, image->dx);
} else {
/* Danger! image->depth is useless: logo painting code always
passes framebuffer color depth here, although logo data are
diff --git a/drivers/video/matrox/matroxfb_accel.h b/drivers/video/matrox/matroxfb_accel.h
index f40c314b4c3..1e418e62c22 100644
--- a/drivers/video/matrox/matroxfb_accel.h
+++ b/drivers/video/matrox/matroxfb_accel.h
@@ -3,6 +3,6 @@
#include "matroxfb_base.h"
-void matrox_cfbX_init(WPMINFO2);
+void matrox_cfbX_init(struct matrox_fb_info *minfo);
#endif
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 0c1049b308b..7064fb4427b 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -154,21 +154,22 @@ static struct fb_var_screeninfo vesafb_defined = {
/* --------------------------------------------------------------------- */
-static void update_crtc2(WPMINFO unsigned int pos) {
- struct matroxfb_dh_fb_info* info = ACCESS_FBINFO(crtc2.info);
+static void update_crtc2(struct matrox_fb_info *minfo, unsigned int pos)
+{
+ struct matroxfb_dh_fb_info *info = minfo->crtc2.info;
/* Make sure that displays are compatible */
- if (info && (info->fbcon.var.bits_per_pixel == ACCESS_FBINFO(fbcon).var.bits_per_pixel)
- && (info->fbcon.var.xres_virtual == ACCESS_FBINFO(fbcon).var.xres_virtual)
- && (info->fbcon.var.green.length == ACCESS_FBINFO(fbcon).var.green.length)
+ if (info && (info->fbcon.var.bits_per_pixel == minfo->fbcon.var.bits_per_pixel)
+ && (info->fbcon.var.xres_virtual == minfo->fbcon.var.xres_virtual)
+ && (info->fbcon.var.green.length == minfo->fbcon.var.green.length)
) {
- switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
+ switch (minfo->fbcon.var.bits_per_pixel) {
case 16:
case 32:
pos = pos * 8;
if (info->interlaced) {
mga_outl(0x3C2C, pos);
- mga_outl(0x3C28, pos + ACCESS_FBINFO(fbcon).var.xres_virtual * ACCESS_FBINFO(fbcon).var.bits_per_pixel / 8);
+ mga_outl(0x3C28, pos + minfo->fbcon.var.xres_virtual * minfo->fbcon.var.bits_per_pixel / 8);
} else {
mga_outl(0x3C28, pos);
}
@@ -177,17 +178,18 @@ static void update_crtc2(WPMINFO unsigned int pos) {
}
}
-static void matroxfb_crtc1_panpos(WPMINFO2) {
- if (ACCESS_FBINFO(crtc1.panpos) >= 0) {
+static void matroxfb_crtc1_panpos(struct matrox_fb_info *minfo)
+{
+ if (minfo->crtc1.panpos >= 0) {
unsigned long flags;
int panpos;
matroxfb_DAC_lock_irqsave(flags);
- panpos = ACCESS_FBINFO(crtc1.panpos);
+ panpos = minfo->crtc1.panpos;
if (panpos >= 0) {
unsigned int extvga_reg;
- ACCESS_FBINFO(crtc1.panpos) = -1; /* No update pending anymore */
+ minfo->crtc1.panpos = -1; /* No update pending anymore */
extvga_reg = mga_inb(M_EXTVGA_INDEX);
mga_setr(M_EXTVGA_INDEX, 0x00, panpos);
if (extvga_reg != 0x00) {
@@ -202,39 +204,39 @@ static irqreturn_t matrox_irq(int irq, void *dev_id)
{
u_int32_t status;
int handled = 0;
-
- MINFO_FROM(dev_id);
+ struct matrox_fb_info *minfo = dev_id;
status = mga_inl(M_STATUS);
if (status & 0x20) {
mga_outl(M_ICLEAR, 0x20);
- ACCESS_FBINFO(crtc1.vsync.cnt)++;
- matroxfb_crtc1_panpos(PMINFO2);
- wake_up_interruptible(&ACCESS_FBINFO(crtc1.vsync.wait));
+ minfo->crtc1.vsync.cnt++;
+ matroxfb_crtc1_panpos(minfo);
+ wake_up_interruptible(&minfo->crtc1.vsync.wait);
handled = 1;
}
if (status & 0x200) {
mga_outl(M_ICLEAR, 0x200);
- ACCESS_FBINFO(crtc2.vsync.cnt)++;
- wake_up_interruptible(&ACCESS_FBINFO(crtc2.vsync.wait));
+ minfo->crtc2.vsync.cnt++;
+ wake_up_interruptible(&minfo->crtc2.vsync.wait);
handled = 1;
}
return IRQ_RETVAL(handled);
}
-int matroxfb_enable_irq(WPMINFO int reenable) {
+int matroxfb_enable_irq(struct matrox_fb_info *minfo, int reenable)
+{
u_int32_t bm;
- if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG400)
+ if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400)
bm = 0x220;
else
bm = 0x020;
- if (!test_and_set_bit(0, &ACCESS_FBINFO(irq_flags))) {
- if (request_irq(ACCESS_FBINFO(pcidev)->irq, matrox_irq,
- IRQF_SHARED, "matroxfb", MINFO)) {
- clear_bit(0, &ACCESS_FBINFO(irq_flags));
+ if (!test_and_set_bit(0, &minfo->irq_flags)) {
+ if (request_irq(minfo->pcidev->irq, matrox_irq,
+ IRQF_SHARED, "matroxfb", minfo)) {
+ clear_bit(0, &minfo->irq_flags);
return -EINVAL;
}
/* Clear any pending field interrupts */
@@ -252,37 +254,39 @@ int matroxfb_enable_irq(WPMINFO int reenable) {
return 0;
}
-static void matroxfb_disable_irq(WPMINFO2) {
- if (test_and_clear_bit(0, &ACCESS_FBINFO(irq_flags))) {
+static void matroxfb_disable_irq(struct matrox_fb_info *minfo)
+{
+ if (test_and_clear_bit(0, &minfo->irq_flags)) {
/* Flush pending pan-at-vbl request... */
- matroxfb_crtc1_panpos(PMINFO2);
- if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG400)
+ matroxfb_crtc1_panpos(minfo);
+ if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400)
mga_outl(M_IEN, mga_inl(M_IEN) & ~0x220);
else
mga_outl(M_IEN, mga_inl(M_IEN) & ~0x20);
- free_irq(ACCESS_FBINFO(pcidev)->irq, MINFO);
+ free_irq(minfo->pcidev->irq, minfo);
}
}
-int matroxfb_wait_for_sync(WPMINFO u_int32_t crtc) {
+int matroxfb_wait_for_sync(struct matrox_fb_info *minfo, u_int32_t crtc)
+{
struct matrox_vsync *vs;
unsigned int cnt;
int ret;
switch (crtc) {
case 0:
- vs = &ACCESS_FBINFO(crtc1.vsync);
+ vs = &minfo->crtc1.vsync;
break;
case 1:
- if (ACCESS_FBINFO(devflags.accelerator) != FB_ACCEL_MATROX_MGAG400) {
+ if (minfo->devflags.accelerator != FB_ACCEL_MATROX_MGAG400) {
return -ENODEV;
}
- vs = &ACCESS_FBINFO(crtc2.vsync);
+ vs = &minfo->crtc2.vsync;
break;
default:
return -ENODEV;
}
- ret = matroxfb_enable_irq(PMINFO 0);
+ ret = matroxfb_enable_irq(minfo, 0);
if (ret) {
return ret;
}
@@ -293,7 +297,7 @@ int matroxfb_wait_for_sync(WPMINFO u_int32_t crtc) {
return ret;
}
if (ret == 0) {
- matroxfb_enable_irq(PMINFO 1);
+ matroxfb_enable_irq(minfo, 1);
return -ETIMEDOUT;
}
return 0;
@@ -301,12 +305,12 @@ int matroxfb_wait_for_sync(WPMINFO u_int32_t crtc) {
/* --------------------------------------------------------------------- */
-static void matrox_pan_var(WPMINFO struct fb_var_screeninfo *var) {
+static void matrox_pan_var(struct matrox_fb_info *minfo,
+ struct fb_var_screeninfo *var)
+{
unsigned int pos;
unsigned short p0, p1, p2;
-#ifdef CONFIG_FB_MATROX_32MB
unsigned int p3;
-#endif
int vbl;
unsigned long flags;
@@ -314,47 +318,44 @@ static void matrox_pan_var(WPMINFO struct fb_var_screeninfo *var) {
DBG(__func__)
- if (ACCESS_FBINFO(dead))
+ if (minfo->dead)
return;
- ACCESS_FBINFO(fbcon).var.xoffset = var->xoffset;
- ACCESS_FBINFO(fbcon).var.yoffset = var->yoffset;
- pos = (ACCESS_FBINFO(fbcon).var.yoffset * ACCESS_FBINFO(fbcon).var.xres_virtual + ACCESS_FBINFO(fbcon).var.xoffset) * ACCESS_FBINFO(curr.final_bppShift) / 32;
- pos += ACCESS_FBINFO(curr.ydstorg.chunks);
- p0 = ACCESS_FBINFO(hw).CRTC[0x0D] = pos & 0xFF;
- p1 = ACCESS_FBINFO(hw).CRTC[0x0C] = (pos & 0xFF00) >> 8;
- p2 = ACCESS_FBINFO(hw).CRTCEXT[0] = (ACCESS_FBINFO(hw).CRTCEXT[0] & 0xB0) | ((pos >> 16) & 0x0F) | ((pos >> 14) & 0x40);
-#ifdef CONFIG_FB_MATROX_32MB
- p3 = ACCESS_FBINFO(hw).CRTCEXT[8] = pos >> 21;
-#endif
+ minfo->fbcon.var.xoffset = var->xoffset;
+ minfo->fbcon.var.yoffset = var->yoffset;
+ pos = (minfo->fbcon.var.yoffset * minfo->fbcon.var.xres_virtual + minfo->fbcon.var.xoffset) * minfo->curr.final_bppShift / 32;
+ pos += minfo->curr.ydstorg.chunks;
+ p0 = minfo->hw.CRTC[0x0D] = pos & 0xFF;
+ p1 = minfo->hw.CRTC[0x0C] = (pos & 0xFF00) >> 8;
+ p2 = minfo->hw.CRTCEXT[0] = (minfo->hw.CRTCEXT[0] & 0xB0) | ((pos >> 16) & 0x0F) | ((pos >> 14) & 0x40);
+ p3 = minfo->hw.CRTCEXT[8] = pos >> 21;
/* FB_ACTIVATE_VBL and we can acquire interrupts? Honor FB_ACTIVATE_VBL then... */
- vbl = (var->activate & FB_ACTIVATE_VBL) && (matroxfb_enable_irq(PMINFO 0) == 0);
+ vbl = (var->activate & FB_ACTIVATE_VBL) && (matroxfb_enable_irq(minfo, 0) == 0);
CRITBEGIN
matroxfb_DAC_lock_irqsave(flags);
mga_setr(M_CRTC_INDEX, 0x0D, p0);
mga_setr(M_CRTC_INDEX, 0x0C, p1);
-#ifdef CONFIG_FB_MATROX_32MB
- if (ACCESS_FBINFO(devflags.support32MB))
+ if (minfo->devflags.support32MB)
mga_setr(M_EXTVGA_INDEX, 0x08, p3);
-#endif
if (vbl) {
- ACCESS_FBINFO(crtc1.panpos) = p2;
+ minfo->crtc1.panpos = p2;
} else {
/* Abort any pending change */
- ACCESS_FBINFO(crtc1.panpos) = -1;
+ minfo->crtc1.panpos = -1;
mga_setr(M_EXTVGA_INDEX, 0x00, p2);
}
matroxfb_DAC_unlock_irqrestore(flags);
- update_crtc2(PMINFO pos);
+ update_crtc2(minfo, pos);
CRITEND
}
-static void matroxfb_remove(WPMINFO int dummy) {
+static void matroxfb_remove(struct matrox_fb_info *minfo, int dummy)
+{
/* Currently we are holding big kernel lock on all dead & usecount updates.
* Destroy everything after all users release it. Especially do not unregister
* framebuffer and iounmap memory, neither fbmem nor fbcon-cfb* does not check
@@ -363,25 +364,23 @@ static void matroxfb_remove(WPMINFO int dummy) {
* write data without causing too much damage...
*/
- ACCESS_FBINFO(dead) = 1;
- if (ACCESS_FBINFO(usecount)) {
+ minfo->dead = 1;
+ if (minfo->usecount) {
/* destroy it later */
return;
}
- matroxfb_unregister_device(MINFO);
- unregister_framebuffer(&ACCESS_FBINFO(fbcon));
- matroxfb_g450_shutdown(PMINFO2);
+ matroxfb_unregister_device(minfo);
+ unregister_framebuffer(&minfo->fbcon);
+ matroxfb_g450_shutdown(minfo);
#ifdef CONFIG_MTRR
- if (ACCESS_FBINFO(mtrr.vram_valid))
- mtrr_del(ACCESS_FBINFO(mtrr.vram), ACCESS_FBINFO(video.base), ACCESS_FBINFO(video.len));
+ if (minfo->mtrr.vram_valid)
+ mtrr_del(minfo->mtrr.vram, minfo->video.base, minfo->video.len);
#endif
- mga_iounmap(ACCESS_FBINFO(mmio.vbase));
- mga_iounmap(ACCESS_FBINFO(video.vbase));
- release_mem_region(ACCESS_FBINFO(video.base), ACCESS_FBINFO(video.len_maximum));
- release_mem_region(ACCESS_FBINFO(mmio.base), 16384);
-#ifdef CONFIG_FB_MATROX_MULTIHEAD
+ mga_iounmap(minfo->mmio.vbase);
+ mga_iounmap(minfo->video.vbase);
+ release_mem_region(minfo->video.base, minfo->video.len_maximum);
+ release_mem_region(minfo->mmio.base, 16384);
kfree(minfo);
-#endif
}
/*
@@ -390,48 +389,50 @@ static void matroxfb_remove(WPMINFO int dummy) {
static int matroxfb_open(struct fb_info *info, int user)
{
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG_LOOP(__func__)
- if (ACCESS_FBINFO(dead)) {
+ if (minfo->dead) {
return -ENXIO;
}
- ACCESS_FBINFO(usecount)++;
+ minfo->usecount++;
if (user) {
- ACCESS_FBINFO(userusecount)++;
+ minfo->userusecount++;
}
return(0);
}
static int matroxfb_release(struct fb_info *info, int user)
{
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG_LOOP(__func__)
if (user) {
- if (0 == --ACCESS_FBINFO(userusecount)) {
- matroxfb_disable_irq(PMINFO2);
+ if (0 == --minfo->userusecount) {
+ matroxfb_disable_irq(minfo);
}
}
- if (!(--ACCESS_FBINFO(usecount)) && ACCESS_FBINFO(dead)) {
- matroxfb_remove(PMINFO 0);
+ if (!(--minfo->usecount) && minfo->dead) {
+ matroxfb_remove(minfo, 0);
}
return(0);
}
static int matroxfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info* info) {
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG(__func__)
- matrox_pan_var(PMINFO var);
+ matrox_pan_var(minfo, var);
return 0;
}
-static int matroxfb_get_final_bppShift(CPMINFO int bpp) {
+static int matroxfb_get_final_bppShift(const struct matrox_fb_info *minfo,
+ int bpp)
+{
int bppshft2;
DBG(__func__)
@@ -440,14 +441,16 @@ static int matroxfb_get_final_bppShift(CPMINFO int bpp) {
if (!bppshft2) {
return 8;
}
- if (isInterleave(MINFO))
+ if (isInterleave(minfo))
bppshft2 >>= 1;
- if (ACCESS_FBINFO(devflags.video64bits))
+ if (minfo->devflags.video64bits)
bppshft2 >>= 1;
return bppshft2;
}
-static int matroxfb_test_and_set_rounding(CPMINFO int xres, int bpp) {
+static int matroxfb_test_and_set_rounding(const struct matrox_fb_info *minfo,
+ int xres, int bpp)
+{
int over;
int rounding;
@@ -465,11 +468,11 @@ static int matroxfb_test_and_set_rounding(CPMINFO int xres, int bpp) {
break;
default: rounding = 16;
/* on G400, 16 really does not work */
- if (ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG400)
+ if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400)
rounding = 32;
break;
}
- if (isInterleave(MINFO)) {
+ if (isInterleave(minfo)) {
rounding *= 2;
}
over = xres % rounding;
@@ -478,7 +481,9 @@ static int matroxfb_test_and_set_rounding(CPMINFO int xres, int bpp) {
return xres;
}
-static int matroxfb_pitch_adjust(CPMINFO int xres, int bpp) {
+static int matroxfb_pitch_adjust(const struct matrox_fb_info *minfo, int xres,
+ int bpp)
+{
const int* width;
int xres_new;
@@ -486,18 +491,18 @@ static int matroxfb_pitch_adjust(CPMINFO int xres, int bpp) {
if (!bpp) return xres;
- width = ACCESS_FBINFO(capable.vxres);
+ width = minfo->capable.vxres;
- if (ACCESS_FBINFO(devflags.precise_width)) {
+ if (minfo->devflags.precise_width) {
while (*width) {
- if ((*width >= xres) && (matroxfb_test_and_set_rounding(PMINFO *width, bpp) == *width)) {
+ if ((*width >= xres) && (matroxfb_test_and_set_rounding(minfo, *width, bpp) == *width)) {
break;
}
width++;
}
xres_new = *width;
} else {
- xres_new = matroxfb_test_and_set_rounding(PMINFO xres, bpp);
+ xres_new = matroxfb_test_and_set_rounding(minfo, xres, bpp);
}
return xres_new;
}
@@ -524,7 +529,10 @@ static int matroxfb_get_cmap_len(struct fb_var_screeninfo *var) {
return 16; /* return something reasonable... or panic()? */
}
-static int matroxfb_decode_var(CPMINFO struct fb_var_screeninfo *var, int *visual, int *video_cmap_len, unsigned int* ydstorg) {
+static int matroxfb_decode_var(const struct matrox_fb_info *minfo,
+ struct fb_var_screeninfo *var, int *visual,
+ int *video_cmap_len, unsigned int* ydstorg)
+{
struct RGBT {
unsigned char bpp;
struct {
@@ -551,7 +559,7 @@ static int matroxfb_decode_var(CPMINFO struct fb_var_screeninfo *var, int *visua
DBG(__func__)
switch (bpp) {
- case 4: if (!ACCESS_FBINFO(capable.cfb4)) return -EINVAL;
+ case 4: if (!minfo->capable.cfb4) return -EINVAL;
break;
case 8: break;
case 16: break;
@@ -560,13 +568,13 @@ static int matroxfb_decode_var(CPMINFO struct fb_var_screeninfo *var, int *visua
default: return -EINVAL;
}
*ydstorg = 0;
- vramlen = ACCESS_FBINFO(video.len_usable);
+ vramlen = minfo->video.len_usable;
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
- var->xres_virtual = matroxfb_pitch_adjust(PMINFO var->xres_virtual, bpp);
+ var->xres_virtual = matroxfb_pitch_adjust(minfo, var->xres_virtual, bpp);
memlen = var->xres_virtual * bpp * var->yres_virtual / 8;
if (memlen > vramlen) {
var->yres_virtual = vramlen * 8 / (var->xres_virtual * bpp);
@@ -575,7 +583,7 @@ static int matroxfb_decode_var(CPMINFO struct fb_var_screeninfo *var, int *visua
/* There is hardware bug that no line can cross 4MB boundary */
/* give up for CFB24, it is impossible to easy workaround it */
/* for other try to do something */
- if (!ACCESS_FBINFO(capable.cross4MB) && (memlen > 0x400000)) {
+ if (!minfo->capable.cross4MB && (memlen > 0x400000)) {
if (bpp == 24) {
/* sorry */
} else {
@@ -644,9 +652,7 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *fb_info)
{
-#ifdef CONFIG_FB_MATROX_MULTIHEAD
struct matrox_fb_info* minfo = container_of(fb_info, struct matrox_fb_info, fbcon);
-#endif
DBG(__func__)
@@ -657,20 +663,20 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
* != 0 for invalid regno.
*/
- if (regno >= ACCESS_FBINFO(curr.cmap_len))
+ if (regno >= minfo->curr.cmap_len)
return 1;
- if (ACCESS_FBINFO(fbcon).var.grayscale) {
+ if (minfo->fbcon.var.grayscale) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
}
- red = CNVT_TOHW(red, ACCESS_FBINFO(fbcon).var.red.length);
- green = CNVT_TOHW(green, ACCESS_FBINFO(fbcon).var.green.length);
- blue = CNVT_TOHW(blue, ACCESS_FBINFO(fbcon).var.blue.length);
- transp = CNVT_TOHW(transp, ACCESS_FBINFO(fbcon).var.transp.length);
+ red = CNVT_TOHW(red, minfo->fbcon.var.red.length);
+ green = CNVT_TOHW(green, minfo->fbcon.var.green.length);
+ blue = CNVT_TOHW(blue, minfo->fbcon.var.blue.length);
+ transp = CNVT_TOHW(transp, minfo->fbcon.var.transp.length);
- switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
+ switch (minfo->fbcon.var.bits_per_pixel) {
case 4:
case 8:
mga_outb(M_DAC_REG, regno);
@@ -683,30 +689,30 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
break;
{
u_int16_t col =
- (red << ACCESS_FBINFO(fbcon).var.red.offset) |
- (green << ACCESS_FBINFO(fbcon).var.green.offset) |
- (blue << ACCESS_FBINFO(fbcon).var.blue.offset) |
- (transp << ACCESS_FBINFO(fbcon).var.transp.offset); /* for 1:5:5:5 */
- ACCESS_FBINFO(cmap[regno]) = col | (col << 16);
+ (red << minfo->fbcon.var.red.offset) |
+ (green << minfo->fbcon.var.green.offset) |
+ (blue << minfo->fbcon.var.blue.offset) |
+ (transp << minfo->fbcon.var.transp.offset); /* for 1:5:5:5 */
+ minfo->cmap[regno] = col | (col << 16);
}
break;
case 24:
case 32:
if (regno >= 16)
break;
- ACCESS_FBINFO(cmap[regno]) =
- (red << ACCESS_FBINFO(fbcon).var.red.offset) |
- (green << ACCESS_FBINFO(fbcon).var.green.offset) |
- (blue << ACCESS_FBINFO(fbcon).var.blue.offset) |
- (transp << ACCESS_FBINFO(fbcon).var.transp.offset); /* 8:8:8:8 */
+ minfo->cmap[regno] =
+ (red << minfo->fbcon.var.red.offset) |
+ (green << minfo->fbcon.var.green.offset) |
+ (blue << minfo->fbcon.var.blue.offset) |
+ (transp << minfo->fbcon.var.transp.offset); /* 8:8:8:8 */
break;
}
return 0;
}
-static void matroxfb_init_fix(WPMINFO2)
+static void matroxfb_init_fix(struct matrox_fb_info *minfo)
{
- struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
+ struct fb_fix_screeninfo *fix = &minfo->fbcon.fix;
DBG(__func__)
strcpy(fix->id,"MATROX");
@@ -714,20 +720,20 @@ static void matroxfb_init_fix(WPMINFO2)
fix->xpanstep = 8; /* 8 for 8bpp, 4 for 16bpp, 2 for 32bpp */
fix->ypanstep = 1;
fix->ywrapstep = 0;
- fix->mmio_start = ACCESS_FBINFO(mmio.base);
- fix->mmio_len = ACCESS_FBINFO(mmio.len);
- fix->accel = ACCESS_FBINFO(devflags.accelerator);
+ fix->mmio_start = minfo->mmio.base;
+ fix->mmio_len = minfo->mmio.len;
+ fix->accel = minfo->devflags.accelerator;
}
-static void matroxfb_update_fix(WPMINFO2)
+static void matroxfb_update_fix(struct matrox_fb_info *minfo)
{
- struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
+ struct fb_fix_screeninfo *fix = &minfo->fbcon.fix;
DBG(__func__)
- mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
- fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
- fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
- mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
+ mutex_lock(&minfo->fbcon.mm_lock);
+ fix->smem_start = minfo->video.base + minfo->curr.ydstorg.bytes;
+ fix->smem_len = minfo->video.len_usable - minfo->curr.ydstorg.bytes;
+ mutex_unlock(&minfo->fbcon.mm_lock);
}
static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -736,12 +742,12 @@ static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
int visual;
int cmap_len;
unsigned int ydstorg;
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
- if (ACCESS_FBINFO(dead)) {
+ if (minfo->dead) {
return -ENXIO;
}
- if ((err = matroxfb_decode_var(PMINFO var, &visual, &cmap_len, &ydstorg)) != 0)
+ if ((err = matroxfb_decode_var(minfo, var, &visual, &cmap_len, &ydstorg)) != 0)
return err;
return 0;
}
@@ -753,35 +759,35 @@ static int matroxfb_set_par(struct fb_info *info)
int cmap_len;
unsigned int ydstorg;
struct fb_var_screeninfo *var;
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG(__func__)
- if (ACCESS_FBINFO(dead)) {
+ if (minfo->dead) {
return -ENXIO;
}
var = &info->var;
- if ((err = matroxfb_decode_var(PMINFO var, &visual, &cmap_len, &ydstorg)) != 0)
+ if ((err = matroxfb_decode_var(minfo, var, &visual, &cmap_len, &ydstorg)) != 0)
return err;
- ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase)) + ydstorg;
- matroxfb_update_fix(PMINFO2);
- ACCESS_FBINFO(fbcon).fix.visual = visual;
- ACCESS_FBINFO(fbcon).fix.type = FB_TYPE_PACKED_PIXELS;
- ACCESS_FBINFO(fbcon).fix.type_aux = 0;
- ACCESS_FBINFO(fbcon).fix.line_length = (var->xres_virtual * var->bits_per_pixel) >> 3;
+ minfo->fbcon.screen_base = vaddr_va(minfo->video.vbase) + ydstorg;
+ matroxfb_update_fix(minfo);
+ minfo->fbcon.fix.visual = visual;
+ minfo->fbcon.fix.type = FB_TYPE_PACKED_PIXELS;
+ minfo->fbcon.fix.type_aux = 0;
+ minfo->fbcon.fix.line_length = (var->xres_virtual * var->bits_per_pixel) >> 3;
{
unsigned int pos;
- ACCESS_FBINFO(curr.cmap_len) = cmap_len;
- ydstorg += ACCESS_FBINFO(devflags.ydstorg);
- ACCESS_FBINFO(curr.ydstorg.bytes) = ydstorg;
- ACCESS_FBINFO(curr.ydstorg.chunks) = ydstorg >> (isInterleave(MINFO)?3:2);
+ minfo->curr.cmap_len = cmap_len;
+ ydstorg += minfo->devflags.ydstorg;
+ minfo->curr.ydstorg.bytes = ydstorg;
+ minfo->curr.ydstorg.chunks = ydstorg >> (isInterleave(minfo) ? 3 : 2);
if (var->bits_per_pixel == 4)
- ACCESS_FBINFO(curr.ydstorg.pixels) = ydstorg;
+ minfo->curr.ydstorg.pixels = ydstorg;
else
- ACCESS_FBINFO(curr.ydstorg.pixels) = (ydstorg * 8) / var->bits_per_pixel;
- ACCESS_FBINFO(curr.final_bppShift) = matroxfb_get_final_bppShift(PMINFO var->bits_per_pixel);
+ minfo->curr.ydstorg.pixels = (ydstorg * 8) / var->bits_per_pixel;
+ minfo->curr.final_bppShift = matroxfb_get_final_bppShift(minfo, var->bits_per_pixel);
{ struct my_timming mt;
struct matrox_hw_state* hw;
int out;
@@ -797,54 +803,55 @@ static int matroxfb_set_par(struct fb_info *info)
default: mt.delay = 31 + 8; break;
}
- hw = &ACCESS_FBINFO(hw);
+ hw = &minfo->hw;
- down_read(&ACCESS_FBINFO(altout).lock);
+ down_read(&minfo->altout.lock);
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC1 &&
- ACCESS_FBINFO(outputs[out]).output->compute) {
- ACCESS_FBINFO(outputs[out]).output->compute(ACCESS_FBINFO(outputs[out]).data, &mt);
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC1 &&
+ minfo->outputs[out].output->compute) {
+ minfo->outputs[out].output->compute(minfo->outputs[out].data, &mt);
}
}
- up_read(&ACCESS_FBINFO(altout).lock);
- ACCESS_FBINFO(crtc1).pixclock = mt.pixclock;
- ACCESS_FBINFO(crtc1).mnp = mt.mnp;
- ACCESS_FBINFO(hw_switch->init(PMINFO &mt));
- pos = (var->yoffset * var->xres_virtual + var->xoffset) * ACCESS_FBINFO(curr.final_bppShift) / 32;
- pos += ACCESS_FBINFO(curr.ydstorg.chunks);
+ up_read(&minfo->altout.lock);
+ minfo->crtc1.pixclock = mt.pixclock;
+ minfo->crtc1.mnp = mt.mnp;
+ minfo->hw_switch->init(minfo, &mt);
+ pos = (var->yoffset * var->xres_virtual + var->xoffset) * minfo->curr.final_bppShift / 32;
+ pos += minfo->curr.ydstorg.chunks;
hw->CRTC[0x0D] = pos & 0xFF;
hw->CRTC[0x0C] = (pos & 0xFF00) >> 8;
hw->CRTCEXT[0] = (hw->CRTCEXT[0] & 0xF0) | ((pos >> 16) & 0x0F) | ((pos >> 14) & 0x40);
hw->CRTCEXT[8] = pos >> 21;
- ACCESS_FBINFO(hw_switch->restore(PMINFO2));
- update_crtc2(PMINFO pos);
- down_read(&ACCESS_FBINFO(altout).lock);
+ minfo->hw_switch->restore(minfo);
+ update_crtc2(minfo, pos);
+ down_read(&minfo->altout.lock);
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC1 &&
- ACCESS_FBINFO(outputs[out]).output->program) {
- ACCESS_FBINFO(outputs[out]).output->program(ACCESS_FBINFO(outputs[out]).data);
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC1 &&
+ minfo->outputs[out].output->program) {
+ minfo->outputs[out].output->program(minfo->outputs[out].data);
}
}
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC1 &&
- ACCESS_FBINFO(outputs[out]).output->start) {
- ACCESS_FBINFO(outputs[out]).output->start(ACCESS_FBINFO(outputs[out]).data);
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC1 &&
+ minfo->outputs[out].output->start) {
+ minfo->outputs[out].output->start(minfo->outputs[out].data);
}
}
- up_read(&ACCESS_FBINFO(altout).lock);
- matrox_cfbX_init(PMINFO2);
+ up_read(&minfo->altout.lock);
+ matrox_cfbX_init(minfo);
}
}
- ACCESS_FBINFO(initialized) = 1;
+ minfo->initialized = 1;
return 0;
}
-static int matroxfb_get_vblank(WPMINFO struct fb_vblank *vblank)
+static int matroxfb_get_vblank(struct matrox_fb_info *minfo,
+ struct fb_vblank *vblank)
{
unsigned int sts1;
- matroxfb_enable_irq(PMINFO 0);
+ matroxfb_enable_irq(minfo, 0);
memset(vblank, 0, sizeof(*vblank));
vblank->flags = FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VSYNC |
FB_VBLANK_HAVE_VBLANK | FB_VBLANK_HAVE_HBLANK;
@@ -857,13 +864,13 @@ static int matroxfb_get_vblank(WPMINFO struct fb_vblank *vblank)
vblank->flags |= FB_VBLANK_HBLANKING;
if (sts1 & 8)
vblank->flags |= FB_VBLANK_VSYNCING;
- if (vblank->vcount >= ACCESS_FBINFO(fbcon).var.yres)
+ if (vblank->vcount >= minfo->fbcon.var.yres)
vblank->flags |= FB_VBLANK_VBLANKING;
- if (test_bit(0, &ACCESS_FBINFO(irq_flags))) {
+ if (test_bit(0, &minfo->irq_flags)) {
vblank->flags |= FB_VBLANK_HAVE_COUNT;
/* Only one writer, aligned int value...
it should work without lock and without atomic_t */
- vblank->count = ACCESS_FBINFO(crtc1).vsync.cnt;
+ vblank->count = minfo->crtc1.vsync.cnt;
}
return 0;
}
@@ -876,11 +883,11 @@ static int matroxfb_ioctl(struct fb_info *info,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG(__func__)
- if (ACCESS_FBINFO(dead)) {
+ if (minfo->dead) {
return -ENXIO;
}
@@ -890,7 +897,7 @@ static int matroxfb_ioctl(struct fb_info *info,
struct fb_vblank vblank;
int err;
- err = matroxfb_get_vblank(PMINFO &vblank);
+ err = matroxfb_get_vblank(minfo, &vblank);
if (err)
return err;
if (copy_to_user(argp, &vblank, sizeof(vblank)))
@@ -904,7 +911,7 @@ static int matroxfb_ioctl(struct fb_info *info,
if (get_user(crt, (u_int32_t __user *)arg))
return -EFAULT;
- return matroxfb_wait_for_sync(PMINFO crt);
+ return matroxfb_wait_for_sync(minfo, crt);
}
case MATROXFB_SET_OUTPUT_MODE:
{
@@ -916,8 +923,8 @@ static int matroxfb_ioctl(struct fb_info *info,
return -EFAULT;
if (mom.output >= MATROXFB_MAX_OUTPUTS)
return -ENXIO;
- down_read(&ACCESS_FBINFO(altout.lock));
- oproc = ACCESS_FBINFO(outputs[mom.output]).output;
+ down_read(&minfo->altout.lock);
+ oproc = minfo->outputs[mom.output].output;
if (!oproc) {
val = -ENXIO;
} else if (!oproc->verifymode) {
@@ -927,18 +934,18 @@ static int matroxfb_ioctl(struct fb_info *info,
val = -EINVAL;
}
} else {
- val = oproc->verifymode(ACCESS_FBINFO(outputs[mom.output]).data, mom.mode);
+ val = oproc->verifymode(minfo->outputs[mom.output].data, mom.mode);
}
if (!val) {
- if (ACCESS_FBINFO(outputs[mom.output]).mode != mom.mode) {
- ACCESS_FBINFO(outputs[mom.output]).mode = mom.mode;
+ if (minfo->outputs[mom.output].mode != mom.mode) {
+ minfo->outputs[mom.output].mode = mom.mode;
val = 1;
}
}
- up_read(&ACCESS_FBINFO(altout.lock));
+ up_read(&minfo->altout.lock);
if (val != 1)
return val;
- switch (ACCESS_FBINFO(outputs[mom.output]).src) {
+ switch (minfo->outputs[mom.output].src) {
case MATROXFB_SRC_CRTC1:
matroxfb_set_par(info);
break;
@@ -946,11 +953,11 @@ static int matroxfb_ioctl(struct fb_info *info,
{
struct matroxfb_dh_fb_info* crtc2;
- down_read(&ACCESS_FBINFO(crtc2.lock));
- crtc2 = ACCESS_FBINFO(crtc2.info);
+ down_read(&minfo->crtc2.lock);
+ crtc2 = minfo->crtc2.info;
if (crtc2)
crtc2->fbcon.fbops->fb_set_par(&crtc2->fbcon);
- up_read(&ACCESS_FBINFO(crtc2.lock));
+ up_read(&minfo->crtc2.lock);
}
break;
}
@@ -966,15 +973,15 @@ static int matroxfb_ioctl(struct fb_info *info,
return -EFAULT;
if (mom.output >= MATROXFB_MAX_OUTPUTS)
return -ENXIO;
- down_read(&ACCESS_FBINFO(altout.lock));
- oproc = ACCESS_FBINFO(outputs[mom.output]).output;
+ down_read(&minfo->altout.lock);
+ oproc = minfo->outputs[mom.output].output;
if (!oproc) {
val = -ENXIO;
} else {
- mom.mode = ACCESS_FBINFO(outputs[mom.output]).mode;
+ mom.mode = minfo->outputs[mom.output].mode;
val = 0;
}
- up_read(&ACCESS_FBINFO(altout.lock));
+ up_read(&minfo->altout.lock);
if (val)
return val;
if (copy_to_user(argp, &mom, sizeof(mom)))
@@ -993,9 +1000,9 @@ static int matroxfb_ioctl(struct fb_info *info,
if (tmp & (1 << i)) {
if (i >= MATROXFB_MAX_OUTPUTS)
return -ENXIO;
- if (!ACCESS_FBINFO(outputs[i]).output)
+ if (!minfo->outputs[i].output)
return -ENXIO;
- switch (ACCESS_FBINFO(outputs[i]).src) {
+ switch (minfo->outputs[i].src) {
case MATROXFB_SRC_NONE:
case MATROXFB_SRC_CRTC1:
break;
@@ -1004,12 +1011,12 @@ static int matroxfb_ioctl(struct fb_info *info,
}
}
}
- if (ACCESS_FBINFO(devflags.panellink)) {
+ if (minfo->devflags.panellink) {
if (tmp & MATROXFB_OUTPUT_CONN_DFP) {
if (tmp & MATROXFB_OUTPUT_CONN_SECONDARY)
return -EINVAL;
for (i = 0; i < MATROXFB_MAX_OUTPUTS; i++) {
- if (ACCESS_FBINFO(outputs[i]).src == MATROXFB_SRC_CRTC2) {
+ if (minfo->outputs[i].src == MATROXFB_SRC_CRTC2) {
return -EBUSY;
}
}
@@ -1018,13 +1025,13 @@ static int matroxfb_ioctl(struct fb_info *info,
changes = 0;
for (i = 0; i < MATROXFB_MAX_OUTPUTS; i++) {
if (tmp & (1 << i)) {
- if (ACCESS_FBINFO(outputs[i]).src != MATROXFB_SRC_CRTC1) {
+ if (minfo->outputs[i].src != MATROXFB_SRC_CRTC1) {
changes = 1;
- ACCESS_FBINFO(outputs[i]).src = MATROXFB_SRC_CRTC1;
+ minfo->outputs[i].src = MATROXFB_SRC_CRTC1;
}
- } else if (ACCESS_FBINFO(outputs[i]).src == MATROXFB_SRC_CRTC1) {
+ } else if (minfo->outputs[i].src == MATROXFB_SRC_CRTC1) {
changes = 1;
- ACCESS_FBINFO(outputs[i]).src = MATROXFB_SRC_NONE;
+ minfo->outputs[i].src = MATROXFB_SRC_NONE;
}
}
if (!changes)
@@ -1038,7 +1045,7 @@ static int matroxfb_ioctl(struct fb_info *info,
int i;
for (i = 0; i < MATROXFB_MAX_OUTPUTS; i++) {
- if (ACCESS_FBINFO(outputs[i]).src == MATROXFB_SRC_CRTC1) {
+ if (minfo->outputs[i].src == MATROXFB_SRC_CRTC1) {
conn |= 1 << i;
}
}
@@ -1052,8 +1059,8 @@ static int matroxfb_ioctl(struct fb_info *info,
int i;
for (i = 0; i < MATROXFB_MAX_OUTPUTS; i++) {
- if (ACCESS_FBINFO(outputs[i]).output) {
- switch (ACCESS_FBINFO(outputs[i]).src) {
+ if (minfo->outputs[i].output) {
+ switch (minfo->outputs[i].src) {
case MATROXFB_SRC_NONE:
case MATROXFB_SRC_CRTC1:
conn |= 1 << i;
@@ -1061,7 +1068,7 @@ static int matroxfb_ioctl(struct fb_info *info,
}
}
}
- if (ACCESS_FBINFO(devflags.panellink)) {
+ if (minfo->devflags.panellink) {
if (conn & MATROXFB_OUTPUT_CONN_DFP)
conn &= ~MATROXFB_OUTPUT_CONN_SECONDARY;
if (conn & MATROXFB_OUTPUT_CONN_SECONDARY)
@@ -1077,7 +1084,7 @@ static int matroxfb_ioctl(struct fb_info *info,
int i;
for (i = 0; i < MATROXFB_MAX_OUTPUTS; i++) {
- if (ACCESS_FBINFO(outputs[i]).output) {
+ if (minfo->outputs[i].output) {
conn |= 1 << i;
}
}
@@ -1092,7 +1099,7 @@ static int matroxfb_ioctl(struct fb_info *info,
memset(&r, 0, sizeof(r));
strcpy(r.driver, "matroxfb");
strcpy(r.card, "Matrox");
- sprintf(r.bus_info, "PCI:%s", pci_name(ACCESS_FBINFO(pcidev)));
+ sprintf(r.bus_info, "PCI:%s", pci_name(minfo->pcidev));
r.version = KERNEL_VERSION(1,0,0);
r.capabilities = V4L2_CAP_VIDEO_OUTPUT;
if (copy_to_user(argp, &r, sizeof(r)))
@@ -1108,15 +1115,15 @@ static int matroxfb_ioctl(struct fb_info *info,
if (copy_from_user(&qctrl, argp, sizeof(qctrl)))
return -EFAULT;
- down_read(&ACCESS_FBINFO(altout).lock);
- if (!ACCESS_FBINFO(outputs[1]).output) {
+ down_read(&minfo->altout.lock);
+ if (!minfo->outputs[1].output) {
err = -ENXIO;
- } else if (ACCESS_FBINFO(outputs[1]).output->getqueryctrl) {
- err = ACCESS_FBINFO(outputs[1]).output->getqueryctrl(ACCESS_FBINFO(outputs[1]).data, &qctrl);
+ } else if (minfo->outputs[1].output->getqueryctrl) {
+ err = minfo->outputs[1].output->getqueryctrl(minfo->outputs[1].data, &qctrl);
} else {
err = -EINVAL;
}
- up_read(&ACCESS_FBINFO(altout).lock);
+ up_read(&minfo->altout.lock);
if (err >= 0 &&
copy_to_user(argp, &qctrl, sizeof(qctrl)))
return -EFAULT;
@@ -1130,15 +1137,15 @@ static int matroxfb_ioctl(struct fb_info *info,
if (copy_from_user(&ctrl, argp, sizeof(ctrl)))
return -EFAULT;
- down_read(&ACCESS_FBINFO(altout).lock);
- if (!ACCESS_FBINFO(outputs[1]).output) {
+ down_read(&minfo->altout.lock);
+ if (!minfo->outputs[1].output) {
err = -ENXIO;
- } else if (ACCESS_FBINFO(outputs[1]).output->getctrl) {
- err = ACCESS_FBINFO(outputs[1]).output->getctrl(ACCESS_FBINFO(outputs[1]).data, &ctrl);
+ } else if (minfo->outputs[1].output->getctrl) {
+ err = minfo->outputs[1].output->getctrl(minfo->outputs[1].data, &ctrl);
} else {
err = -EINVAL;
}
- up_read(&ACCESS_FBINFO(altout).lock);
+ up_read(&minfo->altout.lock);
if (err >= 0 &&
copy_to_user(argp, &ctrl, sizeof(ctrl)))
return -EFAULT;
@@ -1153,15 +1160,15 @@ static int matroxfb_ioctl(struct fb_info *info,
if (copy_from_user(&ctrl, argp, sizeof(ctrl)))
return -EFAULT;
- down_read(&ACCESS_FBINFO(altout).lock);
- if (!ACCESS_FBINFO(outputs[1]).output) {
+ down_read(&minfo->altout.lock);
+ if (!minfo->outputs[1].output) {
err = -ENXIO;
- } else if (ACCESS_FBINFO(outputs[1]).output->setctrl) {
- err = ACCESS_FBINFO(outputs[1]).output->setctrl(ACCESS_FBINFO(outputs[1]).data, &ctrl);
+ } else if (minfo->outputs[1].output->setctrl) {
+ err = minfo->outputs[1].output->setctrl(minfo->outputs[1].data, &ctrl);
} else {
err = -EINVAL;
}
- up_read(&ACCESS_FBINFO(altout).lock);
+ up_read(&minfo->altout.lock);
return err;
}
}
@@ -1175,11 +1182,11 @@ static int matroxfb_blank(int blank, struct fb_info *info)
int seq;
int crtc;
CRITFLAGS
- MINFO_FROM_INFO(info);
+ struct matrox_fb_info *minfo = info2minfo(info);
DBG(__func__)
- if (ACCESS_FBINFO(dead))
+ if (minfo->dead)
return 1;
switch (blank) {
@@ -1281,7 +1288,9 @@ static char outputs[8]; /* "matrox:outputs:xxx" */
static char videomode[64]; /* "matrox:mode:xxxxx" or "matrox:xxxxx" */
#endif
-static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSize){
+static int matroxfb_getmemory(struct matrox_fb_info *minfo,
+ unsigned int maxSize, unsigned int *realSize)
+{
vaddr_t vm;
unsigned int offs;
unsigned int offs2;
@@ -1291,7 +1300,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
DBG(__func__)
- vm = ACCESS_FBINFO(video.vbase);
+ vm = minfo->video.vbase;
maxSize &= ~0x1FFFFF; /* must be X*2MB (really it must be 2 or X*4MB) */
/* at least 2MB */
if (maxSize < 0x0200000) return 0;
@@ -1323,7 +1332,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
*realSize = offs - 0x100000;
#ifdef CONFIG_FB_MATROX_MILLENIUM
- ACCESS_FBINFO(interleave) = !(!isMillenium(MINFO) || ((offs - 0x100000) & 0x3FFFFF));
+ minfo->interleave = !(!isMillenium(minfo) || ((offs - 0x100000) & 0x3FFFFF));
#endif
return 1;
}
@@ -1345,13 +1354,9 @@ static struct video_board vbMystique = {0x0800000, 0x0800000, FB_ACCEL_MATROX_M
#ifdef CONFIG_FB_MATROX_G
static struct video_board vbG100 = {0x0800000, 0x0800000, FB_ACCEL_MATROX_MGAG100, &matrox_G100};
static struct video_board vbG200 = {0x1000000, 0x1000000, FB_ACCEL_MATROX_MGAG200, &matrox_G100};
-#ifdef CONFIG_FB_MATROX_32MB
/* from the docs it looks like the accelerator can only draw to the low 16MB :-( Direct accesses & displaying are OK for the
   whole 32MB */
static struct video_board vbG400 = {0x2000000, 0x1000000, FB_ACCEL_MATROX_MGAG400, &matrox_G100};
-#else
-static struct video_board vbG400 = {0x2000000, 0x1000000, FB_ACCEL_MATROX_MGAG400, &matrox_G100};
-#endif
#endif
#define DEVF_VIDEO64BIT 0x0001
@@ -1558,16 +1563,17 @@ static struct fb_videomode defaultmode = {
static int hotplug = 0;
-static void setDefaultOutputs(WPMINFO2) {
+static void setDefaultOutputs(struct matrox_fb_info *minfo)
+{
unsigned int i;
const char* ptr;
- ACCESS_FBINFO(outputs[0]).default_src = MATROXFB_SRC_CRTC1;
- if (ACCESS_FBINFO(devflags.g450dac)) {
- ACCESS_FBINFO(outputs[1]).default_src = MATROXFB_SRC_CRTC1;
- ACCESS_FBINFO(outputs[2]).default_src = MATROXFB_SRC_CRTC1;
+ minfo->outputs[0].default_src = MATROXFB_SRC_CRTC1;
+ if (minfo->devflags.g450dac) {
+ minfo->outputs[1].default_src = MATROXFB_SRC_CRTC1;
+ minfo->outputs[2].default_src = MATROXFB_SRC_CRTC1;
} else if (dfp) {
- ACCESS_FBINFO(outputs[2]).default_src = MATROXFB_SRC_CRTC1;
+ minfo->outputs[2].default_src = MATROXFB_SRC_CRTC1;
}
ptr = outputs;
for (i = 0; i < MATROXFB_MAX_OUTPUTS; i++) {
@@ -1577,11 +1583,11 @@ static void setDefaultOutputs(WPMINFO2) {
break;
}
if (c == '0') {
- ACCESS_FBINFO(outputs[i]).default_src = MATROXFB_SRC_NONE;
+ minfo->outputs[i].default_src = MATROXFB_SRC_NONE;
} else if (c == '1') {
- ACCESS_FBINFO(outputs[i]).default_src = MATROXFB_SRC_CRTC1;
- } else if (c == '2' && ACCESS_FBINFO(devflags.crtc2)) {
- ACCESS_FBINFO(outputs[i]).default_src = MATROXFB_SRC_CRTC2;
+ minfo->outputs[i].default_src = MATROXFB_SRC_CRTC1;
+ } else if (c == '2' && minfo->devflags.crtc2) {
+ minfo->outputs[i].default_src = MATROXFB_SRC_CRTC2;
} else {
printk(KERN_ERR "matroxfb: Unknown outputs setting\n");
break;
@@ -1591,7 +1597,8 @@ static void setDefaultOutputs(WPMINFO2) {
outputs[0] = 0;
}
-static int initMatrox2(WPMINFO struct board* b){
+static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
+{
unsigned long ctrlptr_phys = 0;
unsigned long video_base_phys = 0;
unsigned int memsize;
@@ -1607,58 +1614,56 @@ static int initMatrox2(WPMINFO struct board* b){
/* set default values... */
vesafb_defined.accel_flags = FB_ACCELF_TEXT;
- ACCESS_FBINFO(hw_switch) = b->base->lowlevel;
- ACCESS_FBINFO(devflags.accelerator) = b->base->accelID;
- ACCESS_FBINFO(max_pixel_clock) = b->maxclk;
+ minfo->hw_switch = b->base->lowlevel;
+ minfo->devflags.accelerator = b->base->accelID;
+ minfo->max_pixel_clock = b->maxclk;
printk(KERN_INFO "matroxfb: Matrox %s detected\n", b->name);
- ACCESS_FBINFO(capable.plnwt) = 1;
- ACCESS_FBINFO(chip) = b->chip;
- ACCESS_FBINFO(capable.srcorg) = b->flags & DEVF_SRCORG;
- ACCESS_FBINFO(devflags.video64bits) = b->flags & DEVF_VIDEO64BIT;
+ minfo->capable.plnwt = 1;
+ minfo->chip = b->chip;
+ minfo->capable.srcorg = b->flags & DEVF_SRCORG;
+ minfo->devflags.video64bits = b->flags & DEVF_VIDEO64BIT;
if (b->flags & DEVF_TEXT4B) {
- ACCESS_FBINFO(devflags.vgastep) = 4;
- ACCESS_FBINFO(devflags.textmode) = 4;
- ACCESS_FBINFO(devflags.text_type_aux) = FB_AUX_TEXT_MGA_STEP16;
+ minfo->devflags.vgastep = 4;
+ minfo->devflags.textmode = 4;
+ minfo->devflags.text_type_aux = FB_AUX_TEXT_MGA_STEP16;
} else if (b->flags & DEVF_TEXT16B) {
- ACCESS_FBINFO(devflags.vgastep) = 16;
- ACCESS_FBINFO(devflags.textmode) = 1;
- ACCESS_FBINFO(devflags.text_type_aux) = FB_AUX_TEXT_MGA_STEP16;
+ minfo->devflags.vgastep = 16;
+ minfo->devflags.textmode = 1;
+ minfo->devflags.text_type_aux = FB_AUX_TEXT_MGA_STEP16;
} else {
- ACCESS_FBINFO(devflags.vgastep) = 8;
- ACCESS_FBINFO(devflags.textmode) = 1;
- ACCESS_FBINFO(devflags.text_type_aux) = FB_AUX_TEXT_MGA_STEP8;
- }
-#ifdef CONFIG_FB_MATROX_32MB
- ACCESS_FBINFO(devflags.support32MB) = (b->flags & DEVF_SUPPORT32MB) != 0;
-#endif
- ACCESS_FBINFO(devflags.precise_width) = !(b->flags & DEVF_ANY_VXRES);
- ACCESS_FBINFO(devflags.crtc2) = (b->flags & DEVF_CRTC2) != 0;
- ACCESS_FBINFO(devflags.maven_capable) = (b->flags & DEVF_MAVEN_CAPABLE) != 0;
- ACCESS_FBINFO(devflags.dualhead) = (b->flags & DEVF_DUALHEAD) != 0;
- ACCESS_FBINFO(devflags.dfp_type) = dfp_type;
- ACCESS_FBINFO(devflags.g450dac) = (b->flags & DEVF_G450DAC) != 0;
- ACCESS_FBINFO(devflags.textstep) = ACCESS_FBINFO(devflags.vgastep) * ACCESS_FBINFO(devflags.textmode);
- ACCESS_FBINFO(devflags.textvram) = 65536 / ACCESS_FBINFO(devflags.textmode);
- setDefaultOutputs(PMINFO2);
+ minfo->devflags.vgastep = 8;
+ minfo->devflags.textmode = 1;
+ minfo->devflags.text_type_aux = FB_AUX_TEXT_MGA_STEP8;
+ }
+ minfo->devflags.support32MB = (b->flags & DEVF_SUPPORT32MB) != 0;
+ minfo->devflags.precise_width = !(b->flags & DEVF_ANY_VXRES);
+ minfo->devflags.crtc2 = (b->flags & DEVF_CRTC2) != 0;
+ minfo->devflags.maven_capable = (b->flags & DEVF_MAVEN_CAPABLE) != 0;
+ minfo->devflags.dualhead = (b->flags & DEVF_DUALHEAD) != 0;
+ minfo->devflags.dfp_type = dfp_type;
+ minfo->devflags.g450dac = (b->flags & DEVF_G450DAC) != 0;
+ minfo->devflags.textstep = minfo->devflags.vgastep * minfo->devflags.textmode;
+ minfo->devflags.textvram = 65536 / minfo->devflags.textmode;
+ setDefaultOutputs(minfo);
if (b->flags & DEVF_PANELLINK_CAPABLE) {
- ACCESS_FBINFO(outputs[2]).data = MINFO;
- ACCESS_FBINFO(outputs[2]).output = &panellink_output;
- ACCESS_FBINFO(outputs[2]).src = ACCESS_FBINFO(outputs[2]).default_src;
- ACCESS_FBINFO(outputs[2]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- ACCESS_FBINFO(devflags.panellink) = 1;
+ minfo->outputs[2].data = minfo;
+ minfo->outputs[2].output = &panellink_output;
+ minfo->outputs[2].src = minfo->outputs[2].default_src;
+ minfo->outputs[2].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ minfo->devflags.panellink = 1;
}
- if (ACCESS_FBINFO(capable.cross4MB) < 0)
- ACCESS_FBINFO(capable.cross4MB) = b->flags & DEVF_CROSS4MB;
+ if (minfo->capable.cross4MB < 0)
+ minfo->capable.cross4MB = b->flags & DEVF_CROSS4MB;
if (b->flags & DEVF_SWAPS) {
- ctrlptr_phys = pci_resource_start(ACCESS_FBINFO(pcidev), 1);
- video_base_phys = pci_resource_start(ACCESS_FBINFO(pcidev), 0);
- ACCESS_FBINFO(devflags.fbResource) = PCI_BASE_ADDRESS_0;
+ ctrlptr_phys = pci_resource_start(minfo->pcidev, 1);
+ video_base_phys = pci_resource_start(minfo->pcidev, 0);
+ minfo->devflags.fbResource = PCI_BASE_ADDRESS_0;
} else {
- ctrlptr_phys = pci_resource_start(ACCESS_FBINFO(pcidev), 0);
- video_base_phys = pci_resource_start(ACCESS_FBINFO(pcidev), 1);
- ACCESS_FBINFO(devflags.fbResource) = PCI_BASE_ADDRESS_1;
+ ctrlptr_phys = pci_resource_start(minfo->pcidev, 0);
+ video_base_phys = pci_resource_start(minfo->pcidev, 1);
+ minfo->devflags.fbResource = PCI_BASE_ADDRESS_1;
}
err = -EINVAL;
if (!ctrlptr_phys) {
@@ -1676,7 +1681,7 @@ static int initMatrox2(WPMINFO struct board* b){
if (!request_mem_region(video_base_phys, memsize, "matroxfb FB")) {
goto failCtrlMR;
}
- ACCESS_FBINFO(video.len_maximum) = memsize;
+ minfo->video.len_maximum = memsize;
/* convert mem (autodetect k, M) */
if (mem < 1024) mem *= 1024;
if (mem < 0x00100000) mem *= 1024;
@@ -1684,14 +1689,14 @@ static int initMatrox2(WPMINFO struct board* b){
if (mem && (mem < memsize))
memsize = mem;
err = -ENOMEM;
- if (mga_ioremap(ctrlptr_phys, 16384, MGA_IOREMAP_MMIO, &ACCESS_FBINFO(mmio.vbase))) {
+ if (mga_ioremap(ctrlptr_phys, 16384, MGA_IOREMAP_MMIO, &minfo->mmio.vbase)) {
printk(KERN_ERR "matroxfb: cannot ioremap(%lX, 16384), matroxfb disabled\n", ctrlptr_phys);
goto failVideoMR;
}
- ACCESS_FBINFO(mmio.base) = ctrlptr_phys;
- ACCESS_FBINFO(mmio.len) = 16384;
- ACCESS_FBINFO(video.base) = video_base_phys;
- if (mga_ioremap(video_base_phys, memsize, MGA_IOREMAP_FB, &ACCESS_FBINFO(video.vbase))) {
+ minfo->mmio.base = ctrlptr_phys;
+ minfo->mmio.len = 16384;
+ minfo->video.base = video_base_phys;
+ if (mga_ioremap(video_base_phys, memsize, MGA_IOREMAP_FB, &minfo->video.vbase)) {
printk(KERN_ERR "matroxfb: cannot ioremap(%lX, %d), matroxfb disabled\n",
video_base_phys, memsize);
goto failCtrlIO;
@@ -1700,63 +1705,63 @@ static int initMatrox2(WPMINFO struct board* b){
u_int32_t cmd;
u_int32_t mga_option;
- pci_read_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, &mga_option);
- pci_read_config_dword(ACCESS_FBINFO(pcidev), PCI_COMMAND, &cmd);
+ pci_read_config_dword(minfo->pcidev, PCI_OPTION_REG, &mga_option);
+ pci_read_config_dword(minfo->pcidev, PCI_COMMAND, &cmd);
mga_option &= 0x7FFFFFFF; /* clear BIG_ENDIAN */
mga_option |= MX_OPTION_BSWAP;
/* disable palette snooping */
cmd &= ~PCI_COMMAND_VGA_PALETTE;
if (pci_dev_present(intel_82437)) {
- if (!(mga_option & 0x20000000) && !ACCESS_FBINFO(devflags.nopciretry)) {
+ if (!(mga_option & 0x20000000) && !minfo->devflags.nopciretry) {
printk(KERN_WARNING "matroxfb: Disabling PCI retries due to i82437 present\n");
}
mga_option |= 0x20000000;
- ACCESS_FBINFO(devflags.nopciretry) = 1;
+ minfo->devflags.nopciretry = 1;
}
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_COMMAND, cmd);
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, mga_option);
- ACCESS_FBINFO(hw).MXoptionReg = mga_option;
+ pci_write_config_dword(minfo->pcidev, PCI_COMMAND, cmd);
+ pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mga_option);
+ minfo->hw.MXoptionReg = mga_option;
/* select non-DMA memory for PCI_MGA_DATA, otherwise dump of PCI cfg space can lock PCI bus */
		/* maybe a preinit() candidate, but it is the same... for all devices... at this time... */
- pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_MGA_INDEX, 0x00003C00);
+ pci_write_config_dword(minfo->pcidev, PCI_MGA_INDEX, 0x00003C00);
}
err = -ENXIO;
- matroxfb_read_pins(PMINFO2);
- if (ACCESS_FBINFO(hw_switch)->preinit(PMINFO2)) {
+ matroxfb_read_pins(minfo);
+ if (minfo->hw_switch->preinit(minfo)) {
goto failVideoIO;
}
err = -ENOMEM;
- if (!matroxfb_getmemory(PMINFO memsize, &ACCESS_FBINFO(video.len)) || !ACCESS_FBINFO(video.len)) {
+ if (!matroxfb_getmemory(minfo, memsize, &minfo->video.len) || !minfo->video.len) {
printk(KERN_ERR "matroxfb: cannot determine memory size\n");
goto failVideoIO;
}
- ACCESS_FBINFO(devflags.ydstorg) = 0;
+ minfo->devflags.ydstorg = 0;
- ACCESS_FBINFO(video.base) = video_base_phys;
- ACCESS_FBINFO(video.len_usable) = ACCESS_FBINFO(video.len);
- if (ACCESS_FBINFO(video.len_usable) > b->base->maxdisplayable)
- ACCESS_FBINFO(video.len_usable) = b->base->maxdisplayable;
+ minfo->video.base = video_base_phys;
+ minfo->video.len_usable = minfo->video.len;
+ if (minfo->video.len_usable > b->base->maxdisplayable)
+ minfo->video.len_usable = b->base->maxdisplayable;
#ifdef CONFIG_MTRR
if (mtrr) {
- ACCESS_FBINFO(mtrr.vram) = mtrr_add(video_base_phys, ACCESS_FBINFO(video.len), MTRR_TYPE_WRCOMB, 1);
- ACCESS_FBINFO(mtrr.vram_valid) = 1;
+ minfo->mtrr.vram = mtrr_add(video_base_phys, minfo->video.len, MTRR_TYPE_WRCOMB, 1);
+ minfo->mtrr.vram_valid = 1;
printk(KERN_INFO "matroxfb: MTRR's turned on\n");
}
#endif /* CONFIG_MTRR */
- if (!ACCESS_FBINFO(devflags.novga))
+ if (!minfo->devflags.novga)
request_region(0x3C0, 32, "matrox");
- matroxfb_g450_connect(PMINFO2);
- ACCESS_FBINFO(hw_switch->reset(PMINFO2));
+ matroxfb_g450_connect(minfo);
+ minfo->hw_switch->reset(minfo);
- ACCESS_FBINFO(fbcon.monspecs.hfmin) = 0;
- ACCESS_FBINFO(fbcon.monspecs.hfmax) = fh;
- ACCESS_FBINFO(fbcon.monspecs.vfmin) = 0;
- ACCESS_FBINFO(fbcon.monspecs.vfmax) = fv;
- ACCESS_FBINFO(fbcon.monspecs.dpms) = 0; /* TBD */
+ minfo->fbcon.monspecs.hfmin = 0;
+ minfo->fbcon.monspecs.hfmax = fh;
+ minfo->fbcon.monspecs.vfmin = 0;
+ minfo->fbcon.monspecs.vfmax = fv;
+ minfo->fbcon.monspecs.dpms = 0; /* TBD */
/* static settings */
vesafb_defined.red = colors[depth-1].red;
@@ -1768,24 +1773,24 @@ static int initMatrox2(WPMINFO struct board* b){
if (noaccel)
vesafb_defined.accel_flags &= ~FB_ACCELF_TEXT;
- ACCESS_FBINFO(fbops) = matroxfb_ops;
- ACCESS_FBINFO(fbcon.fbops) = &ACCESS_FBINFO(fbops);
- ACCESS_FBINFO(fbcon.pseudo_palette) = ACCESS_FBINFO(cmap);
+ minfo->fbops = matroxfb_ops;
+ minfo->fbcon.fbops = &minfo->fbops;
+ minfo->fbcon.pseudo_palette = minfo->cmap;
	/* after __init time we are like a module... no logo */
- ACCESS_FBINFO(fbcon.flags) = hotplug ? FBINFO_FLAG_MODULE : FBINFO_FLAG_DEFAULT;
- ACCESS_FBINFO(fbcon.flags) |= FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */
+ minfo->fbcon.flags = hotplug ? FBINFO_FLAG_MODULE : FBINFO_FLAG_DEFAULT;
+ minfo->fbcon.flags |= FBINFO_PARTIAL_PAN_OK | /* Prefer panning for scroll under MC viewer/edit */
FBINFO_HWACCEL_COPYAREA | /* We have hw-assisted bmove */
FBINFO_HWACCEL_FILLRECT | /* And fillrect */
FBINFO_HWACCEL_IMAGEBLIT | /* And imageblit */
FBINFO_HWACCEL_XPAN | /* And we support both horizontal */
FBINFO_HWACCEL_YPAN; /* And vertical panning */
- ACCESS_FBINFO(video.len_usable) &= PAGE_MASK;
- fb_alloc_cmap(&ACCESS_FBINFO(fbcon.cmap), 256, 1);
+ minfo->video.len_usable &= PAGE_MASK;
+ fb_alloc_cmap(&minfo->fbcon.cmap, 256, 1);
#ifndef MODULE
/* mode database is marked __init!!! */
if (!hotplug) {
- fb_find_mode(&vesafb_defined, &ACCESS_FBINFO(fbcon), videomode[0]?videomode:NULL,
+ fb_find_mode(&vesafb_defined, &minfo->fbcon, videomode[0] ? videomode : NULL,
NULL, 0, &defaultmode, vesafb_defined.bits_per_pixel);
}
#endif /* !MODULE */
@@ -1874,52 +1879,52 @@ static int initMatrox2(WPMINFO struct board* b){
vesafb_defined.yres_virtual = 65536; /* large enough to be INF, but small enough
						   that yres_virtual * xres_virtual < 2^32 */
}
- matroxfb_init_fix(PMINFO2);
- ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase));
+ matroxfb_init_fix(minfo);
+ minfo->fbcon.screen_base = vaddr_va(minfo->video.vbase);
/* Normalize values (namely yres_virtual) */
- matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon));
+ matroxfb_check_var(&vesafb_defined, &minfo->fbcon);
	/* And put it into "current" var. Do NOT program the hardware yet, or we won't take over
	 * vgacon correctly. fbcon_startup will call fb_set_par for us, WITHOUT check_var,
	 * and unfortunately it will do it BEFORE the vgacon contents are saved, so it won't work
	 * anyway. But we at least tried... */
- ACCESS_FBINFO(fbcon.var) = vesafb_defined;
+ minfo->fbcon.var = vesafb_defined;
err = -EINVAL;
printk(KERN_INFO "matroxfb: %dx%dx%dbpp (virtual: %dx%d)\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
vesafb_defined.xres_virtual, vesafb_defined.yres_virtual);
printk(KERN_INFO "matroxfb: framebuffer at 0x%lX, mapped to 0x%p, size %d\n",
- ACCESS_FBINFO(video.base), vaddr_va(ACCESS_FBINFO(video.vbase)), ACCESS_FBINFO(video.len));
+ minfo->video.base, vaddr_va(minfo->video.vbase), minfo->video.len);
	/* We do not have to set currcon to 0... register_framebuffer does it for us on the first console
* and we do not want currcon == 0 for subsequent framebuffers */
- ACCESS_FBINFO(fbcon).device = &ACCESS_FBINFO(pcidev)->dev;
- if (register_framebuffer(&ACCESS_FBINFO(fbcon)) < 0) {
+ minfo->fbcon.device = &minfo->pcidev->dev;
+ if (register_framebuffer(&minfo->fbcon) < 0) {
goto failVideoIO;
}
printk("fb%d: %s frame buffer device\n",
- ACCESS_FBINFO(fbcon.node), ACCESS_FBINFO(fbcon.fix.id));
+ minfo->fbcon.node, minfo->fbcon.fix.id);
	/* there is no console on this fb... but we have to initialize the hardware
	 * until someone tells me what the proper thing to do is */
- if (!ACCESS_FBINFO(initialized)) {
+ if (!minfo->initialized) {
printk(KERN_INFO "fb%d: initializing hardware\n",
- ACCESS_FBINFO(fbcon.node));
+ minfo->fbcon.node);
		/* We have to use FB_ACTIVATE_FORCE, as we already had to put vesafb_defined into fbcon.var
		 * above, so that register_framebuffer works correctly. */
vesafb_defined.activate |= FB_ACTIVATE_FORCE;
- fb_set_var(&ACCESS_FBINFO(fbcon), &vesafb_defined);
+ fb_set_var(&minfo->fbcon, &vesafb_defined);
}
return 0;
failVideoIO:;
- matroxfb_g450_shutdown(PMINFO2);
- mga_iounmap(ACCESS_FBINFO(video.vbase));
+ matroxfb_g450_shutdown(minfo);
+ mga_iounmap(minfo->video.vbase);
failCtrlIO:;
- mga_iounmap(ACCESS_FBINFO(mmio.vbase));
+ mga_iounmap(minfo->mmio.vbase);
failVideoMR:;
- release_mem_region(video_base_phys, ACCESS_FBINFO(video.len_maximum));
+ release_mem_region(video_base_phys, minfo->video.len_maximum);
failCtrlMR:;
release_mem_region(ctrlptr_phys, 16384);
fail:;
@@ -1975,7 +1980,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv) {
static void matroxfb_register_device(struct matrox_fb_info* minfo) {
struct matroxfb_driver* drv;
int i = 0;
- list_add(&ACCESS_FBINFO(next_fb), &matroxfb_list);
+ list_add(&minfo->next_fb, &matroxfb_list);
for (drv = matroxfb_driver_l(matroxfb_driver_list.next);
drv != matroxfb_driver_l(&matroxfb_driver_list);
drv = matroxfb_driver_l(drv->node.next)) {
@@ -1995,7 +2000,7 @@ static void matroxfb_register_device(struct matrox_fb_info* minfo) {
static void matroxfb_unregister_device(struct matrox_fb_info* minfo) {
int i;
- list_del(&ACCESS_FBINFO(next_fb));
+ list_del(&minfo->next_fb);
for (i = 0; i < minfo->drivers_count; i++) {
struct matroxfb_driver* drv = minfo->drivers[i];
@@ -2011,9 +2016,6 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
struct matrox_fb_info* minfo;
int err;
u_int32_t cmd;
-#ifndef CONFIG_FB_MATROX_MULTIHEAD
- static int registered = 0;
-#endif
DBG(__func__)
svid = pdev->subsystem_vendor;
@@ -2037,68 +2039,57 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
return -1;
}
-#ifdef CONFIG_FB_MATROX_MULTIHEAD
minfo = kmalloc(sizeof(*minfo), GFP_KERNEL);
if (!minfo)
return -1;
-#else
- if (registered) /* singlehead driver... */
- return -1;
- minfo = &matroxfb_global_mxinfo;
-#endif
- memset(MINFO, 0, sizeof(*MINFO));
+ memset(minfo, 0, sizeof(*minfo));
- ACCESS_FBINFO(pcidev) = pdev;
- ACCESS_FBINFO(dead) = 0;
- ACCESS_FBINFO(usecount) = 0;
- ACCESS_FBINFO(userusecount) = 0;
+ minfo->pcidev = pdev;
+ minfo->dead = 0;
+ minfo->usecount = 0;
+ minfo->userusecount = 0;
- pci_set_drvdata(pdev, MINFO);
+ pci_set_drvdata(pdev, minfo);
/* DEVFLAGS */
- ACCESS_FBINFO(devflags.memtype) = memtype;
+ minfo->devflags.memtype = memtype;
if (memtype != -1)
noinit = 0;
if (cmd & PCI_COMMAND_MEMORY) {
- ACCESS_FBINFO(devflags.novga) = novga;
- ACCESS_FBINFO(devflags.nobios) = nobios;
- ACCESS_FBINFO(devflags.noinit) = noinit;
+ minfo->devflags.novga = novga;
+ minfo->devflags.nobios = nobios;
+ minfo->devflags.noinit = noinit;
		/* subsequent heads always need initialization and must not enable the BIOS */
novga = 1;
nobios = 1;
noinit = 0;
} else {
- ACCESS_FBINFO(devflags.novga) = 1;
- ACCESS_FBINFO(devflags.nobios) = 1;
- ACCESS_FBINFO(devflags.noinit) = 0;
- }
-
- ACCESS_FBINFO(devflags.nopciretry) = no_pci_retry;
- ACCESS_FBINFO(devflags.mga_24bpp_fix) = inv24;
- ACCESS_FBINFO(devflags.precise_width) = option_precise_width;
- ACCESS_FBINFO(devflags.sgram) = sgram;
- ACCESS_FBINFO(capable.cross4MB) = cross4MB;
-
- spin_lock_init(&ACCESS_FBINFO(lock.DAC));
- spin_lock_init(&ACCESS_FBINFO(lock.accel));
- init_rwsem(&ACCESS_FBINFO(crtc2.lock));
- init_rwsem(&ACCESS_FBINFO(altout.lock));
- mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
- ACCESS_FBINFO(irq_flags) = 0;
- init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
- init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
- ACCESS_FBINFO(crtc1.panpos) = -1;
-
- err = initMatrox2(PMINFO b);
+ minfo->devflags.novga = 1;
+ minfo->devflags.nobios = 1;
+ minfo->devflags.noinit = 0;
+ }
+
+ minfo->devflags.nopciretry = no_pci_retry;
+ minfo->devflags.mga_24bpp_fix = inv24;
+ minfo->devflags.precise_width = option_precise_width;
+ minfo->devflags.sgram = sgram;
+ minfo->capable.cross4MB = cross4MB;
+
+ spin_lock_init(&minfo->lock.DAC);
+ spin_lock_init(&minfo->lock.accel);
+ init_rwsem(&minfo->crtc2.lock);
+ init_rwsem(&minfo->altout.lock);
+ mutex_init(&minfo->fbcon.mm_lock);
+ minfo->irq_flags = 0;
+ init_waitqueue_head(&minfo->crtc1.vsync.wait);
+ init_waitqueue_head(&minfo->crtc2.vsync.wait);
+ minfo->crtc1.panpos = -1;
+
+ err = initMatrox2(minfo, b);
if (!err) {
-#ifndef CONFIG_FB_MATROX_MULTIHEAD
- registered = 1;
-#endif
- matroxfb_register_device(MINFO);
+ matroxfb_register_device(minfo);
return 0;
}
-#ifdef CONFIG_FB_MATROX_MULTIHEAD
kfree(minfo);
-#endif
return -1;
}
@@ -2106,7 +2097,7 @@ static void pci_remove_matrox(struct pci_dev* pdev) {
struct matrox_fb_info* minfo;
minfo = pci_get_drvdata(pdev);
- matroxfb_remove(PMINFO 1);
+ matroxfb_remove(minfo, 1);
}
static struct pci_device_id matroxfb_devices[] = {
@@ -2510,13 +2501,8 @@ module_param(inv24, int, 0);
MODULE_PARM_DESC(inv24, "Inverts clock polarity for 24bpp and loop frequency > 100MHz (default=do not invert polarity)");
module_param(inverse, int, 0);
MODULE_PARM_DESC(inverse, "Inverse (0 or 1) (default=0)");
-#ifdef CONFIG_FB_MATROX_MULTIHEAD
module_param(dev, int, 0);
MODULE_PARM_DESC(dev, "Multihead support, attach to device ID (0..N) (default=all working)");
-#else
-module_param(dev, int, 0);
-MODULE_PARM_DESC(dev, "Multihead support, attach to device ID (0..N) (default=first working)");
-#endif
module_param(vesa, int, 0);
MODULE_PARM_DESC(vesa, "Startup videomode (0x000-0x1FF) (default=0x101)");
module_param(xres, int, 0);
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 95883236c0c..f3a4e15672d 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -54,9 +54,6 @@
#include "../macmodes.h"
#endif
-/* always compile support for 32MB... It cost almost nothing */
-#define CONFIG_FB_MATROX_32MB
-
#ifdef MATROXFB_DEBUG
#define DEBUG
@@ -464,9 +461,7 @@ struct matrox_fb_info {
int nopciretry;
int noinit;
int sgram;
-#ifdef CONFIG_FB_MATROX_32MB
int support32MB;
-#endif
int accelerator;
int text_type_aux;
@@ -524,47 +519,11 @@ struct matrox_fb_info {
#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon)
-#ifdef CONFIG_FB_MATROX_MULTIHEAD
-#define ACCESS_FBINFO2(info, x) (info->x)
-#define ACCESS_FBINFO(x) ACCESS_FBINFO2(minfo,x)
-
-#define MINFO minfo
-
-#define WPMINFO2 struct matrox_fb_info* minfo
-#define WPMINFO WPMINFO2 ,
-#define CPMINFO2 const struct matrox_fb_info* minfo
-#define CPMINFO CPMINFO2 ,
-#define PMINFO2 minfo
-#define PMINFO PMINFO2 ,
-
-#define MINFO_FROM(x) struct matrox_fb_info* minfo = x
-#else
-
-extern struct matrox_fb_info matroxfb_global_mxinfo;
-
-#define ACCESS_FBINFO(x) (matroxfb_global_mxinfo.x)
-#define ACCESS_FBINFO2(info, x) (matroxfb_global_mxinfo.x)
-
-#define MINFO (&matroxfb_global_mxinfo)
-
-#define WPMINFO2 void
-#define WPMINFO
-#define CPMINFO2 void
-#define CPMINFO
-#define PMINFO2
-#define PMINFO
-
-#define MINFO_FROM(x)
-
-#endif
-
-#define MINFO_FROM_INFO(x) MINFO_FROM(info2minfo(x))
-
struct matrox_switch {
- int (*preinit)(WPMINFO2);
- void (*reset)(WPMINFO2);
- int (*init)(WPMINFO struct my_timming*);
- void (*restore)(WPMINFO2);
+ int (*preinit)(struct matrox_fb_info *minfo);
+ void (*reset)(struct matrox_fb_info *minfo);
+ int (*init)(struct matrox_fb_info *minfo, struct my_timming*);
+ void (*restore)(struct matrox_fb_info *minfo);
};
struct matroxfb_driver {
@@ -727,11 +686,11 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
#endif
#endif
-#define mga_inb(addr) mga_readb(ACCESS_FBINFO(mmio.vbase), (addr))
-#define mga_inl(addr) mga_readl(ACCESS_FBINFO(mmio.vbase), (addr))
-#define mga_outb(addr,val) mga_writeb(ACCESS_FBINFO(mmio.vbase), (addr), (val))
-#define mga_outw(addr,val) mga_writew(ACCESS_FBINFO(mmio.vbase), (addr), (val))
-#define mga_outl(addr,val) mga_writel(ACCESS_FBINFO(mmio.vbase), (addr), (val))
+#define mga_inb(addr) mga_readb(minfo->mmio.vbase, (addr))
+#define mga_inl(addr) mga_readl(minfo->mmio.vbase, (addr))
+#define mga_outb(addr,val) mga_writeb(minfo->mmio.vbase, (addr), (val))
+#define mga_outw(addr,val) mga_writew(minfo->mmio.vbase, (addr), (val))
+#define mga_outl(addr,val) mga_writel(minfo->mmio.vbase, (addr), (val))
#define mga_readr(port,idx) (mga_outb((port),(idx)), mga_inb((port)+1))
#define mga_setr(addr,port,val) mga_outw(addr, ((val)<<8) | (port))
@@ -750,19 +709,20 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
#define isMilleniumII(x) (0)
#endif
-#define matroxfb_DAC_lock() spin_lock(&ACCESS_FBINFO(lock.DAC))
-#define matroxfb_DAC_unlock() spin_unlock(&ACCESS_FBINFO(lock.DAC))
-#define matroxfb_DAC_lock_irqsave(flags) spin_lock_irqsave(&ACCESS_FBINFO(lock.DAC),flags)
-#define matroxfb_DAC_unlock_irqrestore(flags) spin_unlock_irqrestore(&ACCESS_FBINFO(lock.DAC),flags)
-extern void matroxfb_DAC_out(CPMINFO int reg, int val);
-extern int matroxfb_DAC_in(CPMINFO int reg);
+#define matroxfb_DAC_lock() spin_lock(&minfo->lock.DAC)
+#define matroxfb_DAC_unlock() spin_unlock(&minfo->lock.DAC)
+#define matroxfb_DAC_lock_irqsave(flags) spin_lock_irqsave(&minfo->lock.DAC, flags)
+#define matroxfb_DAC_unlock_irqrestore(flags) spin_unlock_irqrestore(&minfo->lock.DAC, flags)
+extern void matroxfb_DAC_out(const struct matrox_fb_info *minfo, int reg,
+ int val);
+extern int matroxfb_DAC_in(const struct matrox_fb_info *minfo, int reg);
extern void matroxfb_var2my(struct fb_var_screeninfo* fvsi, struct my_timming* mt);
-extern int matroxfb_wait_for_sync(WPMINFO u_int32_t crtc);
-extern int matroxfb_enable_irq(WPMINFO int reenable);
+extern int matroxfb_wait_for_sync(struct matrox_fb_info *minfo, u_int32_t crtc);
+extern int matroxfb_enable_irq(struct matrox_fb_info *minfo, int reenable);
#ifdef MATROXFB_USE_SPINLOCKS
-#define CRITBEGIN spin_lock_irqsave(&ACCESS_FBINFO(lock.accel), critflags);
-#define CRITEND spin_unlock_irqrestore(&ACCESS_FBINFO(lock.accel), critflags);
+#define CRITBEGIN spin_lock_irqsave(&minfo->lock.accel, critflags);
+#define CRITEND spin_unlock_irqrestore(&minfo->lock.accel, critflags);
#define CRITFLAGS unsigned long critflags;
#else
#define CRITBEGIN
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index ebcb5c6b496..78414baa5a5 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -65,7 +65,7 @@ static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
unsigned int pos) {
u_int32_t tmp;
u_int32_t datactl;
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
switch (mode) {
case 15:
@@ -81,11 +81,11 @@ static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
}
tmp |= 0x00000001; /* enable CRTC2 */
datactl = 0;
- if (ACCESS_FBINFO(outputs[1]).src == MATROXFB_SRC_CRTC2) {
- if (ACCESS_FBINFO(devflags.g450dac)) {
+ if (minfo->outputs[1].src == MATROXFB_SRC_CRTC2) {
+ if (minfo->devflags.g450dac) {
tmp |= 0x00000006; /* source from secondary pixel PLL */
/* no vidrst when in monitor mode */
- if (ACCESS_FBINFO(outputs[1]).mode != MATROXFB_OUTPUT_MODE_MONITOR) {
+ if (minfo->outputs[1].mode != MATROXFB_OUTPUT_MODE_MONITOR) {
tmp |= 0xC0001000; /* Enable H/V vidrst */
}
} else {
@@ -93,11 +93,11 @@ static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
tmp |= 0xC0000000; /* enable vvidrst & hvidrst */
/* MGA TVO is our clock source */
}
- } else if (ACCESS_FBINFO(outputs[0]).src == MATROXFB_SRC_CRTC2) {
+ } else if (minfo->outputs[0].src == MATROXFB_SRC_CRTC2) {
tmp |= 0x00000004; /* source from pixclock */
/* PIXPLL is our clock source */
}
- if (ACCESS_FBINFO(outputs[0]).src == MATROXFB_SRC_CRTC2) {
+ if (minfo->outputs[0].src == MATROXFB_SRC_CRTC2) {
tmp |= 0x00100000; /* connect CRTC2 to DAC */
}
if (mt->interlaced) {
@@ -146,7 +146,7 @@ static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
}
}
mga_outl(0x3C10, tmp);
- ACCESS_FBINFO(hw).crtc2.ctl = tmp;
+ minfo->hw.crtc2.ctl = tmp;
tmp = mt->VDisplay << 16; /* line compare */
if (mt->sync & FB_SYNC_HOR_HIGH_ACT)
@@ -157,10 +157,10 @@ static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
}
static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
mga_outl(0x3C10, 0x00000004); /* disable CRTC2, CRTC1->DAC1, PLL as clock source */
- ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004;
+ minfo->hw.crtc2.ctl = 0x00000004;
}
static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
@@ -168,7 +168,7 @@ static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
unsigned int pos;
unsigned int linelen;
unsigned int pixelsize;
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
m2info->fbcon.var.xoffset = var->xoffset;
m2info->fbcon.var.yoffset = var->yoffset;
@@ -260,15 +260,15 @@ static int matroxfb_dh_decode_var(struct matroxfb_dh_fb_info* m2info,
static int matroxfb_dh_open(struct fb_info* info, int user) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
- if (MINFO) {
+ if (minfo) {
int err;
- if (ACCESS_FBINFO(dead)) {
+ if (minfo->dead) {
return -ENXIO;
}
- err = ACCESS_FBINFO(fbops).fb_open(&ACCESS_FBINFO(fbcon), user);
+ err = minfo->fbops.fb_open(&minfo->fbcon, user);
if (err) {
return err;
}
@@ -280,10 +280,10 @@ static int matroxfb_dh_open(struct fb_info* info, int user) {
static int matroxfb_dh_release(struct fb_info* info, int user) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
int err = 0;
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
- if (MINFO) {
- err = ACCESS_FBINFO(fbops).fb_release(&ACCESS_FBINFO(fbcon), user);
+ if (minfo) {
+ err = minfo->fbops.fb_release(&minfo->fbcon, user);
}
return err;
#undef m2info
@@ -326,7 +326,7 @@ static int matroxfb_dh_set_par(struct fb_info* info) {
int mode;
int err;
struct fb_var_screeninfo* var = &info->var;
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
if ((err = matroxfb_dh_decode_var(m2info, var, &visual, &cmap_len, &mode)) != 0)
return err;
@@ -352,39 +352,39 @@ static int matroxfb_dh_set_par(struct fb_info* info) {
pos = (m2info->fbcon.var.yoffset * m2info->fbcon.var.xres_virtual + m2info->fbcon.var.xoffset) * m2info->fbcon.var.bits_per_pixel >> 3;
pos += m2info->video.offbase;
cnt = 0;
- down_read(&ACCESS_FBINFO(altout).lock);
+ down_read(&minfo->altout.lock);
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC2) {
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) {
cnt++;
- if (ACCESS_FBINFO(outputs[out]).output->compute) {
- ACCESS_FBINFO(outputs[out]).output->compute(ACCESS_FBINFO(outputs[out]).data, &mt);
+ if (minfo->outputs[out].output->compute) {
+ minfo->outputs[out].output->compute(minfo->outputs[out].data, &mt);
}
}
}
- ACCESS_FBINFO(crtc2).pixclock = mt.pixclock;
- ACCESS_FBINFO(crtc2).mnp = mt.mnp;
- up_read(&ACCESS_FBINFO(altout).lock);
+ minfo->crtc2.pixclock = mt.pixclock;
+ minfo->crtc2.mnp = mt.mnp;
+ up_read(&minfo->altout.lock);
if (cnt) {
matroxfb_dh_restore(m2info, &mt, mode, pos);
} else {
matroxfb_dh_disable(m2info);
}
- DAC1064_global_init(PMINFO2);
- DAC1064_global_restore(PMINFO2);
- down_read(&ACCESS_FBINFO(altout).lock);
+ DAC1064_global_init(minfo);
+ DAC1064_global_restore(minfo);
+ down_read(&minfo->altout.lock);
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC2 &&
- ACCESS_FBINFO(outputs[out]).output->program) {
- ACCESS_FBINFO(outputs[out]).output->program(ACCESS_FBINFO(outputs[out]).data);
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2 &&
+ minfo->outputs[out].output->program) {
+ minfo->outputs[out].output->program(minfo->outputs[out].data);
}
}
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC2 &&
- ACCESS_FBINFO(outputs[out]).output->start) {
- ACCESS_FBINFO(outputs[out]).output->start(ACCESS_FBINFO(outputs[out]).data);
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2 &&
+ minfo->outputs[out].output->start) {
+ minfo->outputs[out].output->start(minfo->outputs[out].data);
}
}
- up_read(&ACCESS_FBINFO(altout).lock);
+ up_read(&minfo->altout.lock);
}
m2info->initialized = 1;
return 0;
@@ -399,9 +399,9 @@ static int matroxfb_dh_pan_display(struct fb_var_screeninfo* var, struct fb_info
}
static int matroxfb_dh_get_vblank(const struct matroxfb_dh_fb_info* m2info, struct fb_vblank* vblank) {
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
- matroxfb_enable_irq(PMINFO 0);
+ matroxfb_enable_irq(minfo, 0);
memset(vblank, 0, sizeof(*vblank));
vblank->flags = FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VBLANK;
/* mask out reserved bits + field number (odd/even) */
@@ -409,11 +409,11 @@ static int matroxfb_dh_get_vblank(const struct matroxfb_dh_fb_info* m2info, stru
/* compatibility stuff */
if (vblank->vcount >= m2info->fbcon.var.yres)
vblank->flags |= FB_VBLANK_VBLANKING;
- if (test_bit(0, &ACCESS_FBINFO(irq_flags))) {
+ if (test_bit(0, &minfo->irq_flags)) {
vblank->flags |= FB_VBLANK_HAVE_COUNT;
/* Only one writer, aligned int value...
		   it should work without a lock and without atomic_t */
- vblank->count = ACCESS_FBINFO(crtc2).vsync.cnt;
+ vblank->count = minfo->crtc2.vsync.cnt;
}
return 0;
}
@@ -423,7 +423,7 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
unsigned long arg)
{
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
- MINFO_FROM(m2info->primary_dev);
+ struct matrox_fb_info *minfo = m2info->primary_dev;
DBG(__func__)
@@ -449,13 +449,13 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
if (crt != 0)
return -ENODEV;
- return matroxfb_wait_for_sync(PMINFO 1);
+ return matroxfb_wait_for_sync(minfo, 1);
}
case MATROXFB_SET_OUTPUT_MODE:
case MATROXFB_GET_OUTPUT_MODE:
case MATROXFB_GET_ALL_OUTPUTS:
{
- return ACCESS_FBINFO(fbcon.fbops)->fb_ioctl(&ACCESS_FBINFO(fbcon), cmd, arg);
+ return minfo->fbcon.fbops->fb_ioctl(&minfo->fbcon, cmd, arg);
}
case MATROXFB_SET_OUTPUT_CONNECTION:
{
@@ -469,9 +469,9 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
if (tmp & (1 << out)) {
if (out >= MATROXFB_MAX_OUTPUTS)
return -ENXIO;
- if (!ACCESS_FBINFO(outputs[out]).output)
+ if (!minfo->outputs[out].output)
return -ENXIO;
- switch (ACCESS_FBINFO(outputs[out]).src) {
+ switch (minfo->outputs[out].src) {
case MATROXFB_SRC_NONE:
case MATROXFB_SRC_CRTC2:
break;
@@ -480,22 +480,22 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
}
}
}
- if (ACCESS_FBINFO(devflags.panellink)) {
+ if (minfo->devflags.panellink) {
if (tmp & MATROXFB_OUTPUT_CONN_DFP)
return -EINVAL;
- if ((ACCESS_FBINFO(outputs[2]).src == MATROXFB_SRC_CRTC1) && tmp)
+ if ((minfo->outputs[2].src == MATROXFB_SRC_CRTC1) && tmp)
return -EBUSY;
}
changes = 0;
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
if (tmp & (1 << out)) {
- if (ACCESS_FBINFO(outputs[out]).src != MATROXFB_SRC_CRTC2) {
+ if (minfo->outputs[out].src != MATROXFB_SRC_CRTC2) {
changes = 1;
- ACCESS_FBINFO(outputs[out]).src = MATROXFB_SRC_CRTC2;
+ minfo->outputs[out].src = MATROXFB_SRC_CRTC2;
}
- } else if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC2) {
+ } else if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) {
changes = 1;
- ACCESS_FBINFO(outputs[out]).src = MATROXFB_SRC_NONE;
+ minfo->outputs[out].src = MATROXFB_SRC_NONE;
}
}
if (!changes)
@@ -509,7 +509,7 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
int out;
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).src == MATROXFB_SRC_CRTC2) {
+ if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) {
conn |= 1 << out;
}
}
@@ -523,8 +523,8 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
int out;
for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
- if (ACCESS_FBINFO(outputs[out]).output) {
- switch (ACCESS_FBINFO(outputs[out]).src) {
+ if (minfo->outputs[out].output) {
+ switch (minfo->outputs[out].src) {
case MATROXFB_SRC_NONE:
case MATROXFB_SRC_CRTC2:
tmp |= 1 << out;
@@ -532,9 +532,9 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
}
}
}
- if (ACCESS_FBINFO(devflags.panellink)) {
+ if (minfo->devflags.panellink) {
tmp &= ~MATROXFB_OUTPUT_CONN_DFP;
- if (ACCESS_FBINFO(outputs[2]).src == MATROXFB_SRC_CRTC1) {
+ if (minfo->outputs[2].src == MATROXFB_SRC_CRTC1) {
tmp = 0;
}
}
@@ -595,7 +595,9 @@ static struct fb_var_screeninfo matroxfb_dh_defined = {
0, {0,0,0,0,0}
};
-static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
+static int matroxfb_dh_regit(const struct matrox_fb_info *minfo,
+ struct matroxfb_dh_fb_info *m2info)
+{
#define minfo (m2info->primary_dev)
void* oldcrtc2;
@@ -611,21 +613,21 @@ static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
if (mem < 64*1024)
mem *= 1024;
mem &= ~0x00000FFF; /* PAGE_MASK? */
- if (ACCESS_FBINFO(video.len_usable) + mem <= ACCESS_FBINFO(video.len))
- m2info->video.offbase = ACCESS_FBINFO(video.len) - mem;
- else if (ACCESS_FBINFO(video.len) < mem) {
+ if (minfo->video.len_usable + mem <= minfo->video.len)
+ m2info->video.offbase = minfo->video.len - mem;
+ else if (minfo->video.len < mem) {
return -ENOMEM;
} else { /* check yres on first head... */
m2info->video.borrowed = mem;
- ACCESS_FBINFO(video.len_usable) -= mem;
- m2info->video.offbase = ACCESS_FBINFO(video.len_usable);
+ minfo->video.len_usable -= mem;
+ m2info->video.offbase = minfo->video.len_usable;
}
- m2info->video.base = ACCESS_FBINFO(video.base) + m2info->video.offbase;
+ m2info->video.base = minfo->video.base + m2info->video.offbase;
m2info->video.len = m2info->video.len_usable = m2info->video.len_maximum = mem;
- m2info->video.vbase.vaddr = vaddr_va(ACCESS_FBINFO(video.vbase)) + m2info->video.offbase;
- m2info->mmio.base = ACCESS_FBINFO(mmio.base);
- m2info->mmio.vbase = ACCESS_FBINFO(mmio.vbase);
- m2info->mmio.len = ACCESS_FBINFO(mmio.len);
+ m2info->video.vbase.vaddr = vaddr_va(minfo->video.vbase) + m2info->video.offbase;
+ m2info->mmio.base = minfo->mmio.base;
+ m2info->mmio.vbase = minfo->mmio.vbase;
+ m2info->mmio.len = minfo->mmio.len;
matroxfb_dh_init_fix(m2info);
if (register_framebuffer(&m2info->fbcon)) {
@@ -633,10 +635,10 @@ static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
}
if (!m2info->initialized)
fb_set_var(&m2info->fbcon, &matroxfb_dh_defined);
- down_write(&ACCESS_FBINFO(crtc2.lock));
- oldcrtc2 = ACCESS_FBINFO(crtc2.info);
- ACCESS_FBINFO(crtc2.info) = m2info;
- up_write(&ACCESS_FBINFO(crtc2.lock));
+ down_write(&minfo->crtc2.lock);
+ oldcrtc2 = minfo->crtc2.info;
+ minfo->crtc2.info = m2info;
+ up_write(&minfo->crtc2.lock);
if (oldcrtc2) {
printk(KERN_ERR "matroxfb_crtc2: Internal consistency check failed: crtc2 already present: %p\n",
oldcrtc2);
@@ -649,12 +651,12 @@ static int matroxfb_dh_regit(CPMINFO struct matroxfb_dh_fb_info* m2info) {
static int matroxfb_dh_registerfb(struct matroxfb_dh_fb_info* m2info) {
#define minfo (m2info->primary_dev)
- if (matroxfb_dh_regit(PMINFO m2info)) {
+ if (matroxfb_dh_regit(minfo, m2info)) {
printk(KERN_ERR "matroxfb_crtc2: secondary head failed to register\n");
return -1;
}
printk(KERN_INFO "matroxfb_crtc2: secondary head of fb%u was registered as fb%u\n",
- ACCESS_FBINFO(fbcon.node), m2info->fbcon.node);
+ minfo->fbcon.node, m2info->fbcon.node);
m2info->fbcon_registered = 1;
return 0;
#undef minfo
@@ -666,11 +668,11 @@ static void matroxfb_dh_deregisterfb(struct matroxfb_dh_fb_info* m2info) {
int id;
struct matroxfb_dh_fb_info* crtc2;
- down_write(&ACCESS_FBINFO(crtc2.lock));
- crtc2 = ACCESS_FBINFO(crtc2.info);
+ down_write(&minfo->crtc2.lock);
+ crtc2 = minfo->crtc2.info;
if (crtc2 == m2info)
- ACCESS_FBINFO(crtc2.info) = NULL;
- up_write(&ACCESS_FBINFO(crtc2.lock));
+ minfo->crtc2.info = NULL;
+ up_write(&minfo->crtc2.lock);
if (crtc2 != m2info) {
printk(KERN_ERR "matroxfb_crtc2: Internal consistency check failed: crtc2 mismatch at unload: %p != %p\n",
crtc2, m2info);
@@ -680,7 +682,7 @@ static void matroxfb_dh_deregisterfb(struct matroxfb_dh_fb_info* m2info) {
id = m2info->fbcon.node;
unregister_framebuffer(&m2info->fbcon);
/* return memory back to primary head */
- ACCESS_FBINFO(video.len_usable) += m2info->video.borrowed;
+ minfo->video.len_usable += m2info->video.borrowed;
printk(KERN_INFO "matroxfb_crtc2: fb%u unregistered\n", id);
m2info->fbcon_registered = 0;
}
@@ -691,14 +693,14 @@ static void* matroxfb_crtc2_probe(struct matrox_fb_info* minfo) {
struct matroxfb_dh_fb_info* m2info;
/* hardware is CRTC2 incapable... */
- if (!ACCESS_FBINFO(devflags.crtc2))
+ if (!minfo->devflags.crtc2)
return NULL;
m2info = kzalloc(sizeof(*m2info), GFP_KERNEL);
if (!m2info) {
printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n");
return NULL;
}
- m2info->primary_dev = MINFO;
+ m2info->primary_dev = minfo;
if (matroxfb_dh_registerfb(m2info)) {
kfree(m2info);
printk(KERN_ERR "matroxfb_crtc2: CRTC2 framebuffer failed to register\n");
diff --git a/drivers/video/matrox/matroxfb_g450.c b/drivers/video/matrox/matroxfb_g450.c
index 6209a761f67..cff0546ea6f 100644
--- a/drivers/video/matrox/matroxfb_g450.c
+++ b/drivers/video/matrox/matroxfb_g450.c
@@ -80,52 +80,59 @@ static int get_ctrl_id(__u32 v4l2_id) {
return -EINVAL;
}
-static inline int* get_ctrl_ptr(WPMINFO unsigned int idx) {
- return (int*)((char*)MINFO + g450_controls[idx].control);
+static inline int *get_ctrl_ptr(struct matrox_fb_info *minfo, unsigned int idx)
+{
+ return (int*)((char*)minfo + g450_controls[idx].control);
}
-static void tvo_fill_defaults(WPMINFO2) {
+static void tvo_fill_defaults(struct matrox_fb_info *minfo)
+{
unsigned int i;
for (i = 0; i < G450CTRLS; i++) {
- *get_ctrl_ptr(PMINFO i) = g450_controls[i].desc.default_value;
+ *get_ctrl_ptr(minfo, i) = g450_controls[i].desc.default_value;
}
}
-static int cve2_get_reg(WPMINFO int reg) {
+static int cve2_get_reg(struct matrox_fb_info *minfo, int reg)
+{
unsigned long flags;
int val;
matroxfb_DAC_lock_irqsave(flags);
- matroxfb_DAC_out(PMINFO 0x87, reg);
- val = matroxfb_DAC_in(PMINFO 0x88);
+ matroxfb_DAC_out(minfo, 0x87, reg);
+ val = matroxfb_DAC_in(minfo, 0x88);
matroxfb_DAC_unlock_irqrestore(flags);
return val;
}
-static void cve2_set_reg(WPMINFO int reg, int val) {
+static void cve2_set_reg(struct matrox_fb_info *minfo, int reg, int val)
+{
unsigned long flags;
matroxfb_DAC_lock_irqsave(flags);
- matroxfb_DAC_out(PMINFO 0x87, reg);
- matroxfb_DAC_out(PMINFO 0x88, val);
+ matroxfb_DAC_out(minfo, 0x87, reg);
+ matroxfb_DAC_out(minfo, 0x88, val);
matroxfb_DAC_unlock_irqrestore(flags);
}
-static void cve2_set_reg10(WPMINFO int reg, int val) {
+static void cve2_set_reg10(struct matrox_fb_info *minfo, int reg, int val)
+{
unsigned long flags;
matroxfb_DAC_lock_irqsave(flags);
- matroxfb_DAC_out(PMINFO 0x87, reg);
- matroxfb_DAC_out(PMINFO 0x88, val >> 2);
- matroxfb_DAC_out(PMINFO 0x87, reg + 1);
- matroxfb_DAC_out(PMINFO 0x88, val & 3);
+ matroxfb_DAC_out(minfo, 0x87, reg);
+ matroxfb_DAC_out(minfo, 0x88, val >> 2);
+ matroxfb_DAC_out(minfo, 0x87, reg + 1);
+ matroxfb_DAC_out(minfo, 0x88, val & 3);
matroxfb_DAC_unlock_irqrestore(flags);
}
-static void g450_compute_bwlevel(CPMINFO int *bl, int *wl) {
- const int b = ACCESS_FBINFO(altout.tvo_params.brightness) + BLMIN;
- const int c = ACCESS_FBINFO(altout.tvo_params.contrast);
+static void g450_compute_bwlevel(const struct matrox_fb_info *minfo, int *bl,
+ int *wl)
+{
+ const int b = minfo->altout.tvo_params.brightness + BLMIN;
+ const int c = minfo->altout.tvo_params.contrast;
*bl = max(b - c, BLMIN);
*wl = min(b + c, WLMAX);
@@ -154,7 +161,7 @@ static int g450_query_ctrl(void* md, struct v4l2_queryctrl *p) {
static int g450_set_ctrl(void* md, struct v4l2_control *p) {
int i;
- MINFO_FROM(md);
+ struct matrox_fb_info *minfo = md;
i = get_ctrl_id(p->id);
if (i < 0) return -EINVAL;
@@ -162,7 +169,7 @@ static int g450_set_ctrl(void* md, struct v4l2_control *p) {
/*
* Check if changed.
*/
- if (p->value == *get_ctrl_ptr(PMINFO i)) return 0;
+ if (p->value == *get_ctrl_ptr(minfo, i)) return 0;
/*
* Check limits.
@@ -173,31 +180,31 @@ static int g450_set_ctrl(void* md, struct v4l2_control *p) {
/*
* Store new value.
*/
- *get_ctrl_ptr(PMINFO i) = p->value;
+ *get_ctrl_ptr(minfo, i) = p->value;
switch (p->id) {
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_CONTRAST:
{
int blacklevel, whitelevel;
- g450_compute_bwlevel(PMINFO &blacklevel, &whitelevel);
- cve2_set_reg10(PMINFO 0x0e, blacklevel);
- cve2_set_reg10(PMINFO 0x1e, whitelevel);
+ g450_compute_bwlevel(minfo, &blacklevel, &whitelevel);
+ cve2_set_reg10(minfo, 0x0e, blacklevel);
+ cve2_set_reg10(minfo, 0x1e, whitelevel);
}
break;
case V4L2_CID_SATURATION:
- cve2_set_reg(PMINFO 0x20, p->value);
- cve2_set_reg(PMINFO 0x22, p->value);
+ cve2_set_reg(minfo, 0x20, p->value);
+ cve2_set_reg(minfo, 0x22, p->value);
break;
case V4L2_CID_HUE:
- cve2_set_reg(PMINFO 0x25, p->value);
+ cve2_set_reg(minfo, 0x25, p->value);
break;
case MATROXFB_CID_TESTOUT:
{
- unsigned char val = cve2_get_reg (PMINFO 0x05);
+ unsigned char val = cve2_get_reg(minfo, 0x05);
if (p->value) val |= 0x02;
else val &= ~0x02;
- cve2_set_reg(PMINFO 0x05, val);
+ cve2_set_reg(minfo, 0x05, val);
}
break;
}
@@ -208,11 +215,11 @@ static int g450_set_ctrl(void* md, struct v4l2_control *p) {
static int g450_get_ctrl(void* md, struct v4l2_control *p) {
int i;
- MINFO_FROM(md);
+ struct matrox_fb_info *minfo = md;
i = get_ctrl_id(p->id);
if (i < 0) return -EINVAL;
- p->value = *get_ctrl_ptr(PMINFO i);
+ p->value = *get_ctrl_ptr(minfo, i);
return 0;
}
@@ -226,7 +233,9 @@ struct output_desc {
unsigned int v_total;
};
-static void computeRegs(WPMINFO struct mavenregs* r, struct my_timming* mt, const struct output_desc* outd) {
+static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
+ struct my_timming *mt, const struct output_desc *outd)
+{
u_int32_t chromasc;
u_int32_t hlen;
u_int32_t hsl;
@@ -251,10 +260,10 @@ static void computeRegs(WPMINFO struct mavenregs* r, struct my_timming* mt, cons
dprintk(KERN_DEBUG "Want %u kHz pixclock\n", (unsigned int)piic);
- mnp = matroxfb_g450_setclk(PMINFO piic, M_VIDEO_PLL);
+ mnp = matroxfb_g450_setclk(minfo, piic, M_VIDEO_PLL);
mt->mnp = mnp;
- mt->pixclock = g450_mnp2f(PMINFO mnp);
+ mt->pixclock = g450_mnp2f(minfo, mnp);
dprintk(KERN_DEBUG "MNP=%08X\n", mnp);
@@ -490,65 +499,67 @@ static void cve2_init_TVdata(int norm, struct mavenregs* data, const struct outp
return;
}
-#define LR(x) cve2_set_reg(PMINFO (x), m->regs[(x)])
-static void cve2_init_TV(WPMINFO const struct mavenregs* m) {
+#define LR(x) cve2_set_reg(minfo, (x), m->regs[(x)])
+static void cve2_init_TV(struct matrox_fb_info *minfo,
+ const struct mavenregs *m)
+{
int i;
LR(0x80);
LR(0x82); LR(0x83);
LR(0x84); LR(0x85);
- cve2_set_reg(PMINFO 0x3E, 0x01);
+ cve2_set_reg(minfo, 0x3E, 0x01);
for (i = 0; i < 0x3E; i++) {
LR(i);
}
- cve2_set_reg(PMINFO 0x3E, 0x00);
+ cve2_set_reg(minfo, 0x3E, 0x00);
}
static int matroxfb_g450_compute(void* md, struct my_timming* mt) {
- MINFO_FROM(md);
+ struct matrox_fb_info *minfo = md;
- dprintk(KERN_DEBUG "Computing, mode=%u\n", ACCESS_FBINFO(outputs[1]).mode);
+ dprintk(KERN_DEBUG "Computing, mode=%u\n", minfo->outputs[1].mode);
if (mt->crtc == MATROXFB_SRC_CRTC2 &&
- ACCESS_FBINFO(outputs[1]).mode != MATROXFB_OUTPUT_MODE_MONITOR) {
+ minfo->outputs[1].mode != MATROXFB_OUTPUT_MODE_MONITOR) {
const struct output_desc* outd;
- cve2_init_TVdata(ACCESS_FBINFO(outputs[1]).mode, &ACCESS_FBINFO(hw).maven, &outd);
+ cve2_init_TVdata(minfo->outputs[1].mode, &minfo->hw.maven, &outd);
{
int blacklevel, whitelevel;
- g450_compute_bwlevel(PMINFO &blacklevel, &whitelevel);
- ACCESS_FBINFO(hw).maven.regs[0x0E] = blacklevel >> 2;
- ACCESS_FBINFO(hw).maven.regs[0x0F] = blacklevel & 3;
- ACCESS_FBINFO(hw).maven.regs[0x1E] = whitelevel >> 2;
- ACCESS_FBINFO(hw).maven.regs[0x1F] = whitelevel & 3;
+ g450_compute_bwlevel(minfo, &blacklevel, &whitelevel);
+ minfo->hw.maven.regs[0x0E] = blacklevel >> 2;
+ minfo->hw.maven.regs[0x0F] = blacklevel & 3;
+ minfo->hw.maven.regs[0x1E] = whitelevel >> 2;
+ minfo->hw.maven.regs[0x1F] = whitelevel & 3;
- ACCESS_FBINFO(hw).maven.regs[0x20] =
- ACCESS_FBINFO(hw).maven.regs[0x22] = ACCESS_FBINFO(altout.tvo_params.saturation);
+ minfo->hw.maven.regs[0x20] =
+ minfo->hw.maven.regs[0x22] = minfo->altout.tvo_params.saturation;
- ACCESS_FBINFO(hw).maven.regs[0x25] = ACCESS_FBINFO(altout.tvo_params.hue);
+ minfo->hw.maven.regs[0x25] = minfo->altout.tvo_params.hue;
- if (ACCESS_FBINFO(altout.tvo_params.testout)) {
- ACCESS_FBINFO(hw).maven.regs[0x05] |= 0x02;
+ if (minfo->altout.tvo_params.testout) {
+ minfo->hw.maven.regs[0x05] |= 0x02;
}
}
- computeRegs(PMINFO &ACCESS_FBINFO(hw).maven, mt, outd);
+ computeRegs(minfo, &minfo->hw.maven, mt, outd);
} else if (mt->mnp < 0) {
/* We must program clocks before CRTC2, otherwise interlaced mode
startup may fail */
- mt->mnp = matroxfb_g450_setclk(PMINFO mt->pixclock, (mt->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
- mt->pixclock = g450_mnp2f(PMINFO mt->mnp);
+ mt->mnp = matroxfb_g450_setclk(minfo, mt->pixclock, (mt->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
+ mt->pixclock = g450_mnp2f(minfo, mt->mnp);
}
dprintk(KERN_DEBUG "Pixclock = %u\n", mt->pixclock);
return 0;
}
static int matroxfb_g450_program(void* md) {
- MINFO_FROM(md);
+ struct matrox_fb_info *minfo = md;
- if (ACCESS_FBINFO(outputs[1]).mode != MATROXFB_OUTPUT_MODE_MONITOR) {
- cve2_init_TV(PMINFO &ACCESS_FBINFO(hw).maven);
+ if (minfo->outputs[1].mode != MATROXFB_OUTPUT_MODE_MONITOR) {
+ cve2_init_TV(minfo, &minfo->hw.maven);
}
return 0;
}
@@ -564,11 +575,11 @@ static int matroxfb_g450_verify_mode(void* md, u_int32_t arg) {
}
static int g450_dvi_compute(void* md, struct my_timming* mt) {
- MINFO_FROM(md);
+ struct matrox_fb_info *minfo = md;
if (mt->mnp < 0) {
- mt->mnp = matroxfb_g450_setclk(PMINFO mt->pixclock, (mt->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
- mt->pixclock = g450_mnp2f(PMINFO mt->mnp);
+ mt->mnp = matroxfb_g450_setclk(minfo, mt->pixclock, (mt->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
+ mt->pixclock = g450_mnp2f(minfo, mt->mnp);
}
return 0;
}
@@ -588,34 +599,36 @@ static struct matrox_altout matroxfb_g450_dvi = {
.compute = g450_dvi_compute,
};
-void matroxfb_g450_connect(WPMINFO2) {
- if (ACCESS_FBINFO(devflags.g450dac)) {
- down_write(&ACCESS_FBINFO(altout.lock));
- tvo_fill_defaults(PMINFO2);
- ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src;
- ACCESS_FBINFO(outputs[1]).data = MINFO;
- ACCESS_FBINFO(outputs[1]).output = &matroxfb_g450_altout;
- ACCESS_FBINFO(outputs[1]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- ACCESS_FBINFO(outputs[2]).src = ACCESS_FBINFO(outputs[2]).default_src;
- ACCESS_FBINFO(outputs[2]).data = MINFO;
- ACCESS_FBINFO(outputs[2]).output = &matroxfb_g450_dvi;
- ACCESS_FBINFO(outputs[2]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- up_write(&ACCESS_FBINFO(altout.lock));
+void matroxfb_g450_connect(struct matrox_fb_info *minfo)
+{
+ if (minfo->devflags.g450dac) {
+ down_write(&minfo->altout.lock);
+ tvo_fill_defaults(minfo);
+ minfo->outputs[1].src = minfo->outputs[1].default_src;
+ minfo->outputs[1].data = minfo;
+ minfo->outputs[1].output = &matroxfb_g450_altout;
+ minfo->outputs[1].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ minfo->outputs[2].src = minfo->outputs[2].default_src;
+ minfo->outputs[2].data = minfo;
+ minfo->outputs[2].output = &matroxfb_g450_dvi;
+ minfo->outputs[2].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ up_write(&minfo->altout.lock);
}
}
-void matroxfb_g450_shutdown(WPMINFO2) {
- if (ACCESS_FBINFO(devflags.g450dac)) {
- down_write(&ACCESS_FBINFO(altout.lock));
- ACCESS_FBINFO(outputs[1]).src = MATROXFB_SRC_NONE;
- ACCESS_FBINFO(outputs[1]).output = NULL;
- ACCESS_FBINFO(outputs[1]).data = NULL;
- ACCESS_FBINFO(outputs[1]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- ACCESS_FBINFO(outputs[2]).src = MATROXFB_SRC_NONE;
- ACCESS_FBINFO(outputs[2]).output = NULL;
- ACCESS_FBINFO(outputs[2]).data = NULL;
- ACCESS_FBINFO(outputs[2]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- up_write(&ACCESS_FBINFO(altout.lock));
+void matroxfb_g450_shutdown(struct matrox_fb_info *minfo)
+{
+ if (minfo->devflags.g450dac) {
+ down_write(&minfo->altout.lock);
+ minfo->outputs[1].src = MATROXFB_SRC_NONE;
+ minfo->outputs[1].output = NULL;
+ minfo->outputs[1].data = NULL;
+ minfo->outputs[1].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ minfo->outputs[2].src = MATROXFB_SRC_NONE;
+ minfo->outputs[2].output = NULL;
+ minfo->outputs[2].data = NULL;
+ minfo->outputs[2].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ up_write(&minfo->altout.lock);
}
}
diff --git a/drivers/video/matrox/matroxfb_g450.h b/drivers/video/matrox/matroxfb_g450.h
index a0822a6033e..3a3e654444b 100644
--- a/drivers/video/matrox/matroxfb_g450.h
+++ b/drivers/video/matrox/matroxfb_g450.h
@@ -4,11 +4,11 @@
#include "matroxfb_base.h"
#ifdef CONFIG_FB_MATROX_G
-void matroxfb_g450_connect(WPMINFO2);
-void matroxfb_g450_shutdown(WPMINFO2);
+void matroxfb_g450_connect(struct matrox_fb_info *minfo);
+void matroxfb_g450_shutdown(struct matrox_fb_info *minfo);
#else
-static inline void matroxfb_g450_connect(WPMINFO2) { };
-static inline void matroxfb_g450_shutdown(WPMINFO2) { };
+static inline void matroxfb_g450_connect(struct matrox_fb_info *minfo) { };
+static inline void matroxfb_g450_shutdown(struct matrox_fb_info *minfo) { };
#endif
#endif /* __MATROXFB_G450_H__ */
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 042408a8c63..91af9159111 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -458,9 +458,9 @@ static void maven_init_TVdata(const struct maven_data* md, struct mavenregs* dat
0x00, /* 3E written multiple times */
0x00, /* never written */
}, MATROXFB_OUTPUT_MODE_NTSC, 525, 60 };
- MINFO_FROM(md->primary_head);
+ struct matrox_fb_info *minfo = md->primary_head;
- if (ACCESS_FBINFO(outputs[1]).mode == MATROXFB_OUTPUT_MODE_PAL)
+ if (minfo->outputs[1].mode == MATROXFB_OUTPUT_MODE_PAL)
*data = palregs;
else
*data = ntscregs;
@@ -496,11 +496,11 @@ static void maven_init_TVdata(const struct maven_data* md, struct mavenregs* dat
/* Set saturation */
{
data->regs[0x20] =
- data->regs[0x22] = ACCESS_FBINFO(altout.tvo_params.saturation);
+ data->regs[0x22] = minfo->altout.tvo_params.saturation;
}
/* Set HUE */
- data->regs[0x25] = ACCESS_FBINFO(altout.tvo_params.hue);
+ data->regs[0x25] = minfo->altout.tvo_params.hue;
return;
}
@@ -741,9 +741,9 @@ static inline int maven_compute_timming(struct maven_data* md,
struct mavenregs* m) {
unsigned int tmpi;
unsigned int a, bv, c;
- MINFO_FROM(md->primary_head);
+ struct matrox_fb_info *minfo = md->primary_head;
- m->mode = ACCESS_FBINFO(outputs[1]).mode;
+ m->mode = minfo->outputs[1].mode;
if (m->mode != MATROXFB_OUTPUT_MODE_MONITOR) {
unsigned int lmargin;
unsigned int umargin;
@@ -1132,7 +1132,7 @@ static int maven_get_control (struct maven_data* md,
static int maven_out_compute(void* md, struct my_timming* mt) {
#define mdinfo ((struct maven_data*)md)
#define minfo (mdinfo->primary_head)
- return maven_compute_timming(md, mt, &ACCESS_FBINFO(hw).maven);
+ return maven_compute_timming(md, mt, &minfo->hw.maven);
#undef minfo
#undef mdinfo
}
@@ -1140,7 +1140,7 @@ static int maven_out_compute(void* md, struct my_timming* mt) {
static int maven_out_program(void* md) {
#define mdinfo ((struct maven_data*)md)
#define minfo (mdinfo->primary_head)
- return maven_program_timming(md, &ACCESS_FBINFO(hw).maven);
+ return maven_program_timming(md, &minfo->hw.maven);
#undef minfo
#undef mdinfo
}
@@ -1184,16 +1184,18 @@ static struct matrox_altout maven_altout = {
static int maven_init_client(struct i2c_client* clnt) {
struct maven_data* md = i2c_get_clientdata(clnt);
- MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo);
+ struct matrox_fb_info *minfo = container_of(clnt->adapter,
+ struct i2c_bit_adapter,
+ adapter)->minfo;
- md->primary_head = MINFO;
+ md->primary_head = minfo;
md->client = clnt;
- down_write(&ACCESS_FBINFO(altout.lock));
- ACCESS_FBINFO(outputs[1]).output = &maven_altout;
- ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src;
- ACCESS_FBINFO(outputs[1]).data = md;
- ACCESS_FBINFO(outputs[1]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- up_write(&ACCESS_FBINFO(altout.lock));
+ down_write(&minfo->altout.lock);
+ minfo->outputs[1].output = &maven_altout;
+ minfo->outputs[1].src = minfo->outputs[1].default_src;
+ minfo->outputs[1].data = md;
+ minfo->outputs[1].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ up_write(&minfo->altout.lock);
if (maven_get_reg(clnt, 0xB2) < 0x14) {
md->version = MGATVO_B;
/* Tweak some things for this old chip */
@@ -1218,14 +1220,14 @@ static int maven_shutdown_client(struct i2c_client* clnt) {
struct maven_data* md = i2c_get_clientdata(clnt);
if (md->primary_head) {
- MINFO_FROM(md->primary_head);
-
- down_write(&ACCESS_FBINFO(altout.lock));
- ACCESS_FBINFO(outputs[1]).src = MATROXFB_SRC_NONE;
- ACCESS_FBINFO(outputs[1]).output = NULL;
- ACCESS_FBINFO(outputs[1]).data = NULL;
- ACCESS_FBINFO(outputs[1]).mode = MATROXFB_OUTPUT_MODE_MONITOR;
- up_write(&ACCESS_FBINFO(altout.lock));
+ struct matrox_fb_info *minfo = md->primary_head;
+
+ down_write(&minfo->altout.lock);
+ minfo->outputs[1].src = MATROXFB_SRC_NONE;
+ minfo->outputs[1].output = NULL;
+ minfo->outputs[1].data = NULL;
+ minfo->outputs[1].mode = MATROXFB_OUTPUT_MODE_MONITOR;
+ up_write(&minfo->altout.lock);
md->primary_head = NULL;
}
return 0;
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index 5b5f072fc1a..9948ca2a304 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -89,13 +89,15 @@
#include <linux/interrupt.h>
#include <linux/matroxfb.h>
-void matroxfb_DAC_out(CPMINFO int reg, int val) {
+void matroxfb_DAC_out(const struct matrox_fb_info *minfo, int reg, int val)
+{
DBG_REG(__func__)
mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
mga_outb(M_RAMDAC_BASE+M_X_DATAREG, val);
}
-int matroxfb_DAC_in(CPMINFO int reg) {
+int matroxfb_DAC_in(const struct matrox_fb_info *minfo, int reg)
+{
DBG_REG(__func__)
mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
return mga_inb(M_RAMDAC_BASE+M_X_DATAREG);
@@ -184,13 +186,14 @@ int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int f
return bestvco;
}
-int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
+int matroxfb_vgaHWinit(struct matrox_fb_info *minfo, struct my_timming *m)
+{
unsigned int hd, hs, he, hbe, ht;
unsigned int vd, vs, ve, vt, lc;
unsigned int wd;
unsigned int divider;
int i;
- struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state * const hw = &minfo->hw;
DBG(__func__)
@@ -240,7 +243,7 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
/* standard timmings are in 8pixels, but for interleaved we cannot */
/* do it for 4bpp (because of (4bpp >> 1(interleaved))/4 == 0) */
/* using 16 or more pixels per unit can save us */
- divider = ACCESS_FBINFO(curr.final_bppShift);
+ divider = minfo->curr.final_bppShift;
while (divider & 3) {
hd >>= 1;
hs >>= 1;
@@ -270,7 +273,7 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
if (((ht & 0x07) == 0x06) || ((ht & 0x0F) == 0x04))
ht++;
hbe = ht;
- wd = ACCESS_FBINFO(fbcon).var.xres_virtual * ACCESS_FBINFO(curr.final_bppShift) / 64;
+ wd = minfo->fbcon.var.xres_virtual * minfo->curr.final_bppShift / 64;
hw->CRTCEXT[0] = 0;
hw->CRTCEXT[5] = 0;
@@ -287,7 +290,7 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
((hs & 0x100) >> 6) | /* sync start */
(hbe & 0x040); /* end hor. blanking */
/* FIXME: Enable vidrst only on G400, and only if TV-out is used */
- if (ACCESS_FBINFO(outputs[1]).src == MATROXFB_SRC_CRTC1)
+ if (minfo->outputs[1].src == MATROXFB_SRC_CRTC1)
hw->CRTCEXT[1] |= 0x88; /* enable horizontal and vertical vidrst */
hw->CRTCEXT[2] = ((vt & 0xC00) >> 10) |
((vd & 0x400) >> 8) | /* disp end */
@@ -331,9 +334,10 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
return 0;
};
-void matroxfb_vgaHWrestore(WPMINFO2) {
+void matroxfb_vgaHWrestore(struct matrox_fb_info *minfo)
+{
int i;
- struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
+ struct matrox_hw_state * const hw = &minfo->hw;
CRITFLAGS
DBG(__func__)
@@ -522,7 +526,9 @@ static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) {
#endif
}
-static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
+static int parse_pins1(struct matrox_fb_info *minfo,
+ const struct matrox_bios *bd)
+{
unsigned int maxdac;
switch (bd->pins[22]) {
@@ -533,173 +539,188 @@ static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
if (get_unaligned_le16(bd->pins + 24)) {
maxdac = get_unaligned_le16(bd->pins + 24) * 10;
}
- MINFO->limits.pixel.vcomax = maxdac;
- MINFO->values.pll.system = get_unaligned_le16(bd->pins + 28) ?
+ minfo->limits.pixel.vcomax = maxdac;
+ minfo->values.pll.system = get_unaligned_le16(bd->pins + 28) ?
get_unaligned_le16(bd->pins + 28) * 10 : 50000;
/* ignore 4MB, 8MB, module clocks */
- MINFO->features.pll.ref_freq = 14318;
- MINFO->values.reg.mctlwtst = 0x00030101;
+ minfo->features.pll.ref_freq = 14318;
+ minfo->values.reg.mctlwtst = 0x00030101;
return 0;
}
-static void default_pins1(WPMINFO2) {
+static void default_pins1(struct matrox_fb_info *minfo)
+{
/* Millennium */
- MINFO->limits.pixel.vcomax = 220000;
- MINFO->values.pll.system = 50000;
- MINFO->features.pll.ref_freq = 14318;
- MINFO->values.reg.mctlwtst = 0x00030101;
+ minfo->limits.pixel.vcomax = 220000;
+ minfo->values.pll.system = 50000;
+ minfo->features.pll.ref_freq = 14318;
+ minfo->values.reg.mctlwtst = 0x00030101;
}
-static int parse_pins2(WPMINFO const struct matrox_bios* bd) {
- MINFO->limits.pixel.vcomax =
- MINFO->limits.system.vcomax = (bd->pins[41] == 0xFF) ? 230000 : ((bd->pins[41] + 100) * 1000);
- MINFO->values.reg.mctlwtst = ((bd->pins[51] & 0x01) ? 0x00000001 : 0) |
+static int parse_pins2(struct matrox_fb_info *minfo,
+ const struct matrox_bios *bd)
+{
+ minfo->limits.pixel.vcomax =
+ minfo->limits.system.vcomax = (bd->pins[41] == 0xFF) ? 230000 : ((bd->pins[41] + 100) * 1000);
+ minfo->values.reg.mctlwtst = ((bd->pins[51] & 0x01) ? 0x00000001 : 0) |
((bd->pins[51] & 0x02) ? 0x00000100 : 0) |
((bd->pins[51] & 0x04) ? 0x00010000 : 0) |
((bd->pins[51] & 0x08) ? 0x00020000 : 0);
- MINFO->values.pll.system = (bd->pins[43] == 0xFF) ? 50000 : ((bd->pins[43] + 100) * 1000);
- MINFO->features.pll.ref_freq = 14318;
+ minfo->values.pll.system = (bd->pins[43] == 0xFF) ? 50000 : ((bd->pins[43] + 100) * 1000);
+ minfo->features.pll.ref_freq = 14318;
return 0;
}
-static void default_pins2(WPMINFO2) {
+static void default_pins2(struct matrox_fb_info *minfo)
+{
/* Millennium II, Mystique */
- MINFO->limits.pixel.vcomax =
- MINFO->limits.system.vcomax = 230000;
- MINFO->values.reg.mctlwtst = 0x00030101;
- MINFO->values.pll.system = 50000;
- MINFO->features.pll.ref_freq = 14318;
+ minfo->limits.pixel.vcomax =
+ minfo->limits.system.vcomax = 230000;
+ minfo->values.reg.mctlwtst = 0x00030101;
+ minfo->values.pll.system = 50000;
+ minfo->features.pll.ref_freq = 14318;
}
-static int parse_pins3(WPMINFO const struct matrox_bios* bd) {
- MINFO->limits.pixel.vcomax =
- MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000);
- MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ?
+static int parse_pins3(struct matrox_fb_info *minfo,
+ const struct matrox_bios *bd)
+{
+ minfo->limits.pixel.vcomax =
+ minfo->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000);
+ minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ?
0x01250A21 : get_unaligned_le32(bd->pins + 48);
/* memory config */
- MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) |
+ minfo->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) |
((bd->pins[57] << 22) & 0x00C00000) |
((bd->pins[56] << 1) & 0x000001E0) |
( bd->pins[56] & 0x0000000F);
- MINFO->values.reg.opt = (bd->pins[54] & 7) << 10;
- MINFO->values.reg.opt2 = bd->pins[58] << 12;
- MINFO->features.pll.ref_freq = (bd->pins[52] & 0x20) ? 14318 : 27000;
+ minfo->values.reg.opt = (bd->pins[54] & 7) << 10;
+ minfo->values.reg.opt2 = bd->pins[58] << 12;
+ minfo->features.pll.ref_freq = (bd->pins[52] & 0x20) ? 14318 : 27000;
return 0;
}
-static void default_pins3(WPMINFO2) {
+static void default_pins3(struct matrox_fb_info *minfo)
+{
/* G100, G200 */
- MINFO->limits.pixel.vcomax =
- MINFO->limits.system.vcomax = 230000;
- MINFO->values.reg.mctlwtst = 0x01250A21;
- MINFO->values.reg.memrdbk = 0x00000000;
- MINFO->values.reg.opt = 0x00000C00;
- MINFO->values.reg.opt2 = 0x00000000;
- MINFO->features.pll.ref_freq = 27000;
+ minfo->limits.pixel.vcomax =
+ minfo->limits.system.vcomax = 230000;
+ minfo->values.reg.mctlwtst = 0x01250A21;
+ minfo->values.reg.memrdbk = 0x00000000;
+ minfo->values.reg.opt = 0x00000C00;
+ minfo->values.reg.opt2 = 0x00000000;
+ minfo->features.pll.ref_freq = 27000;
}
-static int parse_pins4(WPMINFO const struct matrox_bios* bd) {
- MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000;
- MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000;
- MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71);
- MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) |
+static int parse_pins4(struct matrox_fb_info *minfo,
+ const struct matrox_bios *bd)
+{
+ minfo->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000;
+ minfo->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? minfo->limits.pixel.vcomax : bd->pins[ 38] * 4000;
+ minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71);
+ minfo->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) |
((bd->pins[87] << 22) & 0x00C00000) |
((bd->pins[86] << 1) & 0x000001E0) |
( bd->pins[86] & 0x0000000F);
- MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) |
+ minfo->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) |
((bd->pins[53] << 22) & 0x10000000) |
((bd->pins[53] << 7) & 0x00001C00);
- MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 67);
- MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000;
- MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000;
+ minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 67);
+ minfo->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000;
+ minfo->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000;
return 0;
}
-static void default_pins4(WPMINFO2) {
+static void default_pins4(struct matrox_fb_info *minfo)
+{
/* G400 */
- MINFO->limits.pixel.vcomax =
- MINFO->limits.system.vcomax = 252000;
- MINFO->values.reg.mctlwtst = 0x04A450A1;
- MINFO->values.reg.memrdbk = 0x000000E7;
- MINFO->values.reg.opt = 0x10000400;
- MINFO->values.reg.opt3 = 0x0190A419;
- MINFO->values.pll.system = 200000;
- MINFO->features.pll.ref_freq = 27000;
+ minfo->limits.pixel.vcomax =
+ minfo->limits.system.vcomax = 252000;
+ minfo->values.reg.mctlwtst = 0x04A450A1;
+ minfo->values.reg.memrdbk = 0x000000E7;
+ minfo->values.reg.opt = 0x10000400;
+ minfo->values.reg.opt3 = 0x0190A419;
+ minfo->values.pll.system = 200000;
+ minfo->features.pll.ref_freq = 27000;
}
-static int parse_pins5(WPMINFO const struct matrox_bios* bd) {
+static int parse_pins5(struct matrox_fb_info *minfo,
+ const struct matrox_bios *bd)
+{
unsigned int mult;
mult = bd->pins[4]?8000:6000;
- MINFO->limits.pixel.vcomax = (bd->pins[ 38] == 0xFF) ? 600000 : bd->pins[ 38] * mult;
- MINFO->limits.system.vcomax = (bd->pins[ 36] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 36] * mult;
- MINFO->limits.video.vcomax = (bd->pins[ 37] == 0xFF) ? MINFO->limits.system.vcomax : bd->pins[ 37] * mult;
- MINFO->limits.pixel.vcomin = (bd->pins[123] == 0xFF) ? 256000 : bd->pins[123] * mult;
- MINFO->limits.system.vcomin = (bd->pins[121] == 0xFF) ? MINFO->limits.pixel.vcomin : bd->pins[121] * mult;
- MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult;
- MINFO->values.pll.system =
- MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000;
- MINFO->values.reg.opt = get_unaligned_le32(bd->pins + 48);
- MINFO->values.reg.opt2 = get_unaligned_le32(bd->pins + 52);
- MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 94);
- MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98);
- MINFO->values.reg.memmisc = get_unaligned_le32(bd->pins + 102);
- MINFO->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106);
- MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000;
- MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20;
- MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0;
- MINFO->values.memory.emrswen = (bd->pins[115] & 0x01) != 0;
- MINFO->values.reg.maccess = MINFO->values.memory.emrswen ? 0x00004000 : 0x00000000;
+ minfo->limits.pixel.vcomax = (bd->pins[ 38] == 0xFF) ? 600000 : bd->pins[ 38] * mult;
+ minfo->limits.system.vcomax = (bd->pins[ 36] == 0xFF) ? minfo->limits.pixel.vcomax : bd->pins[ 36] * mult;
+ minfo->limits.video.vcomax = (bd->pins[ 37] == 0xFF) ? minfo->limits.system.vcomax : bd->pins[ 37] * mult;
+ minfo->limits.pixel.vcomin = (bd->pins[123] == 0xFF) ? 256000 : bd->pins[123] * mult;
+ minfo->limits.system.vcomin = (bd->pins[121] == 0xFF) ? minfo->limits.pixel.vcomin : bd->pins[121] * mult;
+ minfo->limits.video.vcomin = (bd->pins[122] == 0xFF) ? minfo->limits.system.vcomin : bd->pins[122] * mult;
+ minfo->values.pll.system =
+ minfo->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000;
+ minfo->values.reg.opt = get_unaligned_le32(bd->pins + 48);
+ minfo->values.reg.opt2 = get_unaligned_le32(bd->pins + 52);
+ minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 94);
+ minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98);
+ minfo->values.reg.memmisc = get_unaligned_le32(bd->pins + 102);
+ minfo->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106);
+ minfo->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000;
+ minfo->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20;
+ minfo->values.memory.dll = (bd->pins[115] & 0x02) != 0;
+ minfo->values.memory.emrswen = (bd->pins[115] & 0x01) != 0;
+ minfo->values.reg.maccess = minfo->values.memory.emrswen ? 0x00004000 : 0x00000000;
if (bd->pins[115] & 4) {
- MINFO->values.reg.mctlwtst_core = MINFO->values.reg.mctlwtst;
+ minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst;
} else {
u_int32_t wtst_xlat[] = { 0, 1, 5, 6, 7, 5, 2, 3 };
- MINFO->values.reg.mctlwtst_core = (MINFO->values.reg.mctlwtst & ~7) |
- wtst_xlat[MINFO->values.reg.mctlwtst & 7];
+ minfo->values.reg.mctlwtst_core = (minfo->values.reg.mctlwtst & ~7) |
+ wtst_xlat[minfo->values.reg.mctlwtst & 7];
}
- MINFO->max_pixel_clock_panellink = bd->pins[47] * 4000;
+ minfo->max_pixel_clock_panellink = bd->pins[47] * 4000;
return 0;
}
-static void default_pins5(WPMINFO2) {
+static void default_pins5(struct matrox_fb_info *minfo)
+{
/* Mine 16MB G450 with SDRAM DDR */
- MINFO->limits.pixel.vcomax =
- MINFO->limits.system.vcomax =
- MINFO->limits.video.vcomax = 600000;
- MINFO->limits.pixel.vcomin =
- MINFO->limits.system.vcomin =
- MINFO->limits.video.vcomin = 256000;
- MINFO->values.pll.system =
- MINFO->values.pll.video = 284000;
- MINFO->values.reg.opt = 0x404A1160;
- MINFO->values.reg.opt2 = 0x0000AC00;
- MINFO->values.reg.opt3 = 0x0090A409;
- MINFO->values.reg.mctlwtst_core =
- MINFO->values.reg.mctlwtst = 0x0C81462B;
- MINFO->values.reg.memmisc = 0x80000004;
- MINFO->values.reg.memrdbk = 0x01001103;
- MINFO->features.pll.ref_freq = 27000;
- MINFO->values.memory.ddr = 1;
- MINFO->values.memory.dll = 1;
- MINFO->values.memory.emrswen = 1;
- MINFO->values.reg.maccess = 0x00004000;
+ minfo->limits.pixel.vcomax =
+ minfo->limits.system.vcomax =
+ minfo->limits.video.vcomax = 600000;
+ minfo->limits.pixel.vcomin =
+ minfo->limits.system.vcomin =
+ minfo->limits.video.vcomin = 256000;
+ minfo->values.pll.system =
+ minfo->values.pll.video = 284000;
+ minfo->values.reg.opt = 0x404A1160;
+ minfo->values.reg.opt2 = 0x0000AC00;
+ minfo->values.reg.opt3 = 0x0090A409;
+ minfo->values.reg.mctlwtst_core =
+ minfo->values.reg.mctlwtst = 0x0C81462B;
+ minfo->values.reg.memmisc = 0x80000004;
+ minfo->values.reg.memrdbk = 0x01001103;
+ minfo->features.pll.ref_freq = 27000;
+ minfo->values.memory.ddr = 1;
+ minfo->values.memory.dll = 1;
+ minfo->values.memory.emrswen = 1;
+ minfo->values.reg.maccess = 0x00004000;
}
-static int matroxfb_set_limits(WPMINFO const struct matrox_bios* bd) {
+static int matroxfb_set_limits(struct matrox_fb_info *minfo,
+ const struct matrox_bios *bd)
+{
unsigned int pins_version;
static const unsigned int pinslen[] = { 64, 64, 64, 128, 128 };
- switch (ACCESS_FBINFO(chip)) {
- case MGA_2064: default_pins1(PMINFO2); break;
+ switch (minfo->chip) {
+ case MGA_2064: default_pins1(minfo); break;
case MGA_2164:
case MGA_1064:
- case MGA_1164: default_pins2(PMINFO2); break;
+ case MGA_1164: default_pins2(minfo); break;
case MGA_G100:
- case MGA_G200: default_pins3(PMINFO2); break;
- case MGA_G400: default_pins4(PMINFO2); break;
+ case MGA_G200: default_pins3(minfo); break;
+ case MGA_G400: default_pins4(minfo); break;
case MGA_G450:
- case MGA_G550: default_pins5(PMINFO2); break;
+ case MGA_G550: default_pins5(minfo); break;
}
if (!bd->bios_valid) {
printk(KERN_INFO "matroxfb: Your Matrox device does not have BIOS\n");
@@ -724,38 +745,39 @@ static int matroxfb_set_limits(WPMINFO const struct matrox_bios* bd) {
}
switch (pins_version) {
case 1:
- return parse_pins1(PMINFO bd);
+ return parse_pins1(minfo, bd);
case 2:
- return parse_pins2(PMINFO bd);
+ return parse_pins2(minfo, bd);
case 3:
- return parse_pins3(PMINFO bd);
+ return parse_pins3(minfo, bd);
case 4:
- return parse_pins4(PMINFO bd);
+ return parse_pins4(minfo, bd);
case 5:
- return parse_pins5(PMINFO bd);
+ return parse_pins5(minfo, bd);
default:
printk(KERN_DEBUG "matroxfb: Powerup info version %u is not yet supported\n", pins_version);
return -1;
}
}
-void matroxfb_read_pins(WPMINFO2) {
+void matroxfb_read_pins(struct matrox_fb_info *minfo)
+{
u32 opt;
u32 biosbase;
u32 fbbase;
- struct pci_dev* pdev = ACCESS_FBINFO(pcidev);
+ struct pci_dev *pdev = minfo->pcidev;
- memset(&ACCESS_FBINFO(bios), 0, sizeof(ACCESS_FBINFO(bios)));
+ memset(&minfo->bios, 0, sizeof(minfo->bios));
pci_read_config_dword(pdev, PCI_OPTION_REG, &opt);
pci_write_config_dword(pdev, PCI_OPTION_REG, opt | PCI_OPTION_ENABLE_ROM);
pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &biosbase);
- pci_read_config_dword(pdev, ACCESS_FBINFO(devflags.fbResource), &fbbase);
+ pci_read_config_dword(pdev, minfo->devflags.fbResource, &fbbase);
pci_write_config_dword(pdev, PCI_ROM_ADDRESS, (fbbase & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE);
- parse_bios(vaddr_va(ACCESS_FBINFO(video).vbase), &ACCESS_FBINFO(bios));
+ parse_bios(vaddr_va(minfo->video.vbase), &minfo->bios);
pci_write_config_dword(pdev, PCI_ROM_ADDRESS, biosbase);
pci_write_config_dword(pdev, PCI_OPTION_REG, opt);
#ifdef CONFIG_X86
- if (!ACCESS_FBINFO(bios).bios_valid) {
+ if (!minfo->bios.bios_valid) {
unsigned char __iomem* b;
b = ioremap(0x000C0000, 65536);
@@ -769,25 +791,21 @@ void matroxfb_read_pins(WPMINFO2) {
printk(KERN_INFO "matroxfb: Legacy BIOS is for %04X:%04X, while this device is %04X:%04X\n",
ven, dev, pdev->vendor, pdev->device);
} else {
- parse_bios(b, &ACCESS_FBINFO(bios));
+ parse_bios(b, &minfo->bios);
}
iounmap(b);
}
}
#endif
- matroxfb_set_limits(PMINFO &ACCESS_FBINFO(bios));
+ matroxfb_set_limits(minfo, &minfo->bios);
printk(KERN_INFO "PInS memtype = %u\n",
- (ACCESS_FBINFO(values).reg.opt & 0x1C00) >> 10);
+ (minfo->values.reg.opt & 0x1C00) >> 10);
}
EXPORT_SYMBOL(matroxfb_DAC_in);
EXPORT_SYMBOL(matroxfb_DAC_out);
EXPORT_SYMBOL(matroxfb_var2my);
EXPORT_SYMBOL(matroxfb_PLL_calcclock);
-#ifndef CONFIG_FB_MATROX_MULTIHEAD
-struct matrox_fb_info matroxfb_global_mxinfo;
-EXPORT_SYMBOL(matroxfb_global_mxinfo);
-#endif
EXPORT_SYMBOL(matroxfb_vgaHWinit); /* DAC1064, Ti3026 */
EXPORT_SYMBOL(matroxfb_vgaHWrestore); /* DAC1064, Ti3026 */
EXPORT_SYMBOL(matroxfb_read_pins);
diff --git a/drivers/video/matrox/matroxfb_misc.h b/drivers/video/matrox/matroxfb_misc.h
index cb62cc0ead9..351c823f1f7 100644
--- a/drivers/video/matrox/matroxfb_misc.h
+++ b/drivers/video/matrox/matroxfb_misc.h
@@ -6,13 +6,16 @@
/* also for modules */
int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int freq, unsigned int fmax,
unsigned int* in, unsigned int* feed, unsigned int* post);
-static inline int PLL_calcclock(CPMINFO unsigned int freq, unsigned int fmax,
- unsigned int* in, unsigned int* feed, unsigned int* post) {
- return matroxfb_PLL_calcclock(&ACCESS_FBINFO(features.pll), freq, fmax, in, feed, post);
+static inline int PLL_calcclock(const struct matrox_fb_info *minfo,
+ unsigned int freq, unsigned int fmax,
+ unsigned int *in, unsigned int *feed,
+ unsigned int *post)
+{
+ return matroxfb_PLL_calcclock(&minfo->features.pll, freq, fmax, in, feed, post);
}
-int matroxfb_vgaHWinit(WPMINFO struct my_timming* m);
-void matroxfb_vgaHWrestore(WPMINFO2);
-void matroxfb_read_pins(WPMINFO2);
+int matroxfb_vgaHWinit(struct matrox_fb_info *minfo, struct my_timming* m);
+void matroxfb_vgaHWrestore(struct matrox_fb_info *minfo);
+void matroxfb_read_pins(struct matrox_fb_info *minfo);
#endif /* __MATROXFB_MISC_H__ */
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
new file mode 100644
index 00000000000..802d6ae523f
--- /dev/null
+++ b/drivers/video/msm/Makefile
@@ -0,0 +1,19 @@
+
+# core framebuffer
+#
+obj-y := msm_fb.o
+
+# MDP DMA/PPP engine
+#
+obj-y += mdp.o mdp_scale_tables.o mdp_ppp.o
+
+# MDDI interface
+#
+obj-y += mddi.o
+
+# MDDI client/panel drivers
+#
+obj-y += mddi_client_dummy.o
+obj-y += mddi_client_toshiba.o
+obj-y += mddi_client_nt35399.o
+
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
new file mode 100644
index 00000000000..f2de5a1acd6
--- /dev/null
+++ b/drivers/video/msm/mddi.c
@@ -0,0 +1,828 @@
+/*
+ * MSM MDDI Transport
+ *
+ * Copyright (C) 2007 Google Incorporated
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <mach/msm_iomap.h>
+#include <mach/irqs.h>
+#include <mach/board.h>
+
+#include <mach/msm_fb.h>
+#include "mddi_hw.h"
+
+#define FLAG_DISABLE_HIBERNATION 0x0001
+#define FLAG_HAVE_CAPS 0x0002
+#define FLAG_HAS_VSYNC_IRQ 0x0004
+#define FLAG_HAVE_STATUS 0x0008
+
+#define CMD_GET_CLIENT_CAP 0x0601
+#define CMD_GET_CLIENT_STATUS 0x0602
+
+union mddi_rev {
+ unsigned char raw[MDDI_REV_BUFFER_SIZE];
+ struct mddi_rev_packet hdr;
+ struct mddi_client_status status;
+ struct mddi_client_caps caps;
+ struct mddi_register_access reg;
+};
+
+struct reg_read_info {
+ struct completion done;
+ uint32_t reg;
+ uint32_t status;
+ uint32_t result;
+};
+
+struct mddi_info {
+ uint16_t flags;
+ uint16_t version;
+ char __iomem *base;
+ int irq;
+ struct clk *clk;
+ struct msm_mddi_client_data client_data;
+
+ /* buffer for rev encap packets */
+ void *rev_data;
+ dma_addr_t rev_addr;
+ struct mddi_llentry *reg_write_data;
+ dma_addr_t reg_write_addr;
+ struct mddi_llentry *reg_read_data;
+ dma_addr_t reg_read_addr;
+ size_t rev_data_curr;
+
+ spinlock_t int_lock;
+ uint32_t int_enable;
+ uint32_t got_int;
+ wait_queue_head_t int_wait;
+
+ struct mutex reg_write_lock;
+ struct mutex reg_read_lock;
+ struct reg_read_info *reg_read;
+
+ struct mddi_client_caps caps;
+ struct mddi_client_status status;
+
+ void (*power_client)(struct msm_mddi_client_data *, int);
+
+ /* client device published to bind us to the
+ * appropriate mddi_client driver
+ */
+ char client_name[20];
+
+ struct platform_device client_pdev;
+};
+
+static void mddi_init_rev_encap(struct mddi_info *mddi);
+
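+/* Register accessors: MDDI_##r token-pastes the register name into its
+ * offset constant relative to the mapped register base. */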
+#define mddi_readl(r) readl(mddi->base + (MDDI_##r))
+#define mddi_writel(v, r) writel((v), mddi->base + (MDDI_##r))
+
+void mddi_activate_link(struct msm_mddi_client_data *cdata)
+{
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+
+ mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
+}
+
+static void mddi_handle_link_list_done(struct mddi_info *mddi)
+{
+}
+
+static void mddi_reset_rev_encap_ptr(struct mddi_info *mddi)
+{
+ printk(KERN_INFO "mddi: resetting rev ptr\n");
+ mddi->rev_data_curr = 0;
+ mddi_writel(mddi->rev_addr, REV_PTR);
+ mddi_writel(mddi->rev_addr, REV_PTR);
+ mddi_writel(MDDI_CMD_FORCE_NEW_REV_PTR, CMD);
+}
+
+static void mddi_handle_rev_data(struct mddi_info *mddi, union mddi_rev *rev)
+{
+ int i;
+ struct reg_read_info *ri;
+
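+ /* Only dispatch on the packet type if the header length is
+ * plausible for the reverse-encap buffer. */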
+ if ((rev->hdr.length <= MDDI_REV_BUFFER_SIZE - 2) &&
+ (rev->hdr.length >= sizeof(struct mddi_rev_packet) - 2)) {
+
+ switch (rev->hdr.type) {
+ case TYPE_CLIENT_CAPS:
+ memcpy(&mddi->caps, &rev->caps,
+ sizeof(struct mddi_client_caps));
+ mddi->flags |= FLAG_HAVE_CAPS;
+ wake_up(&mddi->int_wait);
+ break;
+ case TYPE_CLIENT_STATUS:
+ memcpy(&mddi->status, &rev->status,
+ sizeof(struct mddi_client_status));
+ mddi->flags |= FLAG_HAVE_STATUS;
+ wake_up(&mddi->int_wait);
+ break;
+ case TYPE_REGISTER_ACCESS:
+ ri = mddi->reg_read;
+ if (ri == 0) {
+ printk(KERN_INFO "rev: got reg %x = %x without "
+ " pending read\n",
+ rev->reg.register_address,
+ rev->reg.register_data_list);
+ break;
+ }
+ if (ri->reg != rev->reg.register_address) {
+ printk(KERN_INFO "rev: got reg %x = %x for "
+ "wrong register, expected "
+ "%x\n",
+ rev->reg.register_address,
+ rev->reg.register_data_list, ri->reg);
+ break;
+ }
+ mddi->reg_read = NULL;
+ ri->status = 0;
+ ri->result = rev->reg.register_data_list;
+ complete(&ri->done);
+ break;
+ default:
+ printk(KERN_INFO "rev: unknown reverse packet: "
+ "len=%04x type=%04x CURR_REV_PTR=%x\n",
+ rev->hdr.length, rev->hdr.type,
+ mddi_readl(CURR_REV_PTR));
+ for (i = 0; i < rev->hdr.length + 2; i++) {
+ if ((i % 16) == 0)
+ printk(KERN_INFO "\n");
+ printk(KERN_INFO " %02x", rev->raw[i]);
+ }
+ printk(KERN_INFO "\n");
+ mddi_reset_rev_encap_ptr(mddi);
+ }
+ } else {
+ printk(KERN_INFO "bad rev length, %d, CURR_REV_PTR %x\n",
+ rev->hdr.length, mddi_readl(CURR_REV_PTR));
+ mddi_reset_rev_encap_ptr(mddi);
+ }
+}
+
+static void mddi_wait_interrupt(struct mddi_info *mddi, uint32_t intmask);
+
+static void mddi_handle_rev_data_avail(struct mddi_info *mddi)
+{
+ union mddi_rev *rev = mddi->rev_data;
+ uint32_t rev_data_count;
+ uint32_t rev_crc_err_count;
+ int i;
+ struct reg_read_info *ri;
+ size_t prev_offset;
+ uint16_t length;
+
+ union mddi_rev *crev = mddi->rev_data + mddi->rev_data_curr;
+
+ /* clear the interrupt */
+ mddi_writel(MDDI_INT_REV_DATA_AVAIL, INT);
+ rev_data_count = mddi_readl(REV_PKT_CNT);
+ rev_crc_err_count = mddi_readl(REV_CRC_ERR);
+ if (rev_data_count > 1)
+ printk(KERN_INFO "rev_data_count %d\n", rev_data_count);
+
+ if (rev_crc_err_count) {
+ printk(KERN_INFO "rev_crc_err_count %d, INT %x\n",
+ rev_crc_err_count, mddi_readl(INT));
+ ri = mddi->reg_read;
+ if (ri == 0) {
+ printk(KERN_INFO "rev: got crc error without pending "
+ "read\n");
+ } else {
+ mddi->reg_read = NULL;
+ ri->status = -EIO;
+ ri->result = -1;
+ complete(&ri->done);
+ }
+ }
+
+ if (rev_data_count == 0)
+ return;
+
+ prev_offset = mddi->rev_data_curr;
+
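+ /* The reverse-encap data is a ring buffer: read the 16-bit
+ * little-endian packet length byte by byte, wrapping the
+ * current offset at MDDI_REV_BUFFER_SIZE. */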
+ length = *((uint8_t *)mddi->rev_data + mddi->rev_data_curr);
+ mddi->rev_data_curr++;
+ if (mddi->rev_data_curr == MDDI_REV_BUFFER_SIZE)
+ mddi->rev_data_curr = 0;
+ length += *((uint8_t *)mddi->rev_data + mddi->rev_data_curr) << 8;
+ mddi->rev_data_curr += 1 + length;
+ if (mddi->rev_data_curr >= MDDI_REV_BUFFER_SIZE)
+ mddi->rev_data_curr =
+ mddi->rev_data_curr % MDDI_REV_BUFFER_SIZE;
+
+ if (length > MDDI_REV_BUFFER_SIZE - 2) {
+ printk(KERN_INFO "mddi: rev data length greater than buffer"
+ "size\n");
+ mddi_reset_rev_encap_ptr(mddi);
+ return;
+ }
+
+ if (prev_offset + 2 + length >= MDDI_REV_BUFFER_SIZE) {
+ union mddi_rev tmprev;
+ size_t rem = MDDI_REV_BUFFER_SIZE - prev_offset;
+ memcpy(&tmprev.raw[0], mddi->rev_data + prev_offset, rem);
+ memcpy(&tmprev.raw[rem], mddi->rev_data, 2 + length - rem);
+ mddi_handle_rev_data(mddi, &tmprev);
+ } else {
+ mddi_handle_rev_data(mddi, crev);
+ }
+
+ if (prev_offset < MDDI_REV_BUFFER_SIZE / 2 &&
+ mddi->rev_data_curr >= MDDI_REV_BUFFER_SIZE / 2) {
+ mddi_writel(mddi->rev_addr, REV_PTR);
+ }
+}
+
+static irqreturn_t mddi_isr(int irq, void *data)
+{
+ struct msm_mddi_client_data *cdata = data;
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+ uint32_t active, status;
+
+ spin_lock(&mddi->int_lock);
+
+ active = mddi_readl(INT);
+ status = mddi_readl(STAT);
+
+ mddi_writel(active, INT);
+
+ /* ignore any interrupts we have disabled */
+ active &= mddi->int_enable;
+
+ mddi->got_int |= active;
+ wake_up(&mddi->int_wait);
+
+ if (active & MDDI_INT_PRI_LINK_LIST_DONE) {
+ mddi->int_enable &= (~MDDI_INT_PRI_LINK_LIST_DONE);
+ mddi_handle_link_list_done(mddi);
+ }
+ if (active & MDDI_INT_REV_DATA_AVAIL)
+ mddi_handle_rev_data_avail(mddi);
+
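+ /* Interrupts outside MDDI_INT_NEED_CLEAR are one-shot: mask them
+ * until a waiter re-arms them via mddi_wait_interrupt_timeout(). */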
+ if (active & ~MDDI_INT_NEED_CLEAR)
+ mddi->int_enable &= ~(active & ~MDDI_INT_NEED_CLEAR);
+
+ if (active & MDDI_INT_LINK_ACTIVE) {
+ mddi->int_enable &= (~MDDI_INT_LINK_ACTIVE);
+ mddi->int_enable |= MDDI_INT_IN_HIBERNATION;
+ }
+
+ if (active & MDDI_INT_IN_HIBERNATION) {
+ mddi->int_enable &= (~MDDI_INT_IN_HIBERNATION);
+ mddi->int_enable |= MDDI_INT_LINK_ACTIVE;
+ }
+
+ mddi_writel(mddi->int_enable, INTEN);
+ spin_unlock(&mddi->int_lock);
+
+ return IRQ_HANDLED;
+}
+
+static long mddi_wait_interrupt_timeout(struct mddi_info *mddi,
+ uint32_t intmask, int timeout)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&mddi->int_lock, irq_flags);
+ mddi->got_int &= ~intmask;
+ mddi->int_enable |= intmask;
+ mddi_writel(mddi->int_enable, INTEN);
+ spin_unlock_irqrestore(&mddi->int_lock, irq_flags);
+ return wait_event_timeout(mddi->int_wait, mddi->got_int & intmask,
+ timeout);
+}
+
+static void mddi_wait_interrupt(struct mddi_info *mddi, uint32_t intmask)
+{
+ if (mddi_wait_interrupt_timeout(mddi, intmask, HZ/10) == 0)
+ printk(KERN_INFO KERN_ERR "mddi_wait_interrupt %d, timeout "
+ "waiting for %x, INT = %x, STAT = %x gotint = %x\n",
+ current->pid, intmask, mddi_readl(INT), mddi_readl(STAT),
+ mddi->got_int);
+}
+
+static void mddi_init_rev_encap(struct mddi_info *mddi)
+{
+ memset(mddi->rev_data, 0xee, MDDI_REV_BUFFER_SIZE);
+ mddi_writel(mddi->rev_addr, REV_PTR);
+ mddi_writel(MDDI_CMD_FORCE_NEW_REV_PTR, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+}
+
+void mddi_set_auto_hibernate(struct msm_mddi_client_data *cdata, int on)
+{
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+ mddi_writel(MDDI_CMD_POWERDOWN, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_IN_HIBERNATION);
+ mddi_writel(MDDI_CMD_HIBERNATE | !!on, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+}
+
+
+static uint16_t mddi_init_registers(struct mddi_info *mddi)
+{
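+ /* Program host link timing, pad control and reverse-encap sizes,
+ * then prime the reverse-encap buffer; the low 16 bits of CORE_VER
+ * are returned to the caller. */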
+ mddi_writel(0x0001, VERSION);
+ mddi_writel(MDDI_HOST_BYTES_PER_SUBFRAME, BPS);
+ mddi_writel(0x0003, SPM); /* subframes per media */
+ mddi_writel(0x0005, TA1_LEN);
+ mddi_writel(MDDI_HOST_TA2_LEN, TA2_LEN);
+ mddi_writel(0x0096, DRIVE_HI);
+ /* 0x32 normal, 0x50 for Toshiba display */
+ mddi_writel(0x0050, DRIVE_LO);
+ mddi_writel(0x003C, DISP_WAKE); /* wakeup counter */
+ mddi_writel(MDDI_HOST_REV_RATE_DIV, REV_RATE_DIV);
+
+ mddi_writel(MDDI_REV_BUFFER_SIZE, REV_SIZE);
+ mddi_writel(MDDI_MAX_REV_PKT_SIZE, REV_ENCAP_SZ);
+
+ /* disable periodic rev encap */
+ mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+
+ if (mddi_readl(PAD_CTL) == 0) {
+ /* If we are turning on band gap, need to wait 5us before
+ * turning on the rest of the PAD */
+ mddi_writel(0x08000, PAD_CTL);
+ udelay(5);
+ }
+
+ /* Recommendation from PAD hw team */
+ mddi_writel(0xa850f, PAD_CTL);
+
+
+ /* Need an even number for counts */
+ mddi_writel(0x60006, DRIVER_START_CNT);
+
+ mddi_set_auto_hibernate(&mddi->client_data, 0);
+
+ mddi_writel(MDDI_CMD_DISP_IGNORE, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+
+ mddi_init_rev_encap(mddi);
+ return mddi_readl(CORE_VER) & 0xffff;
+}
+
+static void mddi_suspend(struct msm_mddi_client_data *cdata)
+{
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+ /* turn off the client */
+ if (mddi->power_client)
+ mddi->power_client(&mddi->client_data, 0);
+ /* turn off the link */
+ mddi_writel(MDDI_CMD_RESET, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ /* turn off the clock */
+ clk_disable(mddi->clk);
+}
+
+static void mddi_resume(struct msm_mddi_client_data *cdata)
+{
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+ mddi_set_auto_hibernate(&mddi->client_data, 0);
+ /* turn on the client */
+ if (mddi->power_client)
+ mddi->power_client(&mddi->client_data, 1);
+ /* turn on the clock */
+ clk_enable(mddi->clk);
+ /* set up the local registers */
+ mddi->rev_data_curr = 0;
+ mddi_init_registers(mddi);
+ mddi_writel(mddi->int_enable, INTEN);
+ mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
+ mddi_writel(MDDI_CMD_SEND_RTD, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ mddi_set_auto_hibernate(&mddi->client_data, 1);
+}
+
+static int __init mddi_get_client_caps(struct mddi_info *mddi)
+{
+ int i, j;
+
+ /* clear any stale interrupts */
+ mddi_writel(0xffffffff, INT);
+
+ mddi->int_enable = MDDI_INT_LINK_ACTIVE |
+ MDDI_INT_IN_HIBERNATION |
+ MDDI_INT_PRI_LINK_LIST_DONE |
+ MDDI_INT_REV_DATA_AVAIL |
+ MDDI_INT_REV_OVERFLOW |
+ MDDI_INT_REV_OVERWRITE |
+ MDDI_INT_RTD_FAILURE;
+ mddi_writel(mddi->int_enable, INTEN);
+
+ mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+
+ for (j = 0; j < 3; j++) {
+ /* the toshiba vga panel does not respond to get
+ * caps unless you SEND_RTD, but the first SEND_RTD
+ * will fail...
+ */
+ for (i = 0; i < 4; i++) {
+ uint32_t stat;
+
+ mddi_writel(MDDI_CMD_SEND_RTD, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ stat = mddi_readl(STAT);
+ printk(KERN_INFO "mddi cmd send rtd: int %x, stat %x, "
+ "rtd val %x\n", mddi_readl(INT), stat,
+ mddi_readl(RTD_VAL));
+ if ((stat & MDDI_STAT_RTD_MEAS_FAIL) == 0)
+ break;
+ msleep(1);
+ }
+
+ mddi_writel(CMD_GET_CLIENT_CAP, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ wait_event_timeout(mddi->int_wait, mddi->flags & FLAG_HAVE_CAPS,
+ HZ / 100);
+
+ if (mddi->flags & FLAG_HAVE_CAPS)
+ break;
+ printk(KERN_INFO KERN_ERR "mddi_init, timeout waiting for "
+ "caps\n");
+ }
+ return mddi->flags & FLAG_HAVE_CAPS;
+}
+
+/* link must be active when this is called */
+int mddi_check_status(struct mddi_info *mddi)
+{
+ int ret = -1, retry = 3;
+ mutex_lock(&mddi->reg_read_lock);
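+ /* Client status is delivered over reverse encapsulation, so enable
+ * periodic rev encap for the duration of the query. */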
+ mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 1, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+
+ do {
+ mddi->flags &= ~FLAG_HAVE_STATUS;
+ mddi_writel(CMD_GET_CLIENT_STATUS, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ wait_event_timeout(mddi->int_wait,
+ mddi->flags & FLAG_HAVE_STATUS,
+ HZ / 100);
+
+ if (mddi->flags & FLAG_HAVE_STATUS) {
+ if (mddi->status.crc_error_count)
+ printk(KERN_INFO "mddi status: crc_error "
+ "count: %d\n",
+ mddi->status.crc_error_count);
+ else
+ ret = 0;
+ break;
+ } else
+ printk(KERN_INFO "mddi status: failed to get client "
+ "status\n");
+ mddi_writel(MDDI_CMD_SEND_RTD, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ } while (--retry);
+
+ mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 0, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ mutex_unlock(&mddi->reg_read_lock);
+ return ret;
+}
+
+
+void mddi_remote_write(struct msm_mddi_client_data *cdata, uint32_t val,
+ uint32_t reg)
+{
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+ struct mddi_llentry *ll;
+ struct mddi_register_access *ra;
+
+ mutex_lock(&mddi->reg_write_lock);
+
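+ /* Build a one-register TYPE_REGISTER_ACCESS write packet in the
+ * preallocated link-list entry and hand it to the primary pointer. */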
+ ll = mddi->reg_write_data;
+
+ ra = &(ll->u.r);
+ ra->length = 14 + 4;
+ ra->type = TYPE_REGISTER_ACCESS;
+ ra->client_id = 0;
+ ra->read_write_info = MDDI_WRITE | 1;
+ ra->crc16 = 0;
+
+ ra->register_address = reg;
+ ra->register_data_list = val;
+
+ ll->flags = 1;
+ ll->header_count = 14;
+ ll->data_count = 4;
+ ll->data = mddi->reg_write_addr + offsetof(struct mddi_llentry,
+ u.r.register_data_list);
+ ll->next = 0;
+ ll->reserved = 0;
+
+ mddi_writel(mddi->reg_write_addr, PRI_PTR);
+
+ mddi_wait_interrupt(mddi, MDDI_INT_PRI_LINK_LIST_DONE);
+ mutex_unlock(&mddi->reg_write_lock);
+}
+
+uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg)
+{
+ struct mddi_info *mddi = container_of(cdata, struct mddi_info,
+ client_data);
+ struct mddi_llentry *ll;
+ struct mddi_register_access *ra;
+ struct reg_read_info ri;
+ unsigned s;
+ int retry_count = 2;
+ unsigned long irq_flags;
+
+ mutex_lock(&mddi->reg_read_lock);
+
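+ /* Queue a one-register read packet; the result arrives
+ * asynchronously over reverse encapsulation and completes ri.done. */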
+ ll = mddi->reg_read_data;
+
+ ra = &(ll->u.r);
+ ra->length = 14;
+ ra->type = TYPE_REGISTER_ACCESS;
+ ra->client_id = 0;
+ ra->read_write_info = MDDI_READ | 1;
+ ra->crc16 = 0;
+
+ ra->register_address = reg;
+
+ ll->flags = 0x11;
+ ll->header_count = 14;
+ ll->data_count = 0;
+ ll->data = 0;
+ ll->next = 0;
+ ll->reserved = 0;
+
+ s = mddi_readl(STAT);
+
+ ri.reg = reg;
+ ri.status = -1;
+
+ do {
+ init_completion(&ri.done);
+ mddi->reg_read = &ri;
+ mddi_writel(mddi->reg_read_addr, PRI_PTR);
+
+ mddi_wait_interrupt(mddi, MDDI_INT_PRI_LINK_LIST_DONE);
+
+ /* Enable Periodic Reverse Encapsulation. */
+ mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 1, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ if (wait_for_completion_timeout(&ri.done, HZ/10) == 0 &&
+ !ri.done.done) {
+ printk(KERN_INFO "mddi_remote_read(%x) timeout "
+ "(%d %d %d)\n",
+ reg, ri.status, ri.result, ri.done.done);
+ spin_lock_irqsave(&mddi->int_lock, irq_flags);
+ mddi->reg_read = NULL;
+ spin_unlock_irqrestore(&mddi->int_lock, irq_flags);
+ ri.status = -1;
+ ri.result = -1;
+ }
+ if (ri.status == 0)
+ break;
+
+ mddi_writel(MDDI_CMD_SEND_RTD, CMD);
+ mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ printk(KERN_INFO "mddi_remote_read: failed, sent "
+ "MDDI_CMD_SEND_RTD: int %x, stat %x, rtd val %x "
+ "curr_rev_ptr %x\n", mddi_readl(INT), mddi_readl(STAT),
+ mddi_readl(RTD_VAL), mddi_readl(CURR_REV_PTR));
+ } while (retry_count-- > 0);
+ /* Disable Periodic Reverse Encapsulation. */
+ mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 0, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ mddi->reg_read = NULL;
+ mutex_unlock(&mddi->reg_read_lock);
+ return ri.result;
+}
+
+static struct mddi_info mddi_info[2];
+
+static int __init mddi_clk_setup(struct platform_device *pdev,
+ struct mddi_info *mddi,
+ unsigned long clk_rate)
+{
+ int ret;
+
+ /* set up the clocks */
+ mddi->clk = clk_get(&pdev->dev, "mddi_clk");
+ if (IS_ERR(mddi->clk)) {
+ printk(KERN_INFO "mddi: failed to get clock\n");
+ return PTR_ERR(mddi->clk);
+ }
+ ret = clk_enable(mddi->clk);
+ if (ret)
+ goto fail;
+ ret = clk_set_rate(mddi->clk, clk_rate);
+ if (ret)
+ goto fail;
+ return 0;
+
+fail:
+ clk_put(mddi->clk);
+ return ret;
+}
+
+static int __init mddi_rev_data_setup(struct mddi_info *mddi)
+{
+ void *dma;
+ dma_addr_t dma_addr;
+
+ /* set up dma buffer */
+ dma = dma_alloc_coherent(NULL, 0x1000, &dma_addr, GFP_KERNEL);
+ if (dma == 0)
+ return -ENOMEM;
+ mddi->rev_data = dma;
+ mddi->rev_data_curr = 0;
+ mddi->rev_addr = dma_addr;
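+ /* The register write and read link-list entries share the same
+ * coherent allocation, placed right after the reverse-encap buffer. */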
+ mddi->reg_write_data = dma + MDDI_REV_BUFFER_SIZE;
+ mddi->reg_write_addr = dma_addr + MDDI_REV_BUFFER_SIZE;
+ mddi->reg_read_data = mddi->reg_write_data + 1;
+ mddi->reg_read_addr = mddi->reg_write_addr +
+ sizeof(*mddi->reg_write_data);
+ return 0;
+}
+
+static int __init mddi_probe(struct platform_device *pdev)
+{
+ struct msm_mddi_platform_data *pdata = pdev->dev.platform_data;
+ struct mddi_info *mddi = &mddi_info[pdev->id];
+ struct resource *resource;
+ int ret, i;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ printk(KERN_ERR "mddi: no associated mem resource!\n");
+ return -ENOMEM;
+ }
+ mddi->base = ioremap(resource->start, resource->end - resource->start);
+ if (!mddi->base) {
+ printk(KERN_ERR "mddi: failed to remap base!\n");
+ ret = -EINVAL;
+ goto error_ioremap;
+ }
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!resource) {
+ printk(KERN_ERR "mddi: no associated irq resource!\n");
+ ret = -EINVAL;
+ goto error_get_irq_resource;
+ }
+ mddi->irq = resource->start;
+ printk(KERN_INFO "mddi: init() base=0x%p irq=%d\n", mddi->base,
+ mddi->irq);
+ mddi->power_client = pdata->power_client;
+
+ mutex_init(&mddi->reg_write_lock);
+ mutex_init(&mddi->reg_read_lock);
+ spin_lock_init(&mddi->int_lock);
+ init_waitqueue_head(&mddi->int_wait);
+
+ ret = mddi_clk_setup(pdev, mddi, pdata->clk_rate);
+ if (ret) {
+ printk(KERN_ERR "mddi: failed to setup clock!\n");
+ goto error_clk_setup;
+ }
+
+ ret = mddi_rev_data_setup(mddi);
+ if (ret) {
+ printk(KERN_ERR "mddi: failed to setup rev data!\n");
+ goto error_rev_data;
+ }
+
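+ /* Keep all interrupts masked until the handler is installed. */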
+ mddi->int_enable = 0;
+ mddi_writel(mddi->int_enable, INTEN);
+ ret = request_irq(mddi->irq, mddi_isr, IRQF_DISABLED, "mddi",
+ &mddi->client_data);
+ if (ret) {
+ printk(KERN_ERR "mddi: failed to request enable irq!\n");
+ goto error_request_irq;
+ }
+
+ /* turn on the mddi client bridge chip */
+ if (mddi->power_client)
+ mddi->power_client(&mddi->client_data, 1);
+
+ /* initialize the mddi registers */
+ mddi_set_auto_hibernate(&mddi->client_data, 0);
+ mddi_writel(MDDI_CMD_RESET, CMD);
+ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND);
+ mddi->version = mddi_init_registers(mddi);
+ if (mddi->version < 0x20) {
+ printk(KERN_ERR "mddi: unsupported version 0x%x\n",
+ mddi->version);
+ ret = -ENODEV;
+ goto error_mddi_version;
+ }
+
+ /* read the capabilities off the client */
+ if (!mddi_get_client_caps(mddi)) {
+ printk(KERN_INFO "mddi: no client found\n");
+ /* power down the panel */
+ mddi_writel(MDDI_CMD_POWERDOWN, CMD);
+ printk(KERN_INFO "mddi powerdown: stat %x\n", mddi_readl(STAT));
+ msleep(100);
+ printk(KERN_INFO "mddi powerdown: stat %x\n", mddi_readl(STAT));
+ return 0;
+ }
+ mddi_set_auto_hibernate(&mddi->client_data, 1);
+
+ if (mddi->caps.Mfr_Name == 0 && mddi->caps.Product_Code == 0)
+ pdata->fixup(&mddi->caps.Mfr_Name, &mddi->caps.Product_Code);
+
+ mddi->client_pdev.id = 0;
+ for (i = 0; i < pdata->num_clients; i++) {
+ if (pdata->client_platform_data[i].product_id ==
+ (mddi->caps.Mfr_Name << 16 | mddi->caps.Product_Code)) {
+ mddi->client_data.private_client_data =
+ pdata->client_platform_data[i].client_data;
+ mddi->client_pdev.name =
+ pdata->client_platform_data[i].name;
+ mddi->client_pdev.id =
+ pdata->client_platform_data[i].id;
+ /* XXX: possibly set clock */
+ break;
+ }
+ }
+
+ if (i >= pdata->num_clients)
+ mddi->client_pdev.name = "mddi_c_dummy";
+ printk(KERN_INFO "mddi: registering panel %s\n",
+ mddi->client_pdev.name);
+
+ mddi->client_data.suspend = mddi_suspend;
+ mddi->client_data.resume = mddi_resume;
+ mddi->client_data.activate_link = mddi_activate_link;
+ mddi->client_data.remote_write = mddi_remote_write;
+ mddi->client_data.remote_read = mddi_remote_read;
+ mddi->client_data.auto_hibernate = mddi_set_auto_hibernate;
+ mddi->client_data.fb_resource = pdata->fb_resource;
+ if (pdev->id == 0)
+ mddi->client_data.interface_type = MSM_MDDI_PMDH_INTERFACE;
+ else if (pdev->id == 1)
+ mddi->client_data.interface_type = MSM_MDDI_EMDH_INTERFACE;
+ else {
+ printk(KERN_ERR "mddi: can not determine interface %d!\n",
+ pdev->id);
+ ret = -EINVAL;
+ goto error_mddi_interface;
+ }
+
+ mddi->client_pdev.dev.platform_data = &mddi->client_data;
+ printk(KERN_INFO "mddi: publish: %s\n", mddi->client_name);
+ platform_device_register(&mddi->client_pdev);
+ return 0;
+
+error_mddi_interface:
+error_mddi_version:
+ free_irq(mddi->irq, 0);
+error_request_irq:
+ dma_free_coherent(NULL, 0x1000, mddi->rev_data, mddi->rev_addr);
+error_rev_data:
+error_clk_setup:
+error_get_irq_resource:
+ iounmap(mddi->base);
+error_ioremap:
+
+ printk(KERN_INFO "mddi: mddi_init() failed (%d)\n", ret);
+ return ret;
+}
+
+
+static struct platform_driver mddi_driver = {
+ .probe = mddi_probe,
+ .driver = { .name = "msm_mddi" },
+};
+
+static int __init _mddi_init(void)
+{
+ return platform_driver_register(&mddi_driver);
+}
+
+module_init(_mddi_init);
diff --git a/drivers/video/msm/mddi_client_dummy.c b/drivers/video/msm/mddi_client_dummy.c
new file mode 100644
index 00000000000..ebbae87885b
--- /dev/null
+++ b/drivers/video/msm/mddi_client_dummy.c
@@ -0,0 +1,97 @@
+/* drivers/video/msm_fb/mddi_client_dummy.c
+ *
+ * Support for "dummy" mddi client devices which require no
+ * special initialization code.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/msm_fb.h>
+
+struct panel_info {
+ struct platform_device pdev;
+ struct msm_panel_data panel_data;
+};
+
+static int mddi_dummy_suspend(struct msm_panel_data *panel_data)
+{
+ return 0;
+}
+
+static int mddi_dummy_resume(struct msm_panel_data *panel_data)
+{
+ return 0;
+}
+
+static int mddi_dummy_blank(struct msm_panel_data *panel_data)
+{
+ return 0;
+}
+
+static int mddi_dummy_unblank(struct msm_panel_data *panel_data)
+{
+ return 0;
+}
+
+static int mddi_dummy_probe(struct platform_device *pdev)
+{
+ struct msm_mddi_client_data *client_data = pdev->dev.platform_data;
+ struct panel_info *panel =
+ kzalloc(sizeof(struct panel_info), GFP_KERNEL);
+ int ret;
+ if (!panel)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, panel);
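+ /* The dummy client only publishes a child "msm_panel" device whose
+ * blank/unblank/suspend/resume hooks are all no-ops. */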
+ panel->panel_data.suspend = mddi_dummy_suspend;
+ panel->panel_data.resume = mddi_dummy_resume;
+ panel->panel_data.blank = mddi_dummy_blank;
+ panel->panel_data.unblank = mddi_dummy_unblank;
+ panel->panel_data.caps = MSMFB_CAP_PARTIAL_UPDATES;
+ panel->pdev.name = "msm_panel";
+ panel->pdev.id = pdev->id;
+ platform_device_add_resources(&panel->pdev,
+ client_data->fb_resource, 1);
+ panel->panel_data.fb_data = client_data->private_client_data;
+ panel->pdev.dev.platform_data = &panel->panel_data;
+ ret = platform_device_register(&panel->pdev);
+ if (ret) {
+ kfree(panel);
+ return ret;
+ }
+ return 0;
+}
+
+static int mddi_dummy_remove(struct platform_device *pdev)
+{
+ struct panel_info *panel = platform_get_drvdata(pdev);
+ kfree(panel);
+ return 0;
+}
+
+static struct platform_driver mddi_client_dummy = {
+ .probe = mddi_dummy_probe,
+ .remove = mddi_dummy_remove,
+ .driver = { .name = "mddi_c_dummy" },
+};
+
+static int __init mddi_client_dummy_init(void)
+{
+ platform_driver_register(&mddi_client_dummy);
+ return 0;
+}
+
+module_init(mddi_client_dummy_init);
+
diff --git a/drivers/video/msm/mddi_client_nt35399.c b/drivers/video/msm/mddi_client_nt35399.c
new file mode 100644
index 00000000000..9c78050ac79
--- /dev/null
+++ b/drivers/video/msm/mddi_client_nt35399.c
@@ -0,0 +1,255 @@
+/* drivers/video/msm_fb/mddi_client_nt35399.c
+ *
+ * Support for Novatek NT35399 MDDI client of Sapphire
+ *
+ * Copyright (C) 2008 HTC Incorporated
+ * Author: Solomon Chiu (solomon_chiu@htc.com)
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <mach/msm_fb.h>
+
+static DECLARE_WAIT_QUEUE_HEAD(nt35399_vsync_wait);
+
+struct panel_info {
+ struct msm_mddi_client_data *client_data;
+ struct platform_device pdev;
+ struct msm_panel_data panel_data;
+ struct msmfb_callback *fb_callback;
+ struct work_struct panel_work;
+ struct workqueue_struct *fb_wq;
+ int nt35399_got_int;
+};
+
+static void
+nt35399_request_vsync(struct msm_panel_data *panel_data,
+ struct msmfb_callback *callback)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ panel->fb_callback = callback;
+ if (panel->nt35399_got_int) {
+ panel->nt35399_got_int = 0;
+ client_data->activate_link(client_data); /* clears interrupt */
+ }
+}
+
+static void nt35399_wait_vsync(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ if (panel->nt35399_got_int) {
+ panel->nt35399_got_int = 0;
+ client_data->activate_link(client_data); /* clears interrupt */
+ }
+
+ if (wait_event_timeout(nt35399_vsync_wait, panel->nt35399_got_int,
+ HZ/2) == 0)
+ printk(KERN_ERR "timeout waiting for VSYNC\n");
+
+ panel->nt35399_got_int = 0;
+ /* interrupt clears when screen dma starts */
+}
+
+static int nt35399_suspend(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+ int ret;
+
+ ret = bridge_data->uninit(bridge_data, client_data);
+ if (ret) {
+ printk(KERN_INFO "mddi nt35399 client: non zero return from "
+ "uninit\n");
+ return ret;
+ }
+ client_data->suspend(client_data);
+ return 0;
+}
+
+static int nt35399_resume(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+ int ret;
+
+ client_data->resume(client_data);
+ ret = bridge_data->init(bridge_data, client_data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int nt35399_blank(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+
+ return bridge_data->blank(bridge_data, client_data);
+}
+
+static int nt35399_unblank(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+
+ return bridge_data->unblank(bridge_data, client_data);
+}
+
+static irqreturn_t nt35399_vsync_interrupt(int irq, void *data)

+{
+ struct panel_info *panel = data;
+
+ panel->nt35399_got_int = 1;
+
+ if (panel->fb_callback) {
+ panel->fb_callback->func(panel->fb_callback);
+ panel->fb_callback = NULL;
+ }
+
+ wake_up(&nt35399_vsync_wait);
+
+ return IRQ_HANDLED;
+}
+
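+/*
+ * setup_vsync: with init != 0, claim GPIO 97 as an input and route its
+ * rising edge to nt35399_vsync_interrupt(); with init == 0, release the
+ * IRQ and GPIO again (used from the remove path).
+ */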
+static int setup_vsync(struct panel_info *panel, int init)
+{
+ int ret;
+ int gpio = 97;
+ unsigned int irq;
+
+ if (!init) {
+ ret = 0;
+ goto uninit;
+ }
+ ret = gpio_request(gpio, "vsync");
+ if (ret)
+ goto err_request_gpio_failed;
+
+ ret = gpio_direction_input(gpio);
+ if (ret)
+ goto err_gpio_direction_input_failed;
+
+ ret = irq = gpio_to_irq(gpio);
+ if (ret < 0)
+ goto err_get_irq_num_failed;
+
+ ret = request_irq(irq, nt35399_vsync_interrupt, IRQF_TRIGGER_RISING,
+ "vsync", panel);
+ if (ret)
+ goto err_request_irq_failed;
+
+ printk(KERN_INFO "vsync on gpio %d now %d\n",
+ gpio, gpio_get_value(gpio));
+ return 0;
+
+uninit:
+ free_irq(gpio_to_irq(gpio), panel);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+ gpio_free(gpio);
+err_request_gpio_failed:
+ return ret;
+}
+
+static int mddi_nt35399_probe(struct platform_device *pdev)
+{
+ struct msm_mddi_client_data *client_data = pdev->dev.platform_data;
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+
+ int ret;
+
+ struct panel_info *panel = kzalloc(sizeof(struct panel_info),
+ GFP_KERNEL);
+
+ printk(KERN_DEBUG "%s: enter.\n", __func__);
+
+ if (!panel)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, panel);
+
+ ret = setup_vsync(panel, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "mddi_nt35399_setup_vsync failed\n");
+ kfree(panel);
+ return ret;
+ }
+
+ panel->client_data = client_data;
+ panel->panel_data.suspend = nt35399_suspend;
+ panel->panel_data.resume = nt35399_resume;
+ panel->panel_data.wait_vsync = nt35399_wait_vsync;
+ panel->panel_data.request_vsync = nt35399_request_vsync;
+ panel->panel_data.blank = nt35399_blank;
+ panel->panel_data.unblank = nt35399_unblank;
+ panel->panel_data.fb_data = &bridge_data->fb_data;
+ panel->panel_data.caps = 0;
+
+ panel->pdev.name = "msm_panel";
+ panel->pdev.id = pdev->id;
+ panel->pdev.resource = client_data->fb_resource;
+ panel->pdev.num_resources = 1;
+ panel->pdev.dev.platform_data = &panel->panel_data;
+
+ if (bridge_data->init)
+ bridge_data->init(bridge_data, client_data);
+
+ platform_device_register(&panel->pdev);
+
+ return 0;
+}
+
+static int mddi_nt35399_remove(struct platform_device *pdev)
+{
+ struct panel_info *panel = platform_get_drvdata(pdev);
+
+ setup_vsync(panel, 0);
+ kfree(panel);
+ return 0;
+}
+
+static struct platform_driver mddi_client_0bda_8a47 = {
+ .probe = mddi_nt35399_probe,
+ .remove = mddi_nt35399_remove,
+ .driver = { .name = "mddi_c_0bda_8a47" },
+};
+
+static int __init mddi_client_nt35399_init(void)
+{
+ return platform_driver_register(&mddi_client_0bda_8a47);
+}
+
+module_init(mddi_client_nt35399_init);
+
diff --git a/drivers/video/msm/mddi_client_toshiba.c b/drivers/video/msm/mddi_client_toshiba.c
new file mode 100644
index 00000000000..80d0f5fdf0b
--- /dev/null
+++ b/drivers/video/msm/mddi_client_toshiba.c
@@ -0,0 +1,283 @@
+/* drivers/video/msm_fb/mddi_client_toshiba.c
+ *
+ * Support for Toshiba TC358720XBG mddi client devices which require no
+ * special initialization code.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <mach/msm_fb.h>
+
+
+#define LCD_CONTROL_BLOCK_BASE 0x110000
+#define CMN (LCD_CONTROL_BLOCK_BASE|0x10)
+#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
+#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
+#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
+#define VPOS (LCD_CONTROL_BLOCK_BASE|0xC0)
+#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
+#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
+#define WSYN_DLY (LCD_CONTROL_BLOCK_BASE|0x58)
+#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
+
+#define BASE5 0x150000
+#define BASE6 0x160000
+#define BASE7 0x170000
+
+#define GPIOIEV (BASE5 + 0x10)
+#define GPIOIE (BASE5 + 0x14)
+#define GPIORIS (BASE5 + 0x18)
+#define GPIOMIS (BASE5 + 0x1C)
+#define GPIOIC (BASE5 + 0x20)
+
+#define INTMASK (BASE6 + 0x0C)
+#define INTMASK_VWAKEOUT (1U << 0)
+#define INTMASK_VWAKEOUT_ACTIVE_LOW (1U << 8)
+#define GPIOSEL (BASE7 + 0x00)
+#define GPIOSEL_VWAKEINT (1U << 0)
+
+static DECLARE_WAIT_QUEUE_HEAD(toshiba_vsync_wait);
+
+struct panel_info {
+ struct msm_mddi_client_data *client_data;
+ struct platform_device pdev;
+ struct msm_panel_data panel_data;
+ struct msmfb_callback *toshiba_callback;
+ int toshiba_got_int;
+};
+
+
+static void toshiba_request_vsync(struct msm_panel_data *panel_data,
+ struct msmfb_callback *callback)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ panel->toshiba_callback = callback;
+ if (panel->toshiba_got_int) {
+ panel->toshiba_got_int = 0;
+ client_data->activate_link(client_data);
+ }
+}
+
+static void toshiba_clear_vsync(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ client_data->activate_link(client_data);
+}
+
+static void toshiba_wait_vsync(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ if (panel->toshiba_got_int) {
+ panel->toshiba_got_int = 0;
+ client_data->activate_link(client_data); /* clears interrupt */
+ }
+ if (wait_event_timeout(toshiba_vsync_wait, panel->toshiba_got_int,
+ HZ/2) == 0)
+ printk(KERN_ERR "timeout waiting for VSYNC\n");
+ panel->toshiba_got_int = 0;
+ /* interrupt clears when screen dma starts */
+}
+
+static int toshiba_suspend(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+ int ret;
+
+ ret = bridge_data->uninit(bridge_data, client_data);
+ if (ret) {
+ printk(KERN_INFO "mddi toshiba client: non zero return from "
+ "uninit\n");
+ return ret;
+ }
+ client_data->suspend(client_data);
+ return 0;
+}
+
+static int toshiba_resume(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+ int ret;
+
+ client_data->resume(client_data);
+ ret = bridge_data->init(bridge_data, client_data);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int toshiba_blank(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+
+ return bridge_data->blank(bridge_data, client_data);
+}
+
+static int toshiba_unblank(struct msm_panel_data *panel_data)
+{
+ struct panel_info *panel = container_of(panel_data, struct panel_info,
+ panel_data);
+ struct msm_mddi_client_data *client_data = panel->client_data;
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+
+ return bridge_data->unblank(bridge_data, client_data);
+}
+
+static irqreturn_t toshiba_vsync_interrupt(int irq, void *data)
+{
+ struct panel_info *panel = data;
+
+ panel->toshiba_got_int = 1;
+ if (panel->toshiba_callback) {
+ panel->toshiba_callback->func(panel->toshiba_callback);
+ panel->toshiba_callback = NULL;
+ }
+ wake_up(&toshiba_vsync_wait);
+ return IRQ_HANDLED;
+}
+
+static int setup_vsync(struct panel_info *panel,
+ int init)
+{
+ int ret;
+ int gpio = 97;
+ unsigned int irq;
+
+ if (!init) {
+ ret = 0;
+ goto uninit;
+ }
+ ret = gpio_request(gpio, "vsync");
+ if (ret)
+ goto err_request_gpio_failed;
+
+ ret = gpio_direction_input(gpio);
+ if (ret)
+ goto err_gpio_direction_input_failed;
+
+ ret = irq = gpio_to_irq(gpio);
+ if (ret < 0)
+ goto err_get_irq_num_failed;
+
+ ret = request_irq(irq, toshiba_vsync_interrupt, IRQF_TRIGGER_RISING,
+ "vsync", panel);
+ if (ret)
+ goto err_request_irq_failed;
+ printk(KERN_INFO "vsync on gpio %d now %d\n",
+ gpio, gpio_get_value(gpio));
+ return 0;
+
+uninit:
+ free_irq(gpio_to_irq(gpio), panel);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+ gpio_free(gpio);
+err_request_gpio_failed:
+ return ret;
+}
+
+static int mddi_toshiba_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct msm_mddi_client_data *client_data = pdev->dev.platform_data;
+ struct msm_mddi_bridge_platform_data *bridge_data =
+ client_data->private_client_data;
+ struct panel_info *panel =
+ kzalloc(sizeof(struct panel_info), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, panel);
+
+ /* mddi_remote_write(mddi, 0, WAKEUP); */
+ client_data->remote_write(client_data, GPIOSEL_VWAKEINT, GPIOSEL);
+ client_data->remote_write(client_data, INTMASK_VWAKEOUT, INTMASK);
+
+ ret = setup_vsync(panel, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "mddi_bridge_setup_vsync failed\n");
+ kfree(panel);
+ return ret;
+ }
+
+ panel->client_data = client_data;
+ panel->panel_data.suspend = toshiba_suspend;
+ panel->panel_data.resume = toshiba_resume;
+ panel->panel_data.wait_vsync = toshiba_wait_vsync;
+ panel->panel_data.request_vsync = toshiba_request_vsync;
+ panel->panel_data.clear_vsync = toshiba_clear_vsync;
+ panel->panel_data.blank = toshiba_blank;
+ panel->panel_data.unblank = toshiba_unblank;
+ panel->panel_data.fb_data = &bridge_data->fb_data;
+ panel->panel_data.caps = MSMFB_CAP_PARTIAL_UPDATES;
+
+ panel->pdev.name = "msm_panel";
+ panel->pdev.id = pdev->id;
+ panel->pdev.resource = client_data->fb_resource;
+ panel->pdev.num_resources = 1;
+ panel->pdev.dev.platform_data = &panel->panel_data;
+ bridge_data->init(bridge_data, client_data);
+ platform_device_register(&panel->pdev);
+
+ return 0;
+}
+
+static int mddi_toshiba_remove(struct platform_device *pdev)
+{
+ struct panel_info *panel = platform_get_drvdata(pdev);
+
+ setup_vsync(panel, 0);
+ kfree(panel);
+ return 0;
+}
+
+static struct platform_driver mddi_client_d263_0000 = {
+ .probe = mddi_toshiba_probe,
+ .remove = mddi_toshiba_remove,
+ .driver = { .name = "mddi_c_d263_0000" },
+};
+
+static int __init mddi_client_toshiba_init(void)
+{
+ platform_driver_register(&mddi_client_d263_0000);
+ return 0;
+}
+
+module_init(mddi_client_toshiba_init);
+
diff --git a/drivers/video/msm/mddi_hw.h b/drivers/video/msm/mddi_hw.h
new file mode 100644
index 00000000000..45cc01fc1e7
--- /dev/null
+++ b/drivers/video/msm/mddi_hw.h
@@ -0,0 +1,305 @@
+/* drivers/video/msm_fb/mddi_hw.h
+ *
+ * MSM MDDI Hardware Registers and Structures
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MDDI_HW_H_
+#define _MDDI_HW_H_
+
+#include <linux/types.h>
+
+#define MDDI_CMD 0x0000
+#define MDDI_VERSION 0x0004
+#define MDDI_PRI_PTR 0x0008
+#define MDDI_SEC_PTR 0x000c
+#define MDDI_BPS 0x0010
+#define MDDI_SPM 0x0014
+#define MDDI_INT 0x0018
+#define MDDI_INTEN 0x001c
+#define MDDI_REV_PTR 0x0020
+#define MDDI_REV_SIZE 0x0024
+#define MDDI_STAT 0x0028
+#define MDDI_REV_RATE_DIV 0x002c
+#define MDDI_REV_CRC_ERR 0x0030
+#define MDDI_TA1_LEN 0x0034
+#define MDDI_TA2_LEN 0x0038
+#define MDDI_TEST_BUS 0x003c
+#define MDDI_TEST 0x0040
+#define MDDI_REV_PKT_CNT 0x0044
+#define MDDI_DRIVE_HI 0x0048
+#define MDDI_DRIVE_LO 0x004c
+#define MDDI_DISP_WAKE 0x0050
+#define MDDI_REV_ENCAP_SZ 0x0054
+#define MDDI_RTD_VAL 0x0058
+#define MDDI_PAD_CTL 0x0068
+#define MDDI_DRIVER_START_CNT 0x006c
+#define MDDI_NEXT_PRI_PTR 0x0070
+#define MDDI_NEXT_SEC_PTR 0x0074
+#define MDDI_MISR_CTL 0x0078
+#define MDDI_MISR_DATA 0x007c
+#define MDDI_SF_CNT 0x0080
+#define MDDI_MF_CNT 0x0084
+#define MDDI_CURR_REV_PTR 0x0088
+#define MDDI_CORE_VER 0x008c
+
+#define MDDI_INT_PRI_PTR_READ 0x0001
+#define MDDI_INT_SEC_PTR_READ 0x0002
+#define MDDI_INT_REV_DATA_AVAIL 0x0004
+#define MDDI_INT_DISP_REQ 0x0008
+#define MDDI_INT_PRI_UNDERFLOW 0x0010
+#define MDDI_INT_SEC_UNDERFLOW 0x0020
+#define MDDI_INT_REV_OVERFLOW 0x0040
+#define MDDI_INT_CRC_ERROR 0x0080
+#define MDDI_INT_MDDI_IN 0x0100
+#define MDDI_INT_PRI_OVERWRITE 0x0200
+#define MDDI_INT_SEC_OVERWRITE 0x0400
+#define MDDI_INT_REV_OVERWRITE 0x0800
+#define MDDI_INT_DMA_FAILURE 0x1000
+#define MDDI_INT_LINK_ACTIVE 0x2000
+#define MDDI_INT_IN_HIBERNATION 0x4000
+#define MDDI_INT_PRI_LINK_LIST_DONE 0x8000
+#define MDDI_INT_SEC_LINK_LIST_DONE 0x10000
+#define MDDI_INT_NO_CMD_PKTS_PEND 0x20000
+#define MDDI_INT_RTD_FAILURE 0x40000
+#define MDDI_INT_REV_PKT_RECEIVED 0x80000
+#define MDDI_INT_REV_PKTS_AVAIL 0x100000
+
+#define MDDI_INT_NEED_CLEAR ( \
+ MDDI_INT_REV_DATA_AVAIL | \
+ MDDI_INT_PRI_UNDERFLOW | \
+ MDDI_INT_SEC_UNDERFLOW | \
+ MDDI_INT_REV_OVERFLOW | \
+ MDDI_INT_CRC_ERROR | \
+ MDDI_INT_REV_PKT_RECEIVED)
+
+
+#define MDDI_STAT_LINK_ACTIVE 0x0001
+#define MDDI_STAT_NEW_REV_PTR 0x0002
+#define MDDI_STAT_NEW_PRI_PTR 0x0004
+#define MDDI_STAT_NEW_SEC_PTR 0x0008
+#define MDDI_STAT_IN_HIBERNATION 0x0010
+#define MDDI_STAT_PRI_LINK_LIST_DONE 0x0020
+#define MDDI_STAT_SEC_LINK_LIST_DONE 0x0040
+#define MDDI_STAT_PENDING_TIMING_PKT 0x0080
+#define MDDI_STAT_PENDING_REV_ENCAP 0x0100
+#define MDDI_STAT_PENDING_POWERDOWN 0x0200
+#define MDDI_STAT_RTD_MEAS_FAIL 0x0800
+#define MDDI_STAT_CLIENT_WAKEUP_REQ 0x1000
+
+
+#define MDDI_CMD_POWERDOWN 0x0100
+#define MDDI_CMD_POWERUP 0x0200
+#define MDDI_CMD_HIBERNATE 0x0300
+#define MDDI_CMD_RESET 0x0400
+#define MDDI_CMD_DISP_IGNORE 0x0501
+#define MDDI_CMD_DISP_LISTEN 0x0500
+#define MDDI_CMD_SEND_REV_ENCAP 0x0600
+#define MDDI_CMD_GET_CLIENT_CAP 0x0601
+#define MDDI_CMD_GET_CLIENT_STATUS 0x0602
+#define MDDI_CMD_SEND_RTD 0x0700
+#define MDDI_CMD_LINK_ACTIVE 0x0900
+#define MDDI_CMD_PERIODIC_REV_ENCAP 0x0A00
+#define MDDI_CMD_FORCE_NEW_REV_PTR 0x0C00
+
+
+
+#define MDDI_VIDEO_REV_PKT_SIZE 0x40
+#define MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE 0x60
+#define MDDI_MAX_REV_PKT_SIZE 0x60
+
+/* #define MDDI_REV_BUFFER_SIZE 128 */
+#define MDDI_REV_BUFFER_SIZE (MDDI_MAX_REV_PKT_SIZE * 4)
+
+/* The MDP sends 256-pixel packets, so a lower value hibernates more without
+ * significantly increasing the latency of waiting for the next subframe. */
+#define MDDI_HOST_BYTES_PER_SUBFRAME 0x3C00
+#define MDDI_HOST_TA2_LEN 0x000c
+#define MDDI_HOST_REV_RATE_DIV 0x0002
+
+
+struct __attribute__((packed)) mddi_rev_packet {
+ uint16_t length;
+ uint16_t type;
+ uint16_t client_id;
+};
+
+struct __attribute__((packed)) mddi_client_status {
+ uint16_t length;
+ uint16_t type;
+ uint16_t client_id;
+ uint16_t reverse_link_request; /* bytes needed in rev encap message */
+ uint8_t crc_error_count;
+ uint8_t capability_change;
+ uint16_t graphics_busy_flags;
+ uint16_t crc16;
+};
+
+struct __attribute__((packed)) mddi_client_caps {
+ uint16_t length; /* length, exclusive of this field */
+ uint16_t type; /* 66 */
+ uint16_t client_id;
+
+ uint16_t Protocol_Version;
+ uint16_t Minimum_Protocol_Version;
+ uint16_t Data_Rate_Capability;
+ uint8_t Interface_Type_Capability;
+ uint8_t Number_of_Alt_Displays;
+ uint16_t PostCal_Data_Rate;
+ uint16_t Bitmap_Width;
+ uint16_t Bitmap_Height;
+ uint16_t Display_Window_Width;
+ uint16_t Display_Window_Height;
+ uint32_t Color_Map_Size;
+ uint16_t Color_Map_RGB_Width;
+ uint16_t RGB_Capability;
+ uint8_t Monochrome_Capability;
+ uint8_t Reserved_1;
+ uint16_t Y_Cb_Cr_Capability;
+ uint16_t Bayer_Capability;
+ uint16_t Alpha_Cursor_Image_Planes;
+ uint32_t Client_Feature_Capability_Indicators;
+ uint8_t Maximum_Video_Frame_Rate_Capability;
+ uint8_t Minimum_Video_Frame_Rate_Capability;
+ uint16_t Minimum_Sub_frame_Rate;
+ uint16_t Audio_Buffer_Depth;
+ uint16_t Audio_Channel_Capability;
+ uint16_t Audio_Sample_Rate_Capability;
+ uint8_t Audio_Sample_Resolution;
+ uint8_t Mic_Audio_Sample_Resolution;
+ uint16_t Mic_Sample_Rate_Capability;
+ uint8_t Keyboard_Data_Format;
+ uint8_t pointing_device_data_format;
+ uint16_t content_protection_type;
+ uint16_t Mfr_Name;
+ uint16_t Product_Code;
+ uint16_t Reserved_3;
+ uint32_t Serial_Number;
+ uint8_t Week_of_Manufacture;
+ uint8_t Year_of_Manufacture;
+
+ uint16_t crc16;
+};
+
+
+struct __attribute__((packed)) mddi_video_stream {
+ uint16_t length;
+ uint16_t type; /* 16 */
+ uint16_t client_id; /* 0 */
+
+ uint16_t video_data_format_descriptor;
+/* format of each pixel in the Pixel Data in the present stream in the
+ * present packet.
+ * If bits [15:13] = 000 monochrome
+ * If bits [15:13] = 001 color pixels (palette).
+ * If bits [15:13] = 010 color pixels in raw RGB
+ * If bits [15:13] = 011 data in 4:2:2 Y Cb Cr format
+ * If bits [15:13] = 100 Bayer pixels
+ */
+
+ uint16_t pixel_data_attributes;
+/* interpreted as follows:
+ * Bits [1:0] = 11 pixel data is displayed to both eyes
+ * Bits [1:0] = 10 pixel data is routed to the left eye only.
+ * Bits [1:0] = 01 pixel data is routed to the right eye only.
+ * Bits [1:0] = 00 pixel data is routed to the alternate display.
+ * Bit 2 is 0 Pixel Data is in the standard progressive format.
+ * Bit 2 is 1 Pixel Data is in interlace format.
+ * Bit 3 is 0 Pixel Data is in the standard progressive format.
+ * Bit 3 is 1 Pixel Data is in alternate pixel format.
+ * Bit 4 is 0 Pixel Data is to or from the display frame buffer.
+ * Bit 4 is 1 Pixel Data is to or from the camera.
+ * Bit 5 is 0 pixel data contains the next consecutive row of pixels.
+ * Bit 5 is 1 X Left Edge, Y Top Edge, X Right Edge, Y Bottom Edge,
+ * X Start, and Y Start parameters are not defined and
+ * shall be ignored by the client.
+ * Bits [7:6] = 01 Pixel data is written to the offline image buffer.
+ * Bits [7:6] = 00 Pixel data is written to the buffer to refresh display.
+ * Bits [7:6] = 11 Pixel data is written to all image buffers.
+ * Bits [7:6] = 10 Invalid. Reserved for future use.
+ * Bits 8 through 11 alternate display number.
+ * Bits 12 through 14 are reserved for future use and shall be set to zero.
+ * Bit 15 is 1 the row of pixels is the last row of pixels in a frame.
+ */
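+/* For example, a value with bits [1:0] = 11 and bit 15 set describes the
+ * last row of a frame that is displayed to both eyes. */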
+
+ uint16_t x_left_edge;
+ uint16_t y_top_edge;
+ /* X,Y coordinate of the top left edge of the screen window */
+
+ uint16_t x_right_edge;
+ uint16_t y_bottom_edge;
+ /* X,Y coordinate of the bottom right edge of the window being
+ * updated. */
+
+ uint16_t x_start;
+ uint16_t y_start;
+ /* (X Start, Y Start) is the first pixel in the Pixel Data field
+ * below. */
+
+ uint16_t pixel_count;
+ /* number of pixels in the Pixel Data field below. */
+
+ uint16_t parameter_CRC;
+ /* 16-bit CRC of all bytes from the Packet Length to the Pixel Count. */
+
+ uint16_t reserved;
+ /* 16-bit variable to make structure align on 4 byte boundary */
+};
+
+#define TYPE_VIDEO_STREAM 16
+#define TYPE_CLIENT_CAPS 66
+#define TYPE_REGISTER_ACCESS 146
+#define TYPE_CLIENT_STATUS 70
+
+struct __attribute__((packed)) mddi_register_access {
+ uint16_t length;
+ uint16_t type; /* 146 */
+ uint16_t client_id;
+
+ uint16_t read_write_info;
+ /* Bits 13:0 a 14-bit unsigned integer that specifies the number of
+ * 32-bit Register Data List items to be transferred in the
+ * Register Data List field.
+ * Bits[15:14] = 00 Write to register(s);
+ * Bits[15:14] = 10 Read from register(s);
+ * Bits[15:14] = 11 Response to a Read.
+ * Bits[15:14] = 01 this value is reserved for future use. */
+#define MDDI_WRITE (0 << 14)
+#define MDDI_READ (2 << 14)
+#define MDDI_READ_RESP (3 << 14)
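+/* e.g. a read of four registers encodes read_write_info as (MDDI_READ | 4):
+ * bits [15:14] = 10 for a read, bits [13:0] = 4 register data list items. */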
+
+ uint32_t register_address;
+ /* the register address that is to be written to or read from. */
+
+ uint16_t crc16;
+
+ uint32_t register_data_list;
+ /* list of 4-byte register data values for/from client registers */
+};
+
+struct __attribute__((packed)) mddi_llentry {
+ uint16_t flags;
+ uint16_t header_count;
+ uint16_t data_count;
+ dma_addr_t data; /* 32 bit */
+ struct mddi_llentry *next;
+ uint16_t reserved;
+ union {
+ struct mddi_video_stream v;
+ struct mddi_register_access r;
+ uint32_t _[12];
+ } u;
+};
+
+#endif
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
new file mode 100644
index 00000000000..99636a2b20f
--- /dev/null
+++ b/drivers/video/msm/mdp.c
@@ -0,0 +1,538 @@
+/* drivers/video/msm_fb/mdp.c
+ *
+ * MSM MDP Interface (used by framebuffer core)
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+#include <linux/file.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/major.h>
+
+#include <mach/msm_iomap.h>
+#include <mach/msm_fb.h>
+#include <linux/platform_device.h>
+
+#include "mdp_hw.h"
+
+struct class *mdp_class;
+
+#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)
+
+static uint16_t mdp_default_ccs[] = {
+ 0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
+ 0x010, 0x080, 0x080
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
+static struct msmfb_callback *dma_callback;
+static struct clk *clk;
+static unsigned int mdp_irq_mask;
+static DEFINE_SPINLOCK(mdp_lock);
+DEFINE_MUTEX(mdp_mutex);
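+/* mdp_lock protects mdp_irq_mask and the irq enable/disable sequencing;
+ * mdp_mutex serializes PPP blit requests in mdp_blit(). */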
+
+static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
+{
+ unsigned long irq_flags;
+ int ret = 0;
+
+ BUG_ON(!mask);
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ /* if the mask bits are already set return an error, this interrupt
+ * is already enabled */
+ if (mdp_irq_mask & mask) {
+ printk(KERN_ERR "mdp irq already on already on %x %x\n",
+ mdp_irq_mask, mask);
+ ret = -1;
+ }
+ /* if the mdp irq is not already enabled enable it */
+ if (!mdp_irq_mask) {
+ if (clk)
+ clk_enable(clk);
+ enable_irq(mdp->irq);
+ }
+
+ /* update the irq mask to reflect the fact that the interrupt is
+ * enabled */
+ mdp_irq_mask |= mask;
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ return ret;
+}
+
+static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
+{
+ /* this interrupt is already disabled! */
+ if (!(mdp_irq_mask & mask)) {
+ printk(KERN_ERR "mdp irq already off %x %x\n",
+ mdp_irq_mask, mask);
+ return -1;
+ }
+ /* update the irq mask to reflect the fact that the interrupt is
+ * disabled */
+ mdp_irq_mask &= ~(mask);
+ /* if no one is waiting on the interrupt, disable it */
+ if (!mdp_irq_mask) {
+ disable_irq(mdp->irq);
+ if (clk)
+ clk_disable(clk);
+ }
+ return 0;
+}
+
+static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
+{
+ unsigned long irq_flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ ret = locked_disable_mdp_irq(mdp, mask);
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ return ret;
+}
+
+static irqreturn_t mdp_isr(int irq, void *data)
+{
+ uint32_t status;
+ unsigned long irq_flags;
+ struct mdp_info *mdp = data;
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+
+ status = mdp_readl(mdp, MDP_INTR_STATUS);
+ mdp_writel(mdp, status, MDP_INTR_CLEAR);
+
+ status &= mdp_irq_mask;
+ if (status & DL0_DMA2_TERM_DONE) {
+ if (dma_callback) {
+ dma_callback->func(dma_callback);
+ dma_callback = NULL;
+ }
+ wake_up(&mdp_dma2_waitqueue);
+ }
+
+ if (status & DL0_ROI_DONE)
+ wake_up(&mdp_ppp_waitqueue);
+
+ if (status)
+ locked_disable_mdp_irq(mdp, status);
+
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ return IRQ_HANDLED;
+}
+
+static uint32_t mdp_check_mask(uint32_t mask)
+{
+ uint32_t ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ ret = mdp_irq_mask & mask;
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ return ret;
+}
+
+static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
+{
+ int ret = 0;
+ unsigned long irq_flags;
+
+ wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ if (mdp_irq_mask & mask) {
+ locked_disable_mdp_irq(mdp, mask);
+ printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
+ mask);
+ ret = -ETIMEDOUT;
+ }
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+
+ return ret;
+}
+
+void mdp_dma_wait(struct mdp_device *mdp_dev)
+{
+#define MDP_MAX_TIMEOUTS 20
+ static int timeout_count;
+ struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
+
+ if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
+ timeout_count++;
+ else
+ timeout_count = 0;
+
+ if (timeout_count > MDP_MAX_TIMEOUTS) {
+ printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
+ MDP_MAX_TIMEOUTS);
+ BUG();
+ }
+}
+
+static int mdp_ppp_wait(struct mdp_info *mdp)
+{
+ return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
+}
+
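+/*
+ * Program the DMA2 block (size, source address, stride, pixel format and
+ * MDDI transaction parameters) through the debug-access register window,
+ * then kick the transfer; completion is signalled by DL0_DMA2_TERM_DONE.
+ */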
+void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
+ uint32_t width, uint32_t height, uint32_t x, uint32_t y,
+ struct msmfb_callback *callback)
+{
+ uint32_t dma2_cfg;
+ uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */
+
+ if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
+ printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
+ return;
+ }
+
+ dma_callback = callback;
+
+ dma2_cfg = DMA_PACK_TIGHT |
+ DMA_PACK_ALIGN_LSB |
+ DMA_PACK_PATTERN_RGB |
+ DMA_OUT_SEL_AHB |
+ DMA_IBUF_NONCONTIGUOUS;
+
+ dma2_cfg |= DMA_IBUF_FORMAT_RGB565;
+
+ dma2_cfg |= DMA_OUT_SEL_MDDI;
+
+ dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
+
+ dma2_cfg |= DMA_DITHER_EN;
+
+ /* setup size, address, and stride */
+ mdp_writel(mdp, (height << 16) | (width),
+ MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
+ mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
+ mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);
+
+ /* 666 18BPP */
+ dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
+
+ /* set y & x offset and MDDI transaction parameters */
+ mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
+ mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
+ mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
+ MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);
+
+ mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);
+
+ /* start DMA2 */
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
+}
+
+void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
+ uint32_t width, uint32_t height, uint32_t x, uint32_t y,
+ struct msmfb_callback *callback, int interface)
+{
+ struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
+
+ if (interface == MSM_MDDI_PMDH_INTERFACE) {
+ mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
+ callback);
+ }
+}
+
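+/*
+ * Resolve img->memory_id to a physical region: a PMEM file when
+ * CONFIG_ANDROID_PMEM is available, otherwise the framebuffer device
+ * itself (matched via FB_MAJOR).
+ */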
+int get_img(struct mdp_img *img, struct fb_info *info,
+ unsigned long *start, unsigned long *len,
+ struct file **filep)
+{
+ int put_needed, ret = 0;
+ struct file *file;
+ unsigned long vstart;
+
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(img->memory_id, start, &vstart, len, filep))
+ return 0;
+#endif
+
+ file = fget_light(img->memory_id, &put_needed);
+ if (file == NULL)
+ return -1;
+
+ if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+ *start = info->fix.smem_start;
+ *len = info->fix.smem_len;
+ } else
+ ret = -1;
+ fput_light(file, put_needed);
+
+ return ret;
+}
+
+void put_img(struct file *src_file, struct file *dst_file)
+{
+#ifdef CONFIG_ANDROID_PMEM
+ if (src_file)
+ put_pmem_file(src_file);
+ if (dst_file)
+ put_pmem_file(dst_file);
+#endif
+}
+
+int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
+ struct mdp_blit_req *req)
+{
+ int ret;
+ unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
+ struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
+ struct file *src_file = NULL, *dst_file = NULL;
+
+ /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
+ if (unlikely(req->src_rect.h == 0 ||
+ req->src_rect.w == 0)) {
+ printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
+ return -EINVAL;
+ }
+ if (unlikely(req->dst_rect.h == 0 ||
+ req->dst_rect.w == 0))
+ return -EINVAL;
+
+ /* do this first so that if this fails, the caller can always
+ * safely call put_img */
+ if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
+ printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
+ "memory\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
+ printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
+ "memory\n");
+#ifdef CONFIG_ANDROID_PMEM
+ put_pmem_file(src_file);
+#endif
+ return -EINVAL;
+ }
+ mutex_lock(&mdp_mutex);
+
+ /* transp_masking unimplemented */
+ req->transp_mask = MDP_TRANSP_NOP;
+ if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
+ req->alpha != MDP_ALPHA_NOP ||
+ HAS_ALPHA(req->src.format)) &&
+ (req->flags & MDP_ROT_90 &&
+ req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
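+ /* Blits that blend (alpha/transparency) while rotating into a
+ * narrow, tall destination are split into 16-pixel-high tiles,
+ * with the source rectangle rescaled per tile, to work around
+ * the background tile fetch hardware bug noted above. */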
+ int i;
+ unsigned int tiles = req->dst_rect.h / 16;
+ unsigned int remainder = req->dst_rect.h % 16;
+ req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
+ req->dst_rect.h = 16;
+ for (i = 0; i < tiles; i++) {
+ enable_mdp_irq(mdp, DL0_ROI_DONE);
+ ret = mdp_ppp_blit(mdp, req, src_file, src_start,
+ src_len, dst_file, dst_start,
+ dst_len);
+ if (ret)
+ goto err_bad_blit;
+ ret = mdp_ppp_wait(mdp);
+ if (ret)
+ goto err_wait_failed;
+ req->dst_rect.y += 16;
+ req->src_rect.x += req->src_rect.w;
+ }
+ if (!remainder)
+ goto end;
+ req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
+ req->dst_rect.h = remainder;
+ }
+ enable_mdp_irq(mdp, DL0_ROI_DONE);
+ ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
+ dst_start,
+ dst_len);
+ if (ret)
+ goto err_bad_blit;
+ ret = mdp_ppp_wait(mdp);
+ if (ret)
+ goto err_wait_failed;
+end:
+ put_img(src_file, dst_file);
+ mutex_unlock(&mdp_mutex);
+ return 0;
+err_bad_blit:
+ disable_mdp_irq(mdp, DL0_ROI_DONE);
+err_wait_failed:
+ put_img(src_file, dst_file);
+ mutex_unlock(&mdp_mutex);
+ return ret;
+}
+
+void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
+{
+ struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
+
+ disp_id &= 0xf;
+ mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
+}
+
+int register_mdp_client(struct class_interface *cint)
+{
+ if (!mdp_class) {
+ pr_err("mdp: no mdp_class when registering mdp client\n");
+ return -ENODEV;
+ }
+ cint->class = mdp_class;
+ return class_interface_register(cint);
+}
+
+#include "mdp_csc_table.h"
+#include "mdp_scale_tables.h"
+
+int mdp_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ int ret;
+ int n;
+ struct mdp_info *mdp;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource) {
+ pr_err("mdp: can not get mdp mem resource!\n");
+ return -ENOMEM;
+ }
+
+ mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
+ if (!mdp)
+ return -ENOMEM;
+
+ mdp->irq = platform_get_irq(pdev, 0);
+ if (mdp->irq < 0) {
+ pr_err("mdp: can not get mdp irq\n");
+ ret = mdp->irq;
+ goto error_get_irq;
+ }
+
+ mdp->base = ioremap(resource->start, resource_size(resource));
+ if (!mdp->base) {
+ printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
+ ret = -ENOMEM;
+ goto error_ioremap;
+ }
+
+ mdp->mdp_dev.dma = mdp_dma;
+ mdp->mdp_dev.dma_wait = mdp_dma_wait;
+ mdp->mdp_dev.blit = mdp_blit;
+ mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
+
+ clk = clk_get(&pdev->dev, "mdp_clk");
+ if (IS_ERR(clk)) {
+ printk(KERN_INFO "mdp: failed to get mdp clk");
+ return PTR_ERR(clk);
+ }
+
+ ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp);
+ if (ret)
+ goto error_request_irq;
+ disable_irq(mdp->irq);
+ mdp_irq_mask = 0;
+
+ /* debug interface write access */
+ mdp_writel(mdp, 1, 0x60);
+
+ mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
+ mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
+
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
+
+ for (n = 0; n < ARRAY_SIZE(csc_table); n++)
+ mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
+
+ /* clear up unused fg/main registers */
+ /* comp.plane 2&3 ystride */
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
+
+ /* unpacked pattern */
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
+
+ /* comp.plane 2 & 3 */
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
+
+ /* clear unused bg registers */
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
+ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
+
+ for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
+ mdp_writel(mdp, mdp_upscale_table[n].val,
+ mdp_upscale_table[n].reg);
+
+ for (n = 0; n < 9; n++)
+ mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
+ mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
+ mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 1);
+ mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 2);
+
+ /* register mdp device */
+ mdp->mdp_dev.dev.parent = &pdev->dev;
+ mdp->mdp_dev.dev.class = mdp_class;
+ snprintf(mdp->mdp_dev.dev.bus_id, BUS_ID_SIZE, "mdp%d", pdev->id);
+
+ /* If removing the underlying platform device were ever supported,
+ * a release callback for mdp->mdp_dev.dev would have to be
+ * implemented before registering it below. */
+
+ ret = device_register(&mdp->mdp_dev.dev);
+ if (ret)
+ goto error_device_register;
+ return 0;
+
+error_device_register:
+ free_irq(mdp->irq, mdp);
+error_request_irq:
+ iounmap(mdp->base);
+error_get_irq:
+error_ioremap:
+ kfree(mdp);
+ return ret;
+}
+
+static struct platform_driver msm_mdp_driver = {
+ .probe = mdp_probe,
+ .driver = {.name = "msm_mdp"},
+};
+
+static int __init mdp_init(void)
+{
+ mdp_class = class_create(THIS_MODULE, "msm_mdp");
+ if (IS_ERR(mdp_class)) {
+ printk(KERN_ERR "Error creating mdp class\n");
+ return PTR_ERR(mdp_class);
+ }
+ return platform_driver_register(&msm_mdp_driver);
+}
+
+subsys_initcall(mdp_init);
diff --git a/drivers/video/msm/mdp_csc_table.h b/drivers/video/msm/mdp_csc_table.h
new file mode 100644
index 00000000000..d1cde30ead5
--- /dev/null
+++ b/drivers/video/msm/mdp_csc_table.h
@@ -0,0 +1,582 @@
+/* drivers/video/msm_fb/mdp_csc_table.h
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
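+/*
+ * Raw (register, value) pairs for the MDP color space converter and its
+ * lookup tables; mdp_probe() writes each val to its reg during init.
+ */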
+static struct {
+ uint32_t reg;
+ uint32_t val;
+} csc_table[] = {
+ { 0x40400, 0x83 },
+ { 0x40404, 0x102 },
+ { 0x40408, 0x32 },
+ { 0x4040c, 0xffffffb5 },
+ { 0x40410, 0xffffff6c },
+ { 0x40414, 0xe1 },
+ { 0x40418, 0xe1 },
+ { 0x4041c, 0xffffff45 },
+ { 0x40420, 0xffffffdc },
+ { 0x40440, 0x254 },
+ { 0x40444, 0x0 },
+ { 0x40448, 0x331 },
+ { 0x4044c, 0x254 },
+ { 0x40450, 0xffffff38 },
+ { 0x40454, 0xfffffe61 },
+ { 0x40458, 0x254 },
+ { 0x4045c, 0x409 },
+ { 0x40460, 0x0 },
+ { 0x40480, 0x5d },
+ { 0x40484, 0x13a },
+ { 0x40488, 0x20 },
+ { 0x4048c, 0xffffffcd },
+ { 0x40490, 0xffffff54 },
+ { 0x40494, 0xe1 },
+ { 0x40498, 0xe1 },
+ { 0x4049c, 0xffffff35 },
+ { 0x404a0, 0xffffffec },
+ { 0x404c0, 0x254 },
+ { 0x404c4, 0x0 },
+ { 0x404c8, 0x396 },
+ { 0x404cc, 0x254 },
+ { 0x404d0, 0xffffff94 },
+ { 0x404d4, 0xfffffef0 },
+ { 0x404d8, 0x254 },
+ { 0x404dc, 0x43a },
+ { 0x404e0, 0x0 },
+ { 0x40500, 0x10 },
+ { 0x40504, 0x80 },
+ { 0x40508, 0x80 },
+ { 0x40540, 0x10 },
+ { 0x40544, 0x80 },
+ { 0x40548, 0x80 },
+ { 0x40580, 0x10 },
+ { 0x40584, 0xeb },
+ { 0x40588, 0x10 },
+ { 0x4058c, 0xf0 },
+ { 0x405c0, 0x10 },
+ { 0x405c4, 0xeb },
+ { 0x405c8, 0x10 },
+ { 0x405cc, 0xf0 },
+ { 0x40800, 0x0 },
+ { 0x40804, 0x151515 },
+ { 0x40808, 0x1d1d1d },
+ { 0x4080c, 0x232323 },
+ { 0x40810, 0x272727 },
+ { 0x40814, 0x2b2b2b },
+ { 0x40818, 0x2f2f2f },
+ { 0x4081c, 0x333333 },
+ { 0x40820, 0x363636 },
+ { 0x40824, 0x393939 },
+ { 0x40828, 0x3b3b3b },
+ { 0x4082c, 0x3e3e3e },
+ { 0x40830, 0x404040 },
+ { 0x40834, 0x434343 },
+ { 0x40838, 0x454545 },
+ { 0x4083c, 0x474747 },
+ { 0x40840, 0x494949 },
+ { 0x40844, 0x4b4b4b },
+ { 0x40848, 0x4d4d4d },
+ { 0x4084c, 0x4f4f4f },
+ { 0x40850, 0x515151 },
+ { 0x40854, 0x535353 },
+ { 0x40858, 0x555555 },
+ { 0x4085c, 0x565656 },
+ { 0x40860, 0x585858 },
+ { 0x40864, 0x5a5a5a },
+ { 0x40868, 0x5b5b5b },
+ { 0x4086c, 0x5d5d5d },
+ { 0x40870, 0x5e5e5e },
+ { 0x40874, 0x606060 },
+ { 0x40878, 0x616161 },
+ { 0x4087c, 0x636363 },
+ { 0x40880, 0x646464 },
+ { 0x40884, 0x666666 },
+ { 0x40888, 0x676767 },
+ { 0x4088c, 0x686868 },
+ { 0x40890, 0x6a6a6a },
+ { 0x40894, 0x6b6b6b },
+ { 0x40898, 0x6c6c6c },
+ { 0x4089c, 0x6e6e6e },
+ { 0x408a0, 0x6f6f6f },
+ { 0x408a4, 0x707070 },
+ { 0x408a8, 0x717171 },
+ { 0x408ac, 0x727272 },
+ { 0x408b0, 0x747474 },
+ { 0x408b4, 0x757575 },
+ { 0x408b8, 0x767676 },
+ { 0x408bc, 0x777777 },
+ { 0x408c0, 0x787878 },
+ { 0x408c4, 0x797979 },
+ { 0x408c8, 0x7a7a7a },
+ { 0x408cc, 0x7c7c7c },
+ { 0x408d0, 0x7d7d7d },
+ { 0x408d4, 0x7e7e7e },
+ { 0x408d8, 0x7f7f7f },
+ { 0x408dc, 0x808080 },
+ { 0x408e0, 0x818181 },
+ { 0x408e4, 0x828282 },
+ { 0x408e8, 0x838383 },
+ { 0x408ec, 0x848484 },
+ { 0x408f0, 0x858585 },
+ { 0x408f4, 0x868686 },
+ { 0x408f8, 0x878787 },
+ { 0x408fc, 0x888888 },
+ { 0x40900, 0x898989 },
+ { 0x40904, 0x8a8a8a },
+ { 0x40908, 0x8b8b8b },
+ { 0x4090c, 0x8c8c8c },
+ { 0x40910, 0x8d8d8d },
+ { 0x40914, 0x8e8e8e },
+ { 0x40918, 0x8f8f8f },
+ { 0x4091c, 0x8f8f8f },
+ { 0x40920, 0x909090 },
+ { 0x40924, 0x919191 },
+ { 0x40928, 0x929292 },
+ { 0x4092c, 0x939393 },
+ { 0x40930, 0x949494 },
+ { 0x40934, 0x959595 },
+ { 0x40938, 0x969696 },
+ { 0x4093c, 0x969696 },
+ { 0x40940, 0x979797 },
+ { 0x40944, 0x989898 },
+ { 0x40948, 0x999999 },
+ { 0x4094c, 0x9a9a9a },
+ { 0x40950, 0x9b9b9b },
+ { 0x40954, 0x9c9c9c },
+ { 0x40958, 0x9c9c9c },
+ { 0x4095c, 0x9d9d9d },
+ { 0x40960, 0x9e9e9e },
+ { 0x40964, 0x9f9f9f },
+ { 0x40968, 0xa0a0a0 },
+ { 0x4096c, 0xa0a0a0 },
+ { 0x40970, 0xa1a1a1 },
+ { 0x40974, 0xa2a2a2 },
+ { 0x40978, 0xa3a3a3 },
+ { 0x4097c, 0xa4a4a4 },
+ { 0x40980, 0xa4a4a4 },
+ { 0x40984, 0xa5a5a5 },
+ { 0x40988, 0xa6a6a6 },
+ { 0x4098c, 0xa7a7a7 },
+ { 0x40990, 0xa7a7a7 },
+ { 0x40994, 0xa8a8a8 },
+ { 0x40998, 0xa9a9a9 },
+ { 0x4099c, 0xaaaaaa },
+ { 0x409a0, 0xaaaaaa },
+ { 0x409a4, 0xababab },
+ { 0x409a8, 0xacacac },
+ { 0x409ac, 0xadadad },
+ { 0x409b0, 0xadadad },
+ { 0x409b4, 0xaeaeae },
+ { 0x409b8, 0xafafaf },
+ { 0x409bc, 0xafafaf },
+ { 0x409c0, 0xb0b0b0 },
+ { 0x409c4, 0xb1b1b1 },
+ { 0x409c8, 0xb2b2b2 },
+ { 0x409cc, 0xb2b2b2 },
+ { 0x409d0, 0xb3b3b3 },
+ { 0x409d4, 0xb4b4b4 },
+ { 0x409d8, 0xb4b4b4 },
+ { 0x409dc, 0xb5b5b5 },
+ { 0x409e0, 0xb6b6b6 },
+ { 0x409e4, 0xb6b6b6 },
+ { 0x409e8, 0xb7b7b7 },
+ { 0x409ec, 0xb8b8b8 },
+ { 0x409f0, 0xb8b8b8 },
+ { 0x409f4, 0xb9b9b9 },
+ { 0x409f8, 0xbababa },
+ { 0x409fc, 0xbababa },
+ { 0x40a00, 0xbbbbbb },
+ { 0x40a04, 0xbcbcbc },
+ { 0x40a08, 0xbcbcbc },
+ { 0x40a0c, 0xbdbdbd },
+ { 0x40a10, 0xbebebe },
+ { 0x40a14, 0xbebebe },
+ { 0x40a18, 0xbfbfbf },
+ { 0x40a1c, 0xc0c0c0 },
+ { 0x40a20, 0xc0c0c0 },
+ { 0x40a24, 0xc1c1c1 },
+ { 0x40a28, 0xc1c1c1 },
+ { 0x40a2c, 0xc2c2c2 },
+ { 0x40a30, 0xc3c3c3 },
+ { 0x40a34, 0xc3c3c3 },
+ { 0x40a38, 0xc4c4c4 },
+ { 0x40a3c, 0xc5c5c5 },
+ { 0x40a40, 0xc5c5c5 },
+ { 0x40a44, 0xc6c6c6 },
+ { 0x40a48, 0xc6c6c6 },
+ { 0x40a4c, 0xc7c7c7 },
+ { 0x40a50, 0xc8c8c8 },
+ { 0x40a54, 0xc8c8c8 },
+ { 0x40a58, 0xc9c9c9 },
+ { 0x40a5c, 0xc9c9c9 },
+ { 0x40a60, 0xcacaca },
+ { 0x40a64, 0xcbcbcb },
+ { 0x40a68, 0xcbcbcb },
+ { 0x40a6c, 0xcccccc },
+ { 0x40a70, 0xcccccc },
+ { 0x40a74, 0xcdcdcd },
+ { 0x40a78, 0xcecece },
+ { 0x40a7c, 0xcecece },
+ { 0x40a80, 0xcfcfcf },
+ { 0x40a84, 0xcfcfcf },
+ { 0x40a88, 0xd0d0d0 },
+ { 0x40a8c, 0xd0d0d0 },
+ { 0x40a90, 0xd1d1d1 },
+ { 0x40a94, 0xd2d2d2 },
+ { 0x40a98, 0xd2d2d2 },
+ { 0x40a9c, 0xd3d3d3 },
+ { 0x40aa0, 0xd3d3d3 },
+ { 0x40aa4, 0xd4d4d4 },
+ { 0x40aa8, 0xd4d4d4 },
+ { 0x40aac, 0xd5d5d5 },
+ { 0x40ab0, 0xd6d6d6 },
+ { 0x40ab4, 0xd6d6d6 },
+ { 0x40ab8, 0xd7d7d7 },
+ { 0x40abc, 0xd7d7d7 },
+ { 0x40ac0, 0xd8d8d8 },
+ { 0x40ac4, 0xd8d8d8 },
+ { 0x40ac8, 0xd9d9d9 },
+ { 0x40acc, 0xd9d9d9 },
+ { 0x40ad0, 0xdadada },
+ { 0x40ad4, 0xdbdbdb },
+ { 0x40ad8, 0xdbdbdb },
+ { 0x40adc, 0xdcdcdc },
+ { 0x40ae0, 0xdcdcdc },
+ { 0x40ae4, 0xdddddd },
+ { 0x40ae8, 0xdddddd },
+ { 0x40aec, 0xdedede },
+ { 0x40af0, 0xdedede },
+ { 0x40af4, 0xdfdfdf },
+ { 0x40af8, 0xdfdfdf },
+ { 0x40afc, 0xe0e0e0 },
+ { 0x40b00, 0xe0e0e0 },
+ { 0x40b04, 0xe1e1e1 },
+ { 0x40b08, 0xe1e1e1 },
+ { 0x40b0c, 0xe2e2e2 },
+ { 0x40b10, 0xe3e3e3 },
+ { 0x40b14, 0xe3e3e3 },
+ { 0x40b18, 0xe4e4e4 },
+ { 0x40b1c, 0xe4e4e4 },
+ { 0x40b20, 0xe5e5e5 },
+ { 0x40b24, 0xe5e5e5 },
+ { 0x40b28, 0xe6e6e6 },
+ { 0x40b2c, 0xe6e6e6 },
+ { 0x40b30, 0xe7e7e7 },
+ { 0x40b34, 0xe7e7e7 },
+ { 0x40b38, 0xe8e8e8 },
+ { 0x40b3c, 0xe8e8e8 },
+ { 0x40b40, 0xe9e9e9 },
+ { 0x40b44, 0xe9e9e9 },
+ { 0x40b48, 0xeaeaea },
+ { 0x40b4c, 0xeaeaea },
+ { 0x40b50, 0xebebeb },
+ { 0x40b54, 0xebebeb },
+ { 0x40b58, 0xececec },
+ { 0x40b5c, 0xececec },
+ { 0x40b60, 0xededed },
+ { 0x40b64, 0xededed },
+ { 0x40b68, 0xeeeeee },
+ { 0x40b6c, 0xeeeeee },
+ { 0x40b70, 0xefefef },
+ { 0x40b74, 0xefefef },
+ { 0x40b78, 0xf0f0f0 },
+ { 0x40b7c, 0xf0f0f0 },
+ { 0x40b80, 0xf1f1f1 },
+ { 0x40b84, 0xf1f1f1 },
+ { 0x40b88, 0xf2f2f2 },
+ { 0x40b8c, 0xf2f2f2 },
+ { 0x40b90, 0xf2f2f2 },
+ { 0x40b94, 0xf3f3f3 },
+ { 0x40b98, 0xf3f3f3 },
+ { 0x40b9c, 0xf4f4f4 },
+ { 0x40ba0, 0xf4f4f4 },
+ { 0x40ba4, 0xf5f5f5 },
+ { 0x40ba8, 0xf5f5f5 },
+ { 0x40bac, 0xf6f6f6 },
+ { 0x40bb0, 0xf6f6f6 },
+ { 0x40bb4, 0xf7f7f7 },
+ { 0x40bb8, 0xf7f7f7 },
+ { 0x40bbc, 0xf8f8f8 },
+ { 0x40bc0, 0xf8f8f8 },
+ { 0x40bc4, 0xf9f9f9 },
+ { 0x40bc8, 0xf9f9f9 },
+ { 0x40bcc, 0xfafafa },
+ { 0x40bd0, 0xfafafa },
+ { 0x40bd4, 0xfafafa },
+ { 0x40bd8, 0xfbfbfb },
+ { 0x40bdc, 0xfbfbfb },
+ { 0x40be0, 0xfcfcfc },
+ { 0x40be4, 0xfcfcfc },
+ { 0x40be8, 0xfdfdfd },
+ { 0x40bec, 0xfdfdfd },
+ { 0x40bf0, 0xfefefe },
+ { 0x40bf4, 0xfefefe },
+ { 0x40bf8, 0xffffff },
+ { 0x40bfc, 0xffffff },
+ { 0x40c00, 0x0 },
+ { 0x40c04, 0x0 },
+ { 0x40c08, 0x0 },
+ { 0x40c0c, 0x0 },
+ { 0x40c10, 0x0 },
+ { 0x40c14, 0x0 },
+ { 0x40c18, 0x0 },
+ { 0x40c1c, 0x0 },
+ { 0x40c20, 0x0 },
+ { 0x40c24, 0x0 },
+ { 0x40c28, 0x0 },
+ { 0x40c2c, 0x0 },
+ { 0x40c30, 0x0 },
+ { 0x40c34, 0x0 },
+ { 0x40c38, 0x0 },
+ { 0x40c3c, 0x0 },
+ { 0x40c40, 0x10101 },
+ { 0x40c44, 0x10101 },
+ { 0x40c48, 0x10101 },
+ { 0x40c4c, 0x10101 },
+ { 0x40c50, 0x10101 },
+ { 0x40c54, 0x10101 },
+ { 0x40c58, 0x10101 },
+ { 0x40c5c, 0x10101 },
+ { 0x40c60, 0x10101 },
+ { 0x40c64, 0x10101 },
+ { 0x40c68, 0x20202 },
+ { 0x40c6c, 0x20202 },
+ { 0x40c70, 0x20202 },
+ { 0x40c74, 0x20202 },
+ { 0x40c78, 0x20202 },
+ { 0x40c7c, 0x20202 },
+ { 0x40c80, 0x30303 },
+ { 0x40c84, 0x30303 },
+ { 0x40c88, 0x30303 },
+ { 0x40c8c, 0x30303 },
+ { 0x40c90, 0x30303 },
+ { 0x40c94, 0x40404 },
+ { 0x40c98, 0x40404 },
+ { 0x40c9c, 0x40404 },
+ { 0x40ca0, 0x40404 },
+ { 0x40ca4, 0x40404 },
+ { 0x40ca8, 0x50505 },
+ { 0x40cac, 0x50505 },
+ { 0x40cb0, 0x50505 },
+ { 0x40cb4, 0x50505 },
+ { 0x40cb8, 0x60606 },
+ { 0x40cbc, 0x60606 },
+ { 0x40cc0, 0x60606 },
+ { 0x40cc4, 0x70707 },
+ { 0x40cc8, 0x70707 },
+ { 0x40ccc, 0x70707 },
+ { 0x40cd0, 0x70707 },
+ { 0x40cd4, 0x80808 },
+ { 0x40cd8, 0x80808 },
+ { 0x40cdc, 0x80808 },
+ { 0x40ce0, 0x90909 },
+ { 0x40ce4, 0x90909 },
+ { 0x40ce8, 0xa0a0a },
+ { 0x40cec, 0xa0a0a },
+ { 0x40cf0, 0xa0a0a },
+ { 0x40cf4, 0xb0b0b },
+ { 0x40cf8, 0xb0b0b },
+ { 0x40cfc, 0xb0b0b },
+ { 0x40d00, 0xc0c0c },
+ { 0x40d04, 0xc0c0c },
+ { 0x40d08, 0xd0d0d },
+ { 0x40d0c, 0xd0d0d },
+ { 0x40d10, 0xe0e0e },
+ { 0x40d14, 0xe0e0e },
+ { 0x40d18, 0xe0e0e },
+ { 0x40d1c, 0xf0f0f },
+ { 0x40d20, 0xf0f0f },
+ { 0x40d24, 0x101010 },
+ { 0x40d28, 0x101010 },
+ { 0x40d2c, 0x111111 },
+ { 0x40d30, 0x111111 },
+ { 0x40d34, 0x121212 },
+ { 0x40d38, 0x121212 },
+ { 0x40d3c, 0x131313 },
+ { 0x40d40, 0x131313 },
+ { 0x40d44, 0x141414 },
+ { 0x40d48, 0x151515 },
+ { 0x40d4c, 0x151515 },
+ { 0x40d50, 0x161616 },
+ { 0x40d54, 0x161616 },
+ { 0x40d58, 0x171717 },
+ { 0x40d5c, 0x171717 },
+ { 0x40d60, 0x181818 },
+ { 0x40d64, 0x191919 },
+ { 0x40d68, 0x191919 },
+ { 0x40d6c, 0x1a1a1a },
+ { 0x40d70, 0x1b1b1b },
+ { 0x40d74, 0x1b1b1b },
+ { 0x40d78, 0x1c1c1c },
+ { 0x40d7c, 0x1c1c1c },
+ { 0x40d80, 0x1d1d1d },
+ { 0x40d84, 0x1e1e1e },
+ { 0x40d88, 0x1f1f1f },
+ { 0x40d8c, 0x1f1f1f },
+ { 0x40d90, 0x202020 },
+ { 0x40d94, 0x212121 },
+ { 0x40d98, 0x212121 },
+ { 0x40d9c, 0x222222 },
+ { 0x40da0, 0x232323 },
+ { 0x40da4, 0x242424 },
+ { 0x40da8, 0x242424 },
+ { 0x40dac, 0x252525 },
+ { 0x40db0, 0x262626 },
+ { 0x40db4, 0x272727 },
+ { 0x40db8, 0x272727 },
+ { 0x40dbc, 0x282828 },
+ { 0x40dc0, 0x292929 },
+ { 0x40dc4, 0x2a2a2a },
+ { 0x40dc8, 0x2b2b2b },
+ { 0x40dcc, 0x2c2c2c },
+ { 0x40dd0, 0x2c2c2c },
+ { 0x40dd4, 0x2d2d2d },
+ { 0x40dd8, 0x2e2e2e },
+ { 0x40ddc, 0x2f2f2f },
+ { 0x40de0, 0x303030 },
+ { 0x40de4, 0x313131 },
+ { 0x40de8, 0x323232 },
+ { 0x40dec, 0x333333 },
+ { 0x40df0, 0x333333 },
+ { 0x40df4, 0x343434 },
+ { 0x40df8, 0x353535 },
+ { 0x40dfc, 0x363636 },
+ { 0x40e00, 0x373737 },
+ { 0x40e04, 0x383838 },
+ { 0x40e08, 0x393939 },
+ { 0x40e0c, 0x3a3a3a },
+ { 0x40e10, 0x3b3b3b },
+ { 0x40e14, 0x3c3c3c },
+ { 0x40e18, 0x3d3d3d },
+ { 0x40e1c, 0x3e3e3e },
+ { 0x40e20, 0x3f3f3f },
+ { 0x40e24, 0x404040 },
+ { 0x40e28, 0x414141 },
+ { 0x40e2c, 0x424242 },
+ { 0x40e30, 0x434343 },
+ { 0x40e34, 0x444444 },
+ { 0x40e38, 0x464646 },
+ { 0x40e3c, 0x474747 },
+ { 0x40e40, 0x484848 },
+ { 0x40e44, 0x494949 },
+ { 0x40e48, 0x4a4a4a },
+ { 0x40e4c, 0x4b4b4b },
+ { 0x40e50, 0x4c4c4c },
+ { 0x40e54, 0x4d4d4d },
+ { 0x40e58, 0x4f4f4f },
+ { 0x40e5c, 0x505050 },
+ { 0x40e60, 0x515151 },
+ { 0x40e64, 0x525252 },
+ { 0x40e68, 0x535353 },
+ { 0x40e6c, 0x545454 },
+ { 0x40e70, 0x565656 },
+ { 0x40e74, 0x575757 },
+ { 0x40e78, 0x585858 },
+ { 0x40e7c, 0x595959 },
+ { 0x40e80, 0x5b5b5b },
+ { 0x40e84, 0x5c5c5c },
+ { 0x40e88, 0x5d5d5d },
+ { 0x40e8c, 0x5e5e5e },
+ { 0x40e90, 0x606060 },
+ { 0x40e94, 0x616161 },
+ { 0x40e98, 0x626262 },
+ { 0x40e9c, 0x646464 },
+ { 0x40ea0, 0x656565 },
+ { 0x40ea4, 0x666666 },
+ { 0x40ea8, 0x686868 },
+ { 0x40eac, 0x696969 },
+ { 0x40eb0, 0x6a6a6a },
+ { 0x40eb4, 0x6c6c6c },
+ { 0x40eb8, 0x6d6d6d },
+ { 0x40ebc, 0x6f6f6f },
+ { 0x40ec0, 0x707070 },
+ { 0x40ec4, 0x717171 },
+ { 0x40ec8, 0x737373 },
+ { 0x40ecc, 0x747474 },
+ { 0x40ed0, 0x767676 },
+ { 0x40ed4, 0x777777 },
+ { 0x40ed8, 0x797979 },
+ { 0x40edc, 0x7a7a7a },
+ { 0x40ee0, 0x7c7c7c },
+ { 0x40ee4, 0x7d7d7d },
+ { 0x40ee8, 0x7f7f7f },
+ { 0x40eec, 0x808080 },
+ { 0x40ef0, 0x828282 },
+ { 0x40ef4, 0x838383 },
+ { 0x40ef8, 0x858585 },
+ { 0x40efc, 0x868686 },
+ { 0x40f00, 0x888888 },
+ { 0x40f04, 0x898989 },
+ { 0x40f08, 0x8b8b8b },
+ { 0x40f0c, 0x8d8d8d },
+ { 0x40f10, 0x8e8e8e },
+ { 0x40f14, 0x909090 },
+ { 0x40f18, 0x919191 },
+ { 0x40f1c, 0x939393 },
+ { 0x40f20, 0x959595 },
+ { 0x40f24, 0x969696 },
+ { 0x40f28, 0x989898 },
+ { 0x40f2c, 0x9a9a9a },
+ { 0x40f30, 0x9b9b9b },
+ { 0x40f34, 0x9d9d9d },
+ { 0x40f38, 0x9f9f9f },
+ { 0x40f3c, 0xa1a1a1 },
+ { 0x40f40, 0xa2a2a2 },
+ { 0x40f44, 0xa4a4a4 },
+ { 0x40f48, 0xa6a6a6 },
+ { 0x40f4c, 0xa7a7a7 },
+ { 0x40f50, 0xa9a9a9 },
+ { 0x40f54, 0xababab },
+ { 0x40f58, 0xadadad },
+ { 0x40f5c, 0xafafaf },
+ { 0x40f60, 0xb0b0b0 },
+ { 0x40f64, 0xb2b2b2 },
+ { 0x40f68, 0xb4b4b4 },
+ { 0x40f6c, 0xb6b6b6 },
+ { 0x40f70, 0xb8b8b8 },
+ { 0x40f74, 0xbababa },
+ { 0x40f78, 0xbbbbbb },
+ { 0x40f7c, 0xbdbdbd },
+ { 0x40f80, 0xbfbfbf },
+ { 0x40f84, 0xc1c1c1 },
+ { 0x40f88, 0xc3c3c3 },
+ { 0x40f8c, 0xc5c5c5 },
+ { 0x40f90, 0xc7c7c7 },
+ { 0x40f94, 0xc9c9c9 },
+ { 0x40f98, 0xcbcbcb },
+ { 0x40f9c, 0xcdcdcd },
+ { 0x40fa0, 0xcfcfcf },
+ { 0x40fa4, 0xd1d1d1 },
+ { 0x40fa8, 0xd3d3d3 },
+ { 0x40fac, 0xd5d5d5 },
+ { 0x40fb0, 0xd7d7d7 },
+ { 0x40fb4, 0xd9d9d9 },
+ { 0x40fb8, 0xdbdbdb },
+ { 0x40fbc, 0xdddddd },
+ { 0x40fc0, 0xdfdfdf },
+ { 0x40fc4, 0xe1e1e1 },
+ { 0x40fc8, 0xe3e3e3 },
+ { 0x40fcc, 0xe5e5e5 },
+ { 0x40fd0, 0xe7e7e7 },
+ { 0x40fd4, 0xe9e9e9 },
+ { 0x40fd8, 0xebebeb },
+ { 0x40fdc, 0xeeeeee },
+ { 0x40fe0, 0xf0f0f0 },
+ { 0x40fe4, 0xf2f2f2 },
+ { 0x40fe8, 0xf4f4f4 },
+ { 0x40fec, 0xf6f6f6 },
+ { 0x40ff0, 0xf8f8f8 },
+ { 0x40ff4, 0xfbfbfb },
+ { 0x40ff8, 0xfdfdfd },
+ { 0x40ffc, 0xffffff },
+};
diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h
new file mode 100644
index 00000000000..4e3deb4e592
--- /dev/null
+++ b/drivers/video/msm/mdp_hw.h
@@ -0,0 +1,621 @@
+/* drivers/video/msm_fb/mdp_hw.h
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MDP_HW_H_
+#define _MDP_HW_H_
+
+#include <mach/msm_iomap.h>
+#include <mach/msm_fb.h>
+
+struct mdp_info {
+ struct mdp_device mdp_dev;
+ char * __iomem base;
+ int irq;
+};
+struct mdp_blit_req;
+struct mdp_device;
+int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
+ struct file *src_file, unsigned long src_start,
+ unsigned long src_len, struct file *dst_file,
+ unsigned long dst_start, unsigned long dst_len);
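+/* Register accessors: the MDP_* offsets below are relative to the
+ * ioremapped register window at mdp->base. */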
+#define mdp_writel(mdp, value, offset) writel(value, mdp->base + offset)
+#define mdp_readl(mdp, offset) readl(mdp->base + offset)
+
+#define MDP_SYNC_CONFIG_0 (0x00000)
+#define MDP_SYNC_CONFIG_1 (0x00004)
+#define MDP_SYNC_CONFIG_2 (0x00008)
+#define MDP_SYNC_STATUS_0 (0x0000c)
+#define MDP_SYNC_STATUS_1 (0x00010)
+#define MDP_SYNC_STATUS_2 (0x00014)
+#define MDP_SYNC_THRESH_0 (0x00018)
+#define MDP_SYNC_THRESH_1 (0x0001c)
+#define MDP_INTR_ENABLE (0x00020)
+#define MDP_INTR_STATUS (0x00024)
+#define MDP_INTR_CLEAR (0x00028)
+#define MDP_DISPLAY0_START (0x00030)
+#define MDP_DISPLAY1_START (0x00034)
+#define MDP_DISPLAY_STATUS (0x00038)
+#define MDP_EBI2_LCD0 (0x0003c)
+#define MDP_EBI2_LCD1 (0x00040)
+#define MDP_DISPLAY0_ADDR (0x00054)
+#define MDP_DISPLAY1_ADDR (0x00058)
+#define MDP_EBI2_PORTMAP_MODE (0x0005c)
+#define MDP_MODE (0x00060)
+#define MDP_TV_OUT_STATUS (0x00064)
+#define MDP_HW_VERSION (0x00070)
+#define MDP_SW_RESET (0x00074)
+#define MDP_AXI_ERROR_MASTER_STOP (0x00078)
+#define MDP_SEL_CLK_OR_HCLK_TEST_BUS (0x0007c)
+#define MDP_PRIMARY_VSYNC_OUT_CTRL (0x00080)
+#define MDP_SECONDARY_VSYNC_OUT_CTRL (0x00084)
+#define MDP_EXTERNAL_VSYNC_OUT_CTRL (0x00088)
+#define MDP_VSYNC_CTRL (0x0008c)
+#define MDP_CGC_EN (0x00100)
+#define MDP_CMD_STATUS (0x10008)
+#define MDP_PROFILE_EN (0x10010)
+#define MDP_PROFILE_COUNT (0x10014)
+#define MDP_DMA_START (0x10044)
+#define MDP_FULL_BYPASS_WORD0 (0x10100)
+#define MDP_FULL_BYPASS_WORD1 (0x10104)
+#define MDP_COMMAND_CONFIG (0x10104)
+#define MDP_FULL_BYPASS_WORD2 (0x10108)
+#define MDP_FULL_BYPASS_WORD3 (0x1010c)
+#define MDP_FULL_BYPASS_WORD4 (0x10110)
+#define MDP_FULL_BYPASS_WORD6 (0x10118)
+#define MDP_FULL_BYPASS_WORD7 (0x1011c)
+#define MDP_FULL_BYPASS_WORD8 (0x10120)
+#define MDP_FULL_BYPASS_WORD9 (0x10124)
+#define MDP_PPP_SOURCE_CONFIG (0x10124)
+#define MDP_FULL_BYPASS_WORD10 (0x10128)
+#define MDP_FULL_BYPASS_WORD11 (0x1012c)
+#define MDP_FULL_BYPASS_WORD12 (0x10130)
+#define MDP_FULL_BYPASS_WORD13 (0x10134)
+#define MDP_FULL_BYPASS_WORD14 (0x10138)
+#define MDP_PPP_OPERATION_CONFIG (0x10138)
+#define MDP_FULL_BYPASS_WORD15 (0x1013c)
+#define MDP_FULL_BYPASS_WORD16 (0x10140)
+#define MDP_FULL_BYPASS_WORD17 (0x10144)
+#define MDP_FULL_BYPASS_WORD18 (0x10148)
+#define MDP_FULL_BYPASS_WORD19 (0x1014c)
+#define MDP_FULL_BYPASS_WORD20 (0x10150)
+#define MDP_PPP_DESTINATION_CONFIG (0x10150)
+#define MDP_FULL_BYPASS_WORD21 (0x10154)
+#define MDP_FULL_BYPASS_WORD22 (0x10158)
+#define MDP_FULL_BYPASS_WORD23 (0x1015c)
+#define MDP_FULL_BYPASS_WORD24 (0x10160)
+#define MDP_FULL_BYPASS_WORD25 (0x10164)
+#define MDP_FULL_BYPASS_WORD26 (0x10168)
+#define MDP_FULL_BYPASS_WORD27 (0x1016c)
+#define MDP_FULL_BYPASS_WORD29 (0x10174)
+#define MDP_FULL_BYPASS_WORD30 (0x10178)
+#define MDP_FULL_BYPASS_WORD31 (0x1017c)
+#define MDP_FULL_BYPASS_WORD32 (0x10180)
+#define MDP_DMA_CONFIG (0x10180)
+#define MDP_FULL_BYPASS_WORD33 (0x10184)
+#define MDP_FULL_BYPASS_WORD34 (0x10188)
+#define MDP_FULL_BYPASS_WORD35 (0x1018c)
+#define MDP_FULL_BYPASS_WORD37 (0x10194)
+#define MDP_FULL_BYPASS_WORD39 (0x1019c)
+#define MDP_FULL_BYPASS_WORD40 (0x101a0)
+#define MDP_FULL_BYPASS_WORD41 (0x101a4)
+#define MDP_FULL_BYPASS_WORD43 (0x101ac)
+#define MDP_FULL_BYPASS_WORD46 (0x101b8)
+#define MDP_FULL_BYPASS_WORD47 (0x101bc)
+#define MDP_FULL_BYPASS_WORD48 (0x101c0)
+#define MDP_FULL_BYPASS_WORD49 (0x101c4)
+#define MDP_FULL_BYPASS_WORD50 (0x101c8)
+#define MDP_FULL_BYPASS_WORD51 (0x101cc)
+#define MDP_FULL_BYPASS_WORD52 (0x101d0)
+#define MDP_FULL_BYPASS_WORD53 (0x101d4)
+#define MDP_FULL_BYPASS_WORD54 (0x101d8)
+#define MDP_FULL_BYPASS_WORD55 (0x101dc)
+#define MDP_FULL_BYPASS_WORD56 (0x101e0)
+#define MDP_FULL_BYPASS_WORD57 (0x101e4)
+#define MDP_FULL_BYPASS_WORD58 (0x101e8)
+#define MDP_FULL_BYPASS_WORD59 (0x101ec)
+#define MDP_FULL_BYPASS_WORD60 (0x101f0)
+#define MDP_VSYNC_THRESHOLD (0x101f0)
+#define MDP_FULL_BYPASS_WORD61 (0x101f4)
+#define MDP_FULL_BYPASS_WORD62 (0x101f8)
+#define MDP_FULL_BYPASS_WORD63 (0x101fc)
+#define MDP_TFETCH_TEST_MODE (0x20004)
+#define MDP_TFETCH_STATUS (0x20008)
+#define MDP_TFETCH_TILE_COUNT (0x20010)
+#define MDP_TFETCH_FETCH_COUNT (0x20014)
+#define MDP_TFETCH_CONSTANT_COLOR (0x20040)
+#define MDP_CSC_BYPASS (0x40004)
+#define MDP_SCALE_COEFF_LSB (0x5fffc)
+#define MDP_TV_OUT_CTL (0xc0000)
+#define MDP_TV_OUT_FIR_COEFF (0xc0004)
+#define MDP_TV_OUT_BUF_ADDR (0xc0008)
+#define MDP_TV_OUT_CC_DATA (0xc000c)
+#define MDP_TV_OUT_SOBEL (0xc0010)
+#define MDP_TV_OUT_Y_CLAMP (0xc0018)
+#define MDP_TV_OUT_CB_CLAMP (0xc001c)
+#define MDP_TV_OUT_CR_CLAMP (0xc0020)
+#define MDP_TEST_MODE_CLK (0xd0000)
+#define MDP_TEST_MISR_RESET_CLK (0xd0004)
+#define MDP_TEST_EXPORT_MISR_CLK (0xd0008)
+#define MDP_TEST_MISR_CURR_VAL_CLK (0xd000c)
+#define MDP_TEST_MODE_HCLK (0xd0100)
+#define MDP_TEST_MISR_RESET_HCLK (0xd0104)
+#define MDP_TEST_EXPORT_MISR_HCLK (0xd0108)
+#define MDP_TEST_MISR_CURR_VAL_HCLK (0xd010c)
+#define MDP_TEST_MODE_DCLK (0xd0200)
+#define MDP_TEST_MISR_RESET_DCLK (0xd0204)
+#define MDP_TEST_EXPORT_MISR_DCLK (0xd0208)
+#define MDP_TEST_MISR_CURR_VAL_DCLK (0xd020c)
+#define MDP_TEST_CAPTURED_DCLK (0xd0210)
+#define MDP_TEST_MISR_CAPT_VAL_DCLK (0xd0214)
+#define MDP_LCDC_CTL (0xe0000)
+#define MDP_LCDC_HSYNC_CTL (0xe0004)
+#define MDP_LCDC_VSYNC_CTL (0xe0008)
+#define MDP_LCDC_ACTIVE_HCTL (0xe000c)
+#define MDP_LCDC_ACTIVE_VCTL (0xe0010)
+#define MDP_LCDC_BORDER_CLR (0xe0014)
+#define MDP_LCDC_H_BLANK (0xe0018)
+#define MDP_LCDC_V_BLANK (0xe001c)
+#define MDP_LCDC_UNDERFLOW_CLR (0xe0020)
+#define MDP_LCDC_HSYNC_SKEW (0xe0024)
+#define MDP_LCDC_TEST_CTL (0xe0028)
+#define MDP_LCDC_LINE_IRQ (0xe002c)
+#define MDP_LCDC_CTL_POLARITY (0xe0030)
+#define MDP_LCDC_DMA_CONFIG (0xe1000)
+#define MDP_LCDC_DMA_SIZE (0xe1004)
+#define MDP_LCDC_DMA_IBUF_ADDR (0xe1008)
+#define MDP_LCDC_DMA_IBUF_Y_STRIDE (0xe100c)
+
+
+#define MDP_DMA2_TERM 0x1
+#define MDP_DMA3_TERM 0x2
+#define MDP_PPP_TERM 0x3
+
+/* MDP_INTR_ENABLE */
+#define DL0_ROI_DONE (1<<0)
+#define DL1_ROI_DONE (1<<1)
+#define DL0_DMA2_TERM_DONE (1<<2)
+#define DL1_DMA2_TERM_DONE (1<<3)
+#define DL0_PPP_TERM_DONE (1<<4)
+#define DL1_PPP_TERM_DONE (1<<5)
+#define TV_OUT_DMA3_DONE (1<<6)
+#define TV_ENC_UNDERRUN (1<<7)
+#define DL0_FETCH_DONE (1<<11)
+#define DL1_FETCH_DONE (1<<12)
+
+#define MDP_PPP_BUSY_STATUS (DL0_ROI_DONE| \
+ DL1_ROI_DONE| \
+ DL0_PPP_TERM_DONE| \
+ DL1_PPP_TERM_DONE)
+
+#define MDP_ANY_INTR_MASK (DL0_ROI_DONE| \
+ DL1_ROI_DONE| \
+ DL0_DMA2_TERM_DONE| \
+ DL1_DMA2_TERM_DONE| \
+ DL0_PPP_TERM_DONE| \
+ DL1_PPP_TERM_DONE| \
+ DL0_FETCH_DONE| \
+ DL1_FETCH_DONE| \
+ TV_ENC_UNDERRUN)
+
+#define MDP_TOP_LUMA 16
+#define MDP_TOP_CHROMA 0
+#define MDP_BOTTOM_LUMA 19
+#define MDP_BOTTOM_CHROMA 3
+#define MDP_LEFT_LUMA 22
+#define MDP_LEFT_CHROMA 6
+#define MDP_RIGHT_LUMA 25
+#define MDP_RIGHT_CHROMA 9
+
+#define CLR_G 0x0
+#define CLR_B 0x1
+#define CLR_R 0x2
+#define CLR_ALPHA 0x3
+
+#define CLR_Y CLR_G
+#define CLR_CB CLR_B
+#define CLR_CR CLR_R
+
+/* from lsb to msb */
+#define MDP_GET_PACK_PATTERN(a, x, y, z, bit) \
+ (((a)<<(bit*3))|((x)<<(bit*2))|((y)<<bit)|(z))
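+/*
+ * Worked example (illustration only): with the CLR_* values above,
+ * MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8)
+ *   = (0 << 24) | (0x2 << 16) | (0x0 << 8) | 0x1 = 0x00020001,
+ * i.e. with bit == 8 each byte of the pattern names one component,
+ * least significant component first.
+ */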
+
+/* MDP_SYNC_CONFIG_0/1/2 */
+#define MDP_SYNCFG_HGT_LOC 22
+#define MDP_SYNCFG_VSYNC_EXT_EN (1<<21)
+#define MDP_SYNCFG_VSYNC_INT_EN (1<<20)
+
+/* MDP_SYNC_THRESH_0 */
+#define MDP_PRIM_BELOW_LOC 0
+#define MDP_PRIM_ABOVE_LOC 8
+
+/* MDP_{PRIMARY,SECONDARY,EXTERNAL}_VSYNC_OUT_CTRL */
+#define VSYNC_PULSE_EN (1<<31)
+#define VSYNC_PULSE_INV (1<<30)
+
+/* MDP_VSYNC_CTRL */
+#define DISP0_VSYNC_MAP_VSYNC0 0
+#define DISP0_VSYNC_MAP_VSYNC1 (1<<0)
+#define DISP0_VSYNC_MAP_VSYNC2 ((1<<0)|(1<<1))
+
+#define DISP1_VSYNC_MAP_VSYNC0 0
+#define DISP1_VSYNC_MAP_VSYNC1 (1<<2)
+#define DISP1_VSYNC_MAP_VSYNC2 ((1<<2)|(1<<3))
+
+#define PRIMARY_LCD_SYNC_EN (1<<4)
+#define PRIMARY_LCD_SYNC_DISABLE 0
+
+#define SECONDARY_LCD_SYNC_EN (1<<5)
+#define SECONDARY_LCD_SYNC_DISABLE 0
+
+#define EXTERNAL_LCD_SYNC_EN (1<<6)
+#define EXTERNAL_LCD_SYNC_DISABLE 0
+
+/* MDP_VSYNC_THRESHOLD / MDP_FULL_BYPASS_WORD60 */
+#define VSYNC_THRESHOLD_ABOVE_LOC 0
+#define VSYNC_THRESHOLD_BELOW_LOC 16
+#define VSYNC_ANTI_TEAR_EN (1<<31)
+
+/* MDP_COMMAND_CONFIG / MDP_FULL_BYPASS_WORD1 */
+#define MDP_CMD_DBGBUS_EN (1<<0)
+
+/* MDP_PPP_SOURCE_CONFIG / MDP_FULL_BYPASS_WORD9&53 */
+#define PPP_SRC_C0G_8BIT ((1<<1)|(1<<0))
+#define PPP_SRC_C1B_8BIT ((1<<3)|(1<<2))
+#define PPP_SRC_C2R_8BIT ((1<<5)|(1<<4))
+#define PPP_SRC_C3A_8BIT ((1<<7)|(1<<6))
+
+#define PPP_SRC_C0G_6BIT (1<<1)
+#define PPP_SRC_C1B_6BIT (1<<3)
+#define PPP_SRC_C2R_6BIT (1<<5)
+
+#define PPP_SRC_C0G_5BIT (1<<0)
+#define PPP_SRC_C1B_5BIT (1<<2)
+#define PPP_SRC_C2R_5BIT (1<<4)
+
+#define PPP_SRC_C3ALPHA_EN (1<<8)
+
+#define PPP_SRC_BPP_1BYTES 0
+#define PPP_SRC_BPP_2BYTES (1<<9)
+#define PPP_SRC_BPP_3BYTES (1<<10)
+#define PPP_SRC_BPP_4BYTES ((1<<10)|(1<<9))
+
+#define PPP_SRC_BPP_ROI_ODD_X (1<<11)
+#define PPP_SRC_BPP_ROI_ODD_Y (1<<12)
+#define PPP_SRC_INTERLVD_2COMPONENTS (1<<13)
+#define PPP_SRC_INTERLVD_3COMPONENTS (1<<14)
+#define PPP_SRC_INTERLVD_4COMPONENTS ((1<<14)|(1<<13))
+
+
+/* RGB666 unpack format
+ * TIGHT means R6+G6+B6 packed together
+ * LOOSE means R6+2, G6+2, B6+2 (data aligned to the MSB of each byte)
+ *       or 2+R6, 2+G6, 2+B6 (data aligned to the LSB of each byte)
+ */
+#define PPP_SRC_PACK_TIGHT (1<<17)
+#define PPP_SRC_PACK_LOOSE 0
+#define PPP_SRC_PACK_ALIGN_LSB 0
+#define PPP_SRC_PACK_ALIGN_MSB (1<<18)
+
+#define PPP_SRC_PLANE_INTERLVD 0
+#define PPP_SRC_PLANE_PSEUDOPLNR (1<<20)
+
+#define PPP_SRC_WMV9_MODE (1<<21)
+
+/* MDP_PPP_OPERATION_CONFIG / MDP_FULL_BYPASS_WORD14 */
+#define PPP_OP_SCALE_X_ON (1<<0)
+#define PPP_OP_SCALE_Y_ON (1<<1)
+
+#define PPP_OP_CONVERT_RGB2YCBCR 0
+#define PPP_OP_CONVERT_YCBCR2RGB (1<<2)
+#define PPP_OP_CONVERT_ON (1<<3)
+
+#define PPP_OP_CONVERT_MATRIX_PRIMARY 0
+#define PPP_OP_CONVERT_MATRIX_SECONDARY (1<<4)
+
+#define PPP_OP_LUT_C0_ON (1<<5)
+#define PPP_OP_LUT_C1_ON (1<<6)
+#define PPP_OP_LUT_C2_ON (1<<7)
+
+/* rotate or blend enable */
+#define PPP_OP_ROT_ON (1<<8)
+
+#define PPP_OP_ROT_90 (1<<9)
+#define PPP_OP_FLIP_LR (1<<10)
+#define PPP_OP_FLIP_UD (1<<11)
+
+#define PPP_OP_BLEND_ON (1<<12)
+
+#define PPP_OP_BLEND_SRCPIXEL_ALPHA 0
+#define PPP_OP_BLEND_DSTPIXEL_ALPHA (1<<13)
+#define PPP_OP_BLEND_CONSTANT_ALPHA (1<<14)
+#define PPP_OP_BLEND_SRCPIXEL_TRANSP ((1<<13)|(1<<14))
+
+#define PPP_OP_BLEND_ALPHA_BLEND_NORMAL 0
+#define PPP_OP_BLEND_ALPHA_BLEND_REVERSE (1<<15)
+
+#define PPP_OP_DITHER_EN (1<<16)
+
+#define PPP_OP_COLOR_SPACE_RGB 0
+#define PPP_OP_COLOR_SPACE_YCBCR (1<<17)
+
+#define PPP_OP_SRC_CHROMA_RGB 0
+#define PPP_OP_SRC_CHROMA_H2V1 (1<<18)
+#define PPP_OP_SRC_CHROMA_H1V2 (1<<19)
+#define PPP_OP_SRC_CHROMA_420 ((1<<18)|(1<<19))
+#define PPP_OP_SRC_CHROMA_COSITE 0
+#define PPP_OP_SRC_CHROMA_OFFSITE (1<<20)
+
+#define PPP_OP_DST_CHROMA_RGB 0
+#define PPP_OP_DST_CHROMA_H2V1 (1<<21)
+#define PPP_OP_DST_CHROMA_H1V2 (1<<22)
+#define PPP_OP_DST_CHROMA_420 ((1<<21)|(1<<22))
+#define PPP_OP_DST_CHROMA_COSITE 0
+#define PPP_OP_DST_CHROMA_OFFSITE (1<<23)
+
+#define PPP_BLEND_ALPHA_TRANSP (1<<24)
+
+#define PPP_OP_BG_CHROMA_RGB 0
+#define PPP_OP_BG_CHROMA_H2V1 (1<<25)
+#define PPP_OP_BG_CHROMA_H1V2 (1<<26)
+#define PPP_OP_BG_CHROMA_420 ((1<<25)|(1<<26))
+#define PPP_OP_BG_CHROMA_SITE_COSITE 0
+#define PPP_OP_BG_CHROMA_SITE_OFFSITE (1<<27)
+
+/* MDP_PPP_DESTINATION_CONFIG / MDP_FULL_BYPASS_WORD20 */
+#define PPP_DST_C0G_8BIT ((1<<0)|(1<<1))
+#define PPP_DST_C1B_8BIT ((1<<3)|(1<<2))
+#define PPP_DST_C2R_8BIT ((1<<5)|(1<<4))
+#define PPP_DST_C3A_8BIT ((1<<7)|(1<<6))
+
+#define PPP_DST_C0G_6BIT (1<<1)
+#define PPP_DST_C1B_6BIT (1<<3)
+#define PPP_DST_C2R_6BIT (1<<5)
+
+#define PPP_DST_C0G_5BIT (1<<0)
+#define PPP_DST_C1B_5BIT (1<<2)
+#define PPP_DST_C2R_5BIT (1<<4)
+
+#define PPP_DST_C3ALPHA_EN (1<<8)
+
+#define PPP_DST_INTERLVD_2COMPONENTS (1<<9)
+#define PPP_DST_INTERLVD_3COMPONENTS (1<<10)
+#define PPP_DST_INTERLVD_4COMPONENTS ((1<<10)|(1<<9))
+#define PPP_DST_INTERLVD_6COMPONENTS ((1<<11)|(1<<9))
+
+#define PPP_DST_PACK_LOOSE 0
+#define PPP_DST_PACK_TIGHT (1<<13)
+#define PPP_DST_PACK_ALIGN_LSB 0
+#define PPP_DST_PACK_ALIGN_MSB (1<<14)
+
+#define PPP_DST_OUT_SEL_AXI 0
+#define PPP_DST_OUT_SEL_MDDI (1<<15)
+
+#define PPP_DST_BPP_2BYTES (1<<16)
+#define PPP_DST_BPP_3BYTES (1<<17)
+#define PPP_DST_BPP_4BYTES ((1<<17)|(1<<16))
+
+#define PPP_DST_PLANE_INTERLVD 0
+#define PPP_DST_PLANE_PLANAR (1<<18)
+#define PPP_DST_PLANE_PSEUDOPLNR (1<<19)
+
+#define PPP_DST_TO_TV (1<<20)
+
+#define PPP_DST_MDDI_PRIMARY 0
+#define PPP_DST_MDDI_SECONDARY (1<<21)
+#define PPP_DST_MDDI_EXTERNAL (1<<22)
+
+/* image configurations by image type */
+#define PPP_CFG_MDP_RGB_565(dir) (PPP_##dir##_C2R_5BIT | \
+ PPP_##dir##_C0G_6BIT | \
+ PPP_##dir##_C1B_5BIT | \
+ PPP_##dir##_BPP_2BYTES | \
+ PPP_##dir##_INTERLVD_3COMPONENTS | \
+ PPP_##dir##_PACK_TIGHT | \
+ PPP_##dir##_PACK_ALIGN_LSB | \
+ PPP_##dir##_PLANE_INTERLVD)
+
+#define PPP_CFG_MDP_RGB_888(dir) (PPP_##dir##_C2R_8BIT | \
+ PPP_##dir##_C0G_8BIT | \
+ PPP_##dir##_C1B_8BIT | \
+ PPP_##dir##_BPP_3BYTES | \
+ PPP_##dir##_INTERLVD_3COMPONENTS | \
+ PPP_##dir##_PACK_TIGHT | \
+ PPP_##dir##_PACK_ALIGN_LSB | \
+ PPP_##dir##_PLANE_INTERLVD)
+
+#define PPP_CFG_MDP_ARGB_8888(dir) (PPP_##dir##_C2R_8BIT | \
+ PPP_##dir##_C0G_8BIT | \
+ PPP_##dir##_C1B_8BIT | \
+ PPP_##dir##_C3A_8BIT | \
+ PPP_##dir##_C3ALPHA_EN | \
+ PPP_##dir##_BPP_4BYTES | \
+ PPP_##dir##_INTERLVD_4COMPONENTS | \
+ PPP_##dir##_PACK_TIGHT | \
+ PPP_##dir##_PACK_ALIGN_LSB | \
+ PPP_##dir##_PLANE_INTERLVD)
+
+#define PPP_CFG_MDP_XRGB_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
+#define PPP_CFG_MDP_RGBA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
+#define PPP_CFG_MDP_BGRA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir)
+
+#define PPP_CFG_MDP_Y_CBCR_H2V2(dir) (PPP_##dir##_C2R_8BIT | \
+ PPP_##dir##_C0G_8BIT | \
+ PPP_##dir##_C1B_8BIT | \
+ PPP_##dir##_C3A_8BIT | \
+ PPP_##dir##_BPP_2BYTES | \
+ PPP_##dir##_INTERLVD_2COMPONENTS | \
+ PPP_##dir##_PACK_TIGHT | \
+ PPP_##dir##_PACK_ALIGN_LSB | \
+ PPP_##dir##_PLANE_PSEUDOPLNR)
+
+#define PPP_CFG_MDP_Y_CRCB_H2V2(dir) PPP_CFG_MDP_Y_CBCR_H2V2(dir)
+
+#define PPP_CFG_MDP_YCRYCB_H2V1(dir) (PPP_##dir##_C2R_8BIT | \
+ PPP_##dir##_C0G_8BIT | \
+ PPP_##dir##_C1B_8BIT | \
+ PPP_##dir##_C3A_8BIT | \
+ PPP_##dir##_BPP_2BYTES | \
+ PPP_##dir##_INTERLVD_4COMPONENTS | \
+ PPP_##dir##_PACK_TIGHT | \
+ PPP_##dir##_PACK_ALIGN_LSB |\
+ PPP_##dir##_PLANE_INTERLVD)
+
+#define PPP_CFG_MDP_Y_CBCR_H2V1(dir) (PPP_##dir##_C2R_8BIT | \
+ PPP_##dir##_C0G_8BIT | \
+ PPP_##dir##_C1B_8BIT | \
+ PPP_##dir##_C3A_8BIT | \
+ PPP_##dir##_BPP_2BYTES | \
+ PPP_##dir##_INTERLVD_2COMPONENTS | \
+ PPP_##dir##_PACK_TIGHT | \
+ PPP_##dir##_PACK_ALIGN_LSB | \
+ PPP_##dir##_PLANE_PSEUDOPLNR)
+
+#define PPP_CFG_MDP_Y_CRCB_H2V1(dir) PPP_CFG_MDP_Y_CBCR_H2V1(dir)
+
+#define PPP_PACK_PATTERN_MDP_RGB_565 \
+ MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8)
+#define PPP_PACK_PATTERN_MDP_RGB_888 PPP_PACK_PATTERN_MDP_RGB_565
+#define PPP_PACK_PATTERN_MDP_XRGB_8888 \
+ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8)
+#define PPP_PACK_PATTERN_MDP_ARGB_8888 PPP_PACK_PATTERN_MDP_XRGB_8888
+#define PPP_PACK_PATTERN_MDP_RGBA_8888 \
+ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8)
+#define PPP_PACK_PATTERN_MDP_BGRA_8888 \
+ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8)
+#define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1 \
+ MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8)
+#define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V2 PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1
+#define PPP_PACK_PATTERN_MDP_Y_CRCB_H2V1 \
+ MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8)
+#define PPP_PACK_PATTERN_MDP_Y_CRCB_H2V2 PPP_PACK_PATTERN_MDP_Y_CRCB_H2V1
+#define PPP_PACK_PATTERN_MDP_YCRYCB_H2V1 \
+ MDP_GET_PACK_PATTERN(CLR_Y, CLR_R, CLR_Y, CLR_B, 8)
+
+#define PPP_CHROMA_SAMP_MDP_RGB_565(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_RGB_888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_XRGB_8888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_ARGB_8888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_RGBA_8888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_BGRA_8888(dir) PPP_OP_##dir##_CHROMA_RGB
+#define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
+#define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V2(dir) PPP_OP_##dir##_CHROMA_420
+#define PPP_CHROMA_SAMP_MDP_Y_CRCB_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
+#define PPP_CHROMA_SAMP_MDP_Y_CRCB_H2V2(dir) PPP_OP_##dir##_CHROMA_420
+#define PPP_CHROMA_SAMP_MDP_YCRYCB_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1
+
+/* Helpful array generation macros */
+#define PPP_ARRAY0(name) \
+ [MDP_RGB_565] = PPP_##name##_MDP_RGB_565,\
+ [MDP_RGB_888] = PPP_##name##_MDP_RGB_888,\
+ [MDP_XRGB_8888] = PPP_##name##_MDP_XRGB_8888,\
+ [MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888,\
+ [MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888,\
+ [MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888,\
+ [MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1,\
+ [MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2,\
+ [MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1,\
+ [MDP_Y_CRCB_H2V2] = PPP_##name##_MDP_Y_CRCB_H2V2,\
+ [MDP_YCRYCB_H2V1] = PPP_##name##_MDP_YCRYCB_H2V1
+
+#define PPP_ARRAY1(name, dir) \
+ [MDP_RGB_565] = PPP_##name##_MDP_RGB_565(dir),\
+ [MDP_RGB_888] = PPP_##name##_MDP_RGB_888(dir),\
+ [MDP_XRGB_8888] = PPP_##name##_MDP_XRGB_8888(dir),\
+ [MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888(dir),\
+ [MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888(dir),\
+ [MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888(dir),\
+ [MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1(dir),\
+ [MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2(dir),\
+ [MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1(dir),\
+ [MDP_Y_CRCB_H2V2] = PPP_##name##_MDP_Y_CRCB_H2V2(dir),\
+ [MDP_YCRYCB_H2V1] = PPP_##name##_MDP_YCRYCB_H2V1(dir)
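+/*
+ * For illustration: PPP_ARRAY1(CFG, SRC) expands, via token pasting, into
+ * designated initializers such as
+ *   [MDP_RGB_565] = PPP_CFG_MDP_RGB_565(SRC),
+ * which in turn OR together the PPP_SRC_* bits above; mdp_ppp.c uses this
+ * to build its per-format src_img_cfg[] and dst_img_cfg[] lookup tables.
+ */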
+
+#define IS_YCRCB(img) ((img == MDP_Y_CRCB_H2V2) | (img == MDP_Y_CBCR_H2V2) | \
+ (img == MDP_Y_CRCB_H2V1) | (img == MDP_Y_CBCR_H2V1) | \
+ (img == MDP_YCRYCB_H2V1))
+#define IS_RGB(img) ((img == MDP_RGB_565) | (img == MDP_RGB_888) | \
+ (img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \
+ (img == MDP_XRGB_8888) | (img == MDP_BGRA_8888))
+#define HAS_ALPHA(img) ((img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \
+ (img == MDP_BGRA_8888))
+
+#define IS_PSEUDOPLNR(img) ((img == MDP_Y_CRCB_H2V2) | \
+ (img == MDP_Y_CBCR_H2V2) | \
+ (img == MDP_Y_CRCB_H2V1) | \
+ (img == MDP_Y_CBCR_H2V1))
+
+/* Mappings from addr to purpose */
+#define PPP_ADDR_SRC_ROI MDP_FULL_BYPASS_WORD2
+#define PPP_ADDR_SRC0 MDP_FULL_BYPASS_WORD3
+#define PPP_ADDR_SRC1 MDP_FULL_BYPASS_WORD4
+#define PPP_ADDR_SRC_YSTRIDE MDP_FULL_BYPASS_WORD7
+#define PPP_ADDR_SRC_CFG MDP_FULL_BYPASS_WORD9
+#define PPP_ADDR_SRC_PACK_PATTERN MDP_FULL_BYPASS_WORD10
+#define PPP_ADDR_OPERATION MDP_FULL_BYPASS_WORD14
+#define PPP_ADDR_PHASEX_INIT MDP_FULL_BYPASS_WORD15
+#define PPP_ADDR_PHASEY_INIT MDP_FULL_BYPASS_WORD16
+#define PPP_ADDR_PHASEX_STEP MDP_FULL_BYPASS_WORD17
+#define PPP_ADDR_PHASEY_STEP MDP_FULL_BYPASS_WORD18
+#define PPP_ADDR_ALPHA_TRANSP MDP_FULL_BYPASS_WORD19
+#define PPP_ADDR_DST_CFG MDP_FULL_BYPASS_WORD20
+#define PPP_ADDR_DST_PACK_PATTERN MDP_FULL_BYPASS_WORD21
+#define PPP_ADDR_DST_ROI MDP_FULL_BYPASS_WORD25
+#define PPP_ADDR_DST0 MDP_FULL_BYPASS_WORD26
+#define PPP_ADDR_DST1 MDP_FULL_BYPASS_WORD27
+#define PPP_ADDR_DST_YSTRIDE MDP_FULL_BYPASS_WORD30
+#define PPP_ADDR_EDGE MDP_FULL_BYPASS_WORD46
+#define PPP_ADDR_BG0 MDP_FULL_BYPASS_WORD48
+#define PPP_ADDR_BG1 MDP_FULL_BYPASS_WORD49
+#define PPP_ADDR_BG_YSTRIDE MDP_FULL_BYPASS_WORD51
+#define PPP_ADDR_BG_CFG MDP_FULL_BYPASS_WORD53
+#define PPP_ADDR_BG_PACK_PATTERN MDP_FULL_BYPASS_WORD54
+
+/* MDP_DMA_CONFIG / MDP_FULL_BYPASS_WORD32 */
+#define DMA_DSTC0G_6BITS (1<<1)
+#define DMA_DSTC1B_6BITS (1<<3)
+#define DMA_DSTC2R_6BITS (1<<5)
+#define DMA_DSTC0G_5BITS (1<<0)
+#define DMA_DSTC1B_5BITS (1<<2)
+#define DMA_DSTC2R_5BITS (1<<4)
+
+#define DMA_PACK_TIGHT (1<<6)
+#define DMA_PACK_LOOSE 0
+#define DMA_PACK_ALIGN_LSB 0
+#define DMA_PACK_ALIGN_MSB (1<<7)
+#define DMA_PACK_PATTERN_RGB \
+ (MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 2)<<8)
+
+#define DMA_OUT_SEL_AHB 0
+#define DMA_OUT_SEL_MDDI (1<<14)
+#define DMA_AHBM_LCD_SEL_PRIMARY 0
+#define DMA_AHBM_LCD_SEL_SECONDARY (1<<15)
+#define DMA_IBUF_C3ALPHA_EN (1<<16)
+#define DMA_DITHER_EN (1<<17)
+
+#define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY 0
+#define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY (1<<18)
+#define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL (1<<19)
+
+#define DMA_IBUF_FORMAT_RGB565 (1<<20)
+#define DMA_IBUF_FORMAT_RGB888_OR_ARGB8888 0
+
+#define DMA_IBUF_NONCONTIGUOUS (1<<21)
+
+/* MDDI REGISTER ? */
+#define MDDI_VDO_PACKET_DESC 0x5666
+#define MDDI_VDO_PACKET_PRIM 0xC3
+#define MDDI_VDO_PACKET_SECD 0xC0
+
+#endif
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
new file mode 100644
index 00000000000..ba2c4673b64
--- /dev/null
+++ b/drivers/video/msm/mdp_ppp.c
@@ -0,0 +1,750 @@
+/* drivers/video/msm/mdp_ppp.c
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/fb.h>
+#include <linux/file.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
+#include <linux/android_pmem.h>
+#include <mach/msm_fb.h>
+
+#include "mdp_hw.h"
+#include "mdp_scale_tables.h"
+
+#define DLOG(x...) do {} while (0)
+
+#define MDP_DOWNSCALE_BLUR (MDP_DOWNSCALE_MAX + 1)
+static int downscale_y_table = MDP_DOWNSCALE_MAX;
+static int downscale_x_table = MDP_DOWNSCALE_MAX;
+
+struct mdp_regs {
+ uint32_t src0;
+ uint32_t src1;
+ uint32_t dst0;
+ uint32_t dst1;
+ uint32_t src_cfg;
+ uint32_t dst_cfg;
+ uint32_t src_pack;
+ uint32_t dst_pack;
+ uint32_t src_rect;
+ uint32_t dst_rect;
+ uint32_t src_ystride;
+ uint32_t dst_ystride;
+ uint32_t op;
+ uint32_t src_bpp;
+ uint32_t dst_bpp;
+ uint32_t edge;
+ uint32_t phasex_init;
+ uint32_t phasey_init;
+ uint32_t phasex_step;
+ uint32_t phasey_step;
+};
+
+static uint32_t pack_pattern[] = {
+ PPP_ARRAY0(PACK_PATTERN)
+};
+
+static uint32_t src_img_cfg[] = {
+ PPP_ARRAY1(CFG, SRC)
+};
+
+static uint32_t dst_img_cfg[] = {
+ PPP_ARRAY1(CFG, DST)
+};
+
+static uint32_t bytes_per_pixel[] = {
+ [MDP_RGB_565] = 2,
+ [MDP_RGB_888] = 3,
+ [MDP_XRGB_8888] = 4,
+ [MDP_ARGB_8888] = 4,
+ [MDP_RGBA_8888] = 4,
+ [MDP_BGRA_8888] = 4,
+ [MDP_Y_CBCR_H2V1] = 1,
+ [MDP_Y_CBCR_H2V2] = 1,
+ [MDP_Y_CRCB_H2V1] = 1,
+ [MDP_Y_CRCB_H2V2] = 1,
+ [MDP_YCRYCB_H2V1] = 2
+};
+
+static uint32_t dst_op_chroma[] = {
+ PPP_ARRAY1(CHROMA_SAMP, DST)
+};
+
+static uint32_t src_op_chroma[] = {
+ PPP_ARRAY1(CHROMA_SAMP, SRC)
+};
+
+static uint32_t bg_op_chroma[] = {
+ PPP_ARRAY1(CHROMA_SAMP, BG)
+};
+
+static void rotate_dst_addr_x(struct mdp_blit_req *req, struct mdp_regs *regs)
+{
+ regs->dst0 += (req->dst_rect.w -
+ min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp;
+ regs->dst1 += (req->dst_rect.w -
+ min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp;
+}
+
+static void rotate_dst_addr_y(struct mdp_blit_req *req, struct mdp_regs *regs)
+{
+ regs->dst0 += (req->dst_rect.h -
+ min((uint32_t)16, req->dst_rect.h)) *
+ regs->dst_ystride;
+ regs->dst1 += (req->dst_rect.h -
+ min((uint32_t)16, req->dst_rect.h)) *
+ regs->dst_ystride;
+}
+
+static void blit_rotate(struct mdp_blit_req *req,
+ struct mdp_regs *regs)
+{
+ if (req->flags == MDP_ROT_NOP)
+ return;
+
+ regs->op |= PPP_OP_ROT_ON;
+ if ((req->flags & MDP_ROT_90 || req->flags & MDP_FLIP_LR) &&
+ !(req->flags & MDP_ROT_90 && req->flags & MDP_FLIP_LR))
+ rotate_dst_addr_x(req, regs);
+ if (req->flags & MDP_ROT_90)
+ regs->op |= PPP_OP_ROT_90;
+ if (req->flags & MDP_FLIP_UD) {
+ regs->op |= PPP_OP_FLIP_UD;
+ rotate_dst_addr_y(req, regs);
+ }
+ if (req->flags & MDP_FLIP_LR)
+ regs->op |= PPP_OP_FLIP_LR;
+}
+
+static void blit_convert(struct mdp_blit_req *req, struct mdp_regs *regs)
+{
+ if (req->src.format == req->dst.format)
+ return;
+ if (IS_RGB(req->src.format) && IS_YCRCB(req->dst.format)) {
+ regs->op |= PPP_OP_CONVERT_RGB2YCBCR | PPP_OP_CONVERT_ON;
+ } else if (IS_YCRCB(req->src.format) && IS_RGB(req->dst.format)) {
+ regs->op |= PPP_OP_CONVERT_YCBCR2RGB | PPP_OP_CONVERT_ON;
+ if (req->dst.format == MDP_RGB_565)
+ regs->op |= PPP_OP_CONVERT_MATRIX_SECONDARY;
+ }
+}
+
+#define GET_BIT_RANGE(value, high, low) \
+ (((1 << (high - low + 1)) - 1) & (value >> low))
+static uint32_t transp_convert(struct mdp_blit_req *req)
+{
+ uint32_t transp = 0;
+ if (req->src.format == MDP_RGB_565) {
+ /* pad each value to 8 bits by copying the high bits into the
+ * low end, convert RGB to RBG by switching low 2 components */
+ transp |= ((GET_BIT_RANGE(req->transp_mask, 15, 11) << 3) |
+ (GET_BIT_RANGE(req->transp_mask, 15, 13))) << 16;
+
+ transp |= ((GET_BIT_RANGE(req->transp_mask, 4, 0) << 3) |
+ (GET_BIT_RANGE(req->transp_mask, 4, 2))) << 8;
+
+ transp |= (GET_BIT_RANGE(req->transp_mask, 10, 5) << 2) |
+ (GET_BIT_RANGE(req->transp_mask, 10, 9));
+ } else {
+ /* convert RGB to RBG */
+ transp |= (GET_BIT_RANGE(req->transp_mask, 15, 8)) |
+ (GET_BIT_RANGE(req->transp_mask, 23, 16) << 16) |
+ (GET_BIT_RANGE(req->transp_mask, 7, 0) << 8);
+ }
+ return transp;
+}
+#undef GET_BIT_RANGE
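+/*
+ * Worked example for transp_convert() above (illustration only): an RGB565
+ * transp_mask of 0xf81f (R = 0x1f, G = 0, B = 0x1f) is padded to 8 bits per
+ * component and reordered into the R|B|G byte layout:
+ *   red -> 0xff << 16, blue -> 0xff << 8, green -> 0x00,
+ * so the function returns 0x00ffff00.
+ */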
+
+static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs)
+{
+ /* TRANSP BLEND */
+ if (req->transp_mask != MDP_TRANSP_NOP) {
+ req->transp_mask = transp_convert(req);
+ if (req->alpha != MDP_ALPHA_NOP) {
+ /* use blended transparency mode
+ * pixel = (src == transp) ? dst : blend
+ * blend is combo of blend_eq_sel and
+ * blend_alpha_sel */
+ regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+ PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
+ PPP_OP_BLEND_CONSTANT_ALPHA |
+ PPP_BLEND_ALPHA_TRANSP;
+ } else {
+ /* simple transparency mode
+ * pixel = (src == transp) ? dst : src */
+ regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+ PPP_OP_BLEND_SRCPIXEL_TRANSP;
+ }
+ }
+
+ req->alpha &= 0xff;
+ /* ALPHA BLEND */
+ if (HAS_ALPHA(req->src.format)) {
+ regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+ PPP_OP_BLEND_SRCPIXEL_ALPHA;
+ } else if (req->alpha < MDP_ALPHA_NOP) {
+ /* just blend by alpha */
+ regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
+ PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
+ PPP_OP_BLEND_CONSTANT_ALPHA;
+ }
+
+ regs->op |= bg_op_chroma[req->dst.format];
+}
+
+#define ONE_HALF (1LL << 32)
+#define ONE (1LL << 33)
+#define TWO (2LL << 33)
+#define THREE (3LL << 33)
+#define FRAC_MASK (ONE - 1)
+#define INT_MASK (~FRAC_MASK)
+
+static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin,
+ uint32_t *phase_init, uint32_t *phase_step)
+{
+ /* to improve precision, calculations are done in U31.33 fixed point and
+ * converted to U3.29 at the end */
+ int64_t k1, k2, k3, k4, tmp;
+ uint64_t n, d, os, os_p, od, od_p, oreq;
+ unsigned rpa = 0;
+ int64_t ip64, delta;
+
+ if (dim_out % 3 == 0)
+ rpa = !(dim_in % (dim_out / 3));
+
+ n = ((uint64_t)dim_out) << 34;
+ d = dim_in;
+ if (!d)
+ return -1;
+ do_div(n, d);
+ k3 = (n + 1) >> 1;
+ if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) {
+ DLOG("crap bad scale\n");
+ return -1;
+ }
+ n = ((uint64_t)dim_in) << 34;
+ d = (uint64_t)dim_out;
+ if (!d)
+ return -1;
+ do_div(n, d);
+ k1 = (n + 1) >> 1;
+ k2 = (k1 - ONE) >> 1;
+
+ *phase_init = (int)(k2 >> 4);
+ k4 = (k3 - ONE) >> 1;
+
+ if (rpa) {
+ os = ((uint64_t)origin << 33) - ONE_HALF;
+ tmp = (dim_out * os) + ONE_HALF;
+ if (!dim_in)
+ return -1;
+ do_div(tmp, dim_in);
+ od = tmp - ONE_HALF;
+ } else {
+ os = ((uint64_t)origin << 1) - 1;
+ od = (((k3 * os) >> 1) + k4);
+ }
+
+ od_p = od & INT_MASK;
+ if (od_p != od)
+ od_p += ONE;
+
+ if (rpa) {
+ tmp = (dim_in * od_p) + ONE_HALF;
+ if (!dim_in)
+ return -1;
+ do_div(tmp, dim_in);
+ os_p = tmp - ONE_HALF;
+ } else {
+ os_p = ((k1 * (od_p >> 33)) + k2);
+ }
+
+ oreq = (os_p & INT_MASK) - ONE;
+
+ ip64 = os_p - oreq;
+ delta = ((int64_t)(origin) << 33) - oreq;
+ ip64 -= delta;
+ /* limit to valid range before the left shift */
+ delta = (ip64 & (1LL << 63)) ? 4 : -4;
+ delta <<= 33;
+ while (abs((int)(ip64 >> 33)) > 4)
+ ip64 += delta;
+ *phase_init = (int)(ip64 >> 4);
+ *phase_step = (uint32_t)(k1 >> 4);
+ return 0;
+}
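+/*
+ * Rough sanity check for scale_params() above (illustration only):
+ * k1 is approximately dim_in/dim_out in U31.33, and phase_step = k1 >> 4
+ * is U3.29. For a 2:1 downscale (dim_in = 2 * dim_out) this gives
+ *   phase_step ~= 2 << 29 = 0x40000000,
+ * i.e. the source position advances by two pixels per output pixel.
+ */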
+
+static void load_scale_table(const struct mdp_info *mdp,
+ struct mdp_table_entry *table, int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ mdp_writel(mdp, table[i].val, table[i].reg);
+}
+
+enum {
+ IMG_LEFT,
+ IMG_RIGHT,
+ IMG_TOP,
+ IMG_BOTTOM,
+};
+
+static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst,
+ uint32_t *interp1, uint32_t *interp2,
+ uint32_t *repeat1, uint32_t *repeat2)
+{
+ if (src > 3 * dst) {
+ *interp1 = 0;
+ *interp2 = src - 1;
+ *repeat1 = 0;
+ *repeat2 = 0;
+ } else if (src == 3 * dst) {
+ *interp1 = 0;
+ *interp2 = src;
+ *repeat1 = 0;
+ *repeat2 = 1;
+ } else if (src > dst && src < 3 * dst) {
+ *interp1 = -1;
+ *interp2 = src;
+ *repeat1 = 1;
+ *repeat2 = 1;
+ } else if (src == dst) {
+ *interp1 = -1;
+ *interp2 = src + 1;
+ *repeat1 = 1;
+ *repeat2 = 2;
+ } else {
+ *interp1 = -2;
+ *interp2 = src + 1;
+ *repeat1 = 2;
+ *repeat2 = 2;
+ }
+ *interp1 += src_coord;
+ *interp2 += src_coord;
+}
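+/*
+ * Example of get_edge_info() above (illustration only): for a 1:1 copy
+ * (src == dst) it asks for interpolation over
+ * [src_coord - 1, src_coord + src + 1], with one repeated edge pixel at
+ * the start and two at the end; get_edge_cond() below rejects any edge
+ * that would need more than three repeated pixels.
+ */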
+
+static int get_edge_cond(struct mdp_blit_req *req, struct mdp_regs *regs)
+{
+ int32_t luma_interp[4];
+ int32_t luma_repeat[4];
+ int32_t chroma_interp[4];
+ int32_t chroma_bound[4];
+ int32_t chroma_repeat[4];
+ uint32_t dst_w, dst_h;
+
+ memset(&luma_interp, 0, sizeof(int32_t) * 4);
+ memset(&luma_repeat, 0, sizeof(int32_t) * 4);
+ memset(&chroma_interp, 0, sizeof(int32_t) * 4);
+ memset(&chroma_bound, 0, sizeof(int32_t) * 4);
+ memset(&chroma_repeat, 0, sizeof(int32_t) * 4);
+ regs->edge = 0;
+
+ if (req->flags & MDP_ROT_90) {
+ dst_w = req->dst_rect.h;
+ dst_h = req->dst_rect.w;
+ } else {
+ dst_w = req->dst_rect.w;
+ dst_h = req->dst_rect.h;
+ }
+
+ if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) {
+ get_edge_info(req->src_rect.h, req->src_rect.y, dst_h,
+ &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM],
+ &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]);
+ get_edge_info(req->src_rect.w, req->src_rect.x, dst_w,
+ &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT],
+ &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]);
+ } else {
+ luma_interp[IMG_LEFT] = req->src_rect.x;
+ luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
+ luma_interp[IMG_TOP] = req->src_rect.y;
+ luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
+ luma_repeat[IMG_LEFT] = 0;
+ luma_repeat[IMG_TOP] = 0;
+ luma_repeat[IMG_RIGHT] = 0;
+ luma_repeat[IMG_BOTTOM] = 0;
+ }
+
+ chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT];
+ chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT];
+ chroma_interp[IMG_TOP] = luma_interp[IMG_TOP];
+ chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM];
+
+ chroma_bound[IMG_LEFT] = req->src_rect.x;
+ chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
+ chroma_bound[IMG_TOP] = req->src_rect.y;
+ chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
+
+ if (IS_YCRCB(req->src.format)) {
+ chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1;
+ chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1;
+
+ chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1;
+ chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1;
+ }
+
+ if (req->src.format == MDP_Y_CBCR_H2V2 ||
+ req->src.format == MDP_Y_CRCB_H2V2) {
+ chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1;
+ chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1)
+ >> 1;
+ chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1;
+ chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1;
+ }
+
+ chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] -
+ chroma_interp[IMG_LEFT];
+ chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] -
+ chroma_bound[IMG_RIGHT];
+ chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] -
+ chroma_interp[IMG_TOP];
+ chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] -
+ chroma_bound[IMG_BOTTOM];
+
+ if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 ||
+ chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 ||
+ chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 ||
+ chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 ||
+ luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 ||
+ luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 ||
+ luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 ||
+ luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3)
+ return -1;
+
+ regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA;
+ regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA;
+ regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA;
+ regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA;
+ regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA;
+ regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA;
+ regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA;
+ regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA;
+ return 0;
+}
+
+static int blit_scale(const struct mdp_info *mdp, struct mdp_blit_req *req,
+ struct mdp_regs *regs)
+{
+ uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
+ uint32_t scale_factor_x, scale_factor_y;
+ uint32_t downscale;
+ uint32_t dst_w, dst_h;
+
+ if (req->flags & MDP_ROT_90) {
+ dst_w = req->dst_rect.h;
+ dst_h = req->dst_rect.w;
+ } else {
+ dst_w = req->dst_rect.w;
+ dst_h = req->dst_rect.h;
+ }
+ if ((req->src_rect.w == dst_w) && (req->src_rect.h == dst_h) &&
+ !(req->flags & MDP_BLUR)) {
+ regs->phasex_init = 0;
+ regs->phasey_init = 0;
+ regs->phasex_step = 0;
+ regs->phasey_step = 0;
+ return 0;
+ }
+
+ if (scale_params(req->src_rect.w, dst_w, 1, &phase_init_x,
+ &phase_step_x) ||
+ scale_params(req->src_rect.h, dst_h, 1, &phase_init_y,
+ &phase_step_y))
+ return -1;
+
+ scale_factor_x = (dst_w * 10) / req->src_rect.w;
+ scale_factor_y = (dst_h * 10) / req->src_rect.h;
+
+ if (scale_factor_x > 8)
+ downscale = MDP_DOWNSCALE_PT8TO1;
+ else if (scale_factor_x > 6)
+ downscale = MDP_DOWNSCALE_PT6TOPT8;
+ else if (scale_factor_x > 4)
+ downscale = MDP_DOWNSCALE_PT4TOPT6;
+ else
+ downscale = MDP_DOWNSCALE_PT2TOPT4;
+ if (downscale != downscale_x_table) {
+ load_scale_table(mdp, mdp_downscale_x_table[downscale], 64);
+ downscale_x_table = downscale;
+ }
+
+ if (scale_factor_y > 8)
+ downscale = MDP_DOWNSCALE_PT8TO1;
+ else if (scale_factor_y > 6)
+ downscale = MDP_DOWNSCALE_PT6TOPT8;
+ else if (scale_factor_y > 4)
+ downscale = MDP_DOWNSCALE_PT4TOPT6;
+ else
+ downscale = MDP_DOWNSCALE_PT2TOPT4;
+ if (downscale != downscale_y_table) {
+ load_scale_table(mdp, mdp_downscale_y_table[downscale], 64);
+ downscale_y_table = downscale;
+ }
+
+ regs->phasex_init = phase_init_x;
+ regs->phasey_init = phase_init_y;
+ regs->phasex_step = phase_step_x;
+ regs->phasey_step = phase_step_y;
+ regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+ return 0;
+
+}
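+/*
+ * Illustration of the table-selection arithmetic in blit_scale() above:
+ * scale_factor is dst * 10 / src, so a 320 -> 240 pixel downscale gives
+ * 240 * 10 / 320 = 7, which falls in the "> 6" bucket and loads the
+ * PT6TOPT8 coefficient table.
+ */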
+
+static void blit_blur(const struct mdp_info *mdp, struct mdp_blit_req *req,
+ struct mdp_regs *regs)
+{
+ if (!(req->flags & MDP_BLUR))
+ return;
+
+ if (!(downscale_x_table == MDP_DOWNSCALE_BLUR &&
+ downscale_y_table == MDP_DOWNSCALE_BLUR)) {
+ load_scale_table(mdp, mdp_gaussian_blur_table, 128);
+ downscale_x_table = MDP_DOWNSCALE_BLUR;
+ downscale_y_table = MDP_DOWNSCALE_BLUR;
+ }
+
+ regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
+}
+
+
+#define IMG_LEN(rect_h, w, rect_w, bpp) (((rect_h) * w) * bpp)
+
+#define Y_TO_CRCB_RATIO(format) \
+ ((format == MDP_Y_CBCR_H2V2 || format == MDP_Y_CRCB_H2V2) ? 2 :\
+ (format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ? 1 : 1)
+
+static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp,
+ uint32_t *len0, uint32_t *len1)
+{
+ *len0 = IMG_LEN(rect->h, img->width, rect->w, bpp);
+ if (IS_PSEUDOPLNR(img->format))
+ *len1 = *len0/Y_TO_CRCB_RATIO(img->format);
+ else
+ *len1 = 0;
+}
+
+static int valid_src_dst(unsigned long src_start, unsigned long src_len,
+ unsigned long dst_start, unsigned long dst_len,
+ struct mdp_blit_req *req, struct mdp_regs *regs)
+{
+ unsigned long src_min_ok = src_start;
+ unsigned long src_max_ok = src_start + src_len;
+ unsigned long dst_min_ok = dst_start;
+ unsigned long dst_max_ok = dst_start + dst_len;
+ uint32_t src0_len, src1_len, dst0_len, dst1_len;
+ get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len,
+ &src1_len);
+ get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len,
+ &dst1_len);
+
+ if (regs->src0 < src_min_ok || regs->src0 > src_max_ok ||
+ regs->src0 + src0_len > src_max_ok) {
+ DLOG("invalid_src %x %x %lx %lx\n", regs->src0,
+ src0_len, src_min_ok, src_max_ok);
+ return 0;
+ }
+ if (regs->src_cfg & PPP_SRC_PLANE_PSEUDOPLNR) {
+ if (regs->src1 < src_min_ok || regs->src1 > src_max_ok ||
+ regs->src1 + src1_len > src_max_ok) {
+ DLOG("invalid_src1");
+ return 0;
+ }
+ }
+ if (regs->dst0 < dst_min_ok || regs->dst0 > dst_max_ok ||
+ regs->dst0 + dst0_len > dst_max_ok) {
+ DLOG("invalid_dst");
+ return 0;
+ }
+ if (regs->dst_cfg & PPP_SRC_PLANE_PSEUDOPLNR) {
+ if (regs->dst1 < dst_min_ok || regs->dst1 > dst_max_ok ||
+ regs->dst1 + dst1_len > dst_max_ok) {
+ DLOG("invalid_dst1");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+
+static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs,
+ struct file *src_file, struct file *dst_file)
+{
+#ifdef CONFIG_ANDROID_PMEM
+ uint32_t src0_len, src1_len, dst0_len, dst1_len;
+
+ /* flush src images to memory before dma to mdp */
+ get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len,
+ &src1_len);
+ flush_pmem_file(src_file, req->src.offset, src0_len);
+ if (IS_PSEUDOPLNR(req->src.format))
+ flush_pmem_file(src_file, req->src.offset + src0_len,
+ src1_len);
+
+ /* flush dst images */
+ get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len,
+ &dst1_len);
+ flush_pmem_file(dst_file, req->dst.offset, dst0_len);
+ if (IS_PSEUDOPLNR(req->dst.format))
+ flush_pmem_file(dst_file, req->dst.offset + dst0_len,
+ dst1_len);
+#endif
+}
+
+static void get_chroma_addr(struct mdp_img *img, struct mdp_rect *rect,
+ uint32_t base, uint32_t bpp, uint32_t cfg,
+ uint32_t *addr, uint32_t *ystride)
+{
+ uint32_t compress_v = Y_TO_CRCB_RATIO(img->format);
+ uint32_t compress_h = 2;
+ uint32_t offset;
+
+ if (IS_PSEUDOPLNR(img->format)) {
+ offset = (rect->x / compress_h) * compress_h;
+ offset += rect->y == 0 ? 0 :
+ ((rect->y + 1) / compress_v) * img->width;
+ *addr = base + (img->width * img->height * bpp);
+ *addr += offset * bpp;
+ *ystride |= *ystride << 16;
+ } else {
+ *addr = 0;
+ }
+}
+
+static int send_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
+ struct mdp_regs *regs, struct file *src_file,
+ struct file *dst_file)
+{
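+ /* 0x060 appears to correspond to MDP_MODE (0x00060) in the register
+ * map in mdp_hw.h; the raw offset is written here as-is */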
+ mdp_writel(mdp, 1, 0x060);
+ mdp_writel(mdp, regs->src_rect, PPP_ADDR_SRC_ROI);
+ mdp_writel(mdp, regs->src0, PPP_ADDR_SRC0);
+ mdp_writel(mdp, regs->src1, PPP_ADDR_SRC1);
+ mdp_writel(mdp, regs->src_ystride, PPP_ADDR_SRC_YSTRIDE);
+ mdp_writel(mdp, regs->src_cfg, PPP_ADDR_SRC_CFG);
+ mdp_writel(mdp, regs->src_pack, PPP_ADDR_SRC_PACK_PATTERN);
+
+ mdp_writel(mdp, regs->op, PPP_ADDR_OPERATION);
+ mdp_writel(mdp, regs->phasex_init, PPP_ADDR_PHASEX_INIT);
+ mdp_writel(mdp, regs->phasey_init, PPP_ADDR_PHASEY_INIT);
+ mdp_writel(mdp, regs->phasex_step, PPP_ADDR_PHASEX_STEP);
+ mdp_writel(mdp, regs->phasey_step, PPP_ADDR_PHASEY_STEP);
+
+ mdp_writel(mdp, (req->alpha << 24) | (req->transp_mask & 0xffffff),
+ PPP_ADDR_ALPHA_TRANSP);
+
+ mdp_writel(mdp, regs->dst_cfg, PPP_ADDR_DST_CFG);
+ mdp_writel(mdp, regs->dst_pack, PPP_ADDR_DST_PACK_PATTERN);
+ mdp_writel(mdp, regs->dst_rect, PPP_ADDR_DST_ROI);
+ mdp_writel(mdp, regs->dst0, PPP_ADDR_DST0);
+ mdp_writel(mdp, regs->dst1, PPP_ADDR_DST1);
+ mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_DST_YSTRIDE);
+
+ mdp_writel(mdp, regs->edge, PPP_ADDR_EDGE);
+ if (regs->op & PPP_OP_BLEND_ON) {
+ mdp_writel(mdp, regs->dst0, PPP_ADDR_BG0);
+ mdp_writel(mdp, regs->dst1, PPP_ADDR_BG1);
+ mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_BG_YSTRIDE);
+ mdp_writel(mdp, src_img_cfg[req->dst.format], PPP_ADDR_BG_CFG);
+ mdp_writel(mdp, pack_pattern[req->dst.format],
+ PPP_ADDR_BG_PACK_PATTERN);
+ }
+ flush_imgs(req, regs, src_file, dst_file);
+ mdp_writel(mdp, 0x1000, MDP_DISPLAY0_START);
+ return 0;
+}
+
+int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
+ struct file *src_file, unsigned long src_start, unsigned long src_len,
+ struct file *dst_file, unsigned long dst_start, unsigned long dst_len)
+{
+ struct mdp_regs regs = {0};
+
+ if (unlikely(req->src.format >= MDP_IMGTYPE_LIMIT ||
+ req->dst.format >= MDP_IMGTYPE_LIMIT)) {
+ printk(KERN_ERR "mpd_ppp: img is of wrong format\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(req->src_rect.x > req->src.width ||
+ req->src_rect.y > req->src.height ||
+ req->dst_rect.x > req->dst.width ||
+ req->dst_rect.y > req->dst.height)) {
+ printk(KERN_ERR "mpd_ppp: img rect is outside of img!\n");
+ return -EINVAL;
+ }
+
+ /* set the src image configuration */
+ regs.src_cfg = src_img_cfg[req->src.format];
+ regs.src_cfg |= (req->src_rect.x & 0x1) ? PPP_SRC_BPP_ROI_ODD_X : 0;
+ regs.src_cfg |= (req->src_rect.y & 0x1) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
+ regs.src_rect = (req->src_rect.h << 16) | req->src_rect.w;
+ regs.src_pack = pack_pattern[req->src.format];
+
+ /* set the dest image configuration */
+ regs.dst_cfg = dst_img_cfg[req->dst.format] | PPP_DST_OUT_SEL_AXI;
+ regs.dst_rect = (req->dst_rect.h << 16) | req->dst_rect.w;
+ regs.dst_pack = pack_pattern[req->dst.format];
+
+ /* set src, bpp, start pixel and ystride */
+ regs.src_bpp = bytes_per_pixel[req->src.format];
+ regs.src0 = src_start + req->src.offset;
+ regs.src_ystride = req->src.width * regs.src_bpp;
+ get_chroma_addr(&req->src, &req->src_rect, regs.src0, regs.src_bpp,
+ regs.src_cfg, &regs.src1, &regs.src_ystride);
+ regs.src0 += (req->src_rect.x + (req->src_rect.y * req->src.width)) *
+ regs.src_bpp;
+
+ /* set dst, bpp, start pixel and ystride */
+ regs.dst_bpp = bytes_per_pixel[req->dst.format];
+ regs.dst0 = dst_start + req->dst.offset;
+ regs.dst_ystride = req->dst.width * regs.dst_bpp;
+ get_chroma_addr(&req->dst, &req->dst_rect, regs.dst0, regs.dst_bpp,
+ regs.dst_cfg, &regs.dst1, &regs.dst_ystride);
+ regs.dst0 += (req->dst_rect.x + (req->dst_rect.y * req->dst.width)) *
+ regs.dst_bpp;
+
+ if (!valid_src_dst(src_start, src_len, dst_start, dst_len, req,
+ &regs)) {
+ printk(KERN_ERR "mpd_ppp: final src or dst location is "
+ "invalid, are you trying to make an image too large "
+ "or to place it outside the screen?\n");
+ return -EINVAL;
+ }
+
+ /* set up operation register */
+ regs.op = 0;
+ blit_rotate(req, &regs);
+ blit_convert(req, &regs);
+ if (req->flags & MDP_DITHER)
+ regs.op |= PPP_OP_DITHER_EN;
+ blit_blend(req, &regs);
+ if (blit_scale(mdp, req, &regs)) {
+ printk(KERN_ERR "mpd_ppp: error computing scale for img.\n");
+ return -EINVAL;
+ }
+ blit_blur(mdp, req, &regs);
+ regs.op |= dst_op_chroma[req->dst.format] |
+ src_op_chroma[req->src.format];
+
+ /* if the image is YCRYCB, the x and w must be even */
+ if (unlikely(req->src.format == MDP_YCRYCB_H2V1)) {
+ req->src_rect.x = req->src_rect.x & (~0x1);
+ req->src_rect.w = req->src_rect.w & (~0x1);
+ req->dst_rect.x = req->dst_rect.x & (~0x1);
+ req->dst_rect.w = req->dst_rect.w & (~0x1);
+ }
+ if (get_edge_cond(req, &regs))
+ return -EINVAL;
+
+ send_blit(mdp, req, &regs, src_file, dst_file);
+ return 0;
+}
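+/*
+ * Usage sketch (illustration only, not part of the driver): a caller is
+ * expected to fill a struct mdp_blit_req and hand it to mdp_ppp_blit()
+ * together with the pinned source/destination memory ranges, roughly:
+ *
+ *	struct mdp_blit_req req = {
+ *		.src      = { .width = 320, .height = 240,
+ *			      .format = MDP_RGB_565, .offset = 0 },
+ *		.dst      = { .width = 480, .height = 320,
+ *			      .format = MDP_RGB_565, .offset = 0 },
+ *		.src_rect = { .x = 0, .y = 0, .w = 320, .h = 240 },
+ *		.dst_rect = { .x = 0, .y = 0, .w = 480, .h = 320 },
+ *		.alpha = MDP_ALPHA_NOP,
+ *		.transp_mask = MDP_TRANSP_NOP,
+ *		.flags = MDP_ROT_NOP,
+ *	};
+ *	ret = mdp_ppp_blit(mdp, &req, src_file, src_start, src_len,
+ *			   dst_file, dst_start, dst_len);
+ *
+ * The mdp_blit_req/mdp_img definitions come from the headers included at
+ * the top of this file; only the members referenced here are shown.
+ */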
diff --git a/drivers/video/msm/mdp_scale_tables.c b/drivers/video/msm/mdp_scale_tables.c
new file mode 100644
index 00000000000..604783b2e17
--- /dev/null
+++ b/drivers/video/msm/mdp_scale_tables.c
@@ -0,0 +1,766 @@
+/* drivers/video/msm/mdp_scale_tables.c
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp_scale_tables.h"
+#include "mdp_hw.h"
+
+struct mdp_table_entry mdp_upscale_table[] = {
+ { 0x5fffc, 0x0 },
+ { 0x50200, 0x7fc00000 },
+ { 0x5fffc, 0xff80000d },
+ { 0x50204, 0x7ec003f9 },
+ { 0x5fffc, 0xfec0001c },
+ { 0x50208, 0x7d4003f3 },
+ { 0x5fffc, 0xfe40002b },
+ { 0x5020c, 0x7b8003ed },
+ { 0x5fffc, 0xfd80003c },
+ { 0x50210, 0x794003e8 },
+ { 0x5fffc, 0xfcc0004d },
+ { 0x50214, 0x76c003e4 },
+ { 0x5fffc, 0xfc40005f },
+ { 0x50218, 0x73c003e0 },
+ { 0x5fffc, 0xfb800071 },
+ { 0x5021c, 0x708003de },
+ { 0x5fffc, 0xfac00085 },
+ { 0x50220, 0x6d0003db },
+ { 0x5fffc, 0xfa000098 },
+ { 0x50224, 0x698003d9 },
+ { 0x5fffc, 0xf98000ac },
+ { 0x50228, 0x654003d8 },
+ { 0x5fffc, 0xf8c000c1 },
+ { 0x5022c, 0x610003d7 },
+ { 0x5fffc, 0xf84000d5 },
+ { 0x50230, 0x5c8003d7 },
+ { 0x5fffc, 0xf7c000e9 },
+ { 0x50234, 0x580003d7 },
+ { 0x5fffc, 0xf74000fd },
+ { 0x50238, 0x534003d8 },
+ { 0x5fffc, 0xf6c00112 },
+ { 0x5023c, 0x4e8003d8 },
+ { 0x5fffc, 0xf6800126 },
+ { 0x50240, 0x494003da },
+ { 0x5fffc, 0xf600013a },
+ { 0x50244, 0x448003db },
+ { 0x5fffc, 0xf600014d },
+ { 0x50248, 0x3f4003dd },
+ { 0x5fffc, 0xf5c00160 },
+ { 0x5024c, 0x3a4003df },
+ { 0x5fffc, 0xf5c00172 },
+ { 0x50250, 0x354003e1 },
+ { 0x5fffc, 0xf5c00184 },
+ { 0x50254, 0x304003e3 },
+ { 0x5fffc, 0xf6000195 },
+ { 0x50258, 0x2b0003e6 },
+ { 0x5fffc, 0xf64001a6 },
+ { 0x5025c, 0x260003e8 },
+ { 0x5fffc, 0xf6c001b4 },
+ { 0x50260, 0x214003eb },
+ { 0x5fffc, 0xf78001c2 },
+ { 0x50264, 0x1c4003ee },
+ { 0x5fffc, 0xf80001cf },
+ { 0x50268, 0x17c003f1 },
+ { 0x5fffc, 0xf90001db },
+ { 0x5026c, 0x134003f3 },
+ { 0x5fffc, 0xfa0001e5 },
+ { 0x50270, 0xf0003f6 },
+ { 0x5fffc, 0xfb4001ee },
+ { 0x50274, 0xac003f9 },
+ { 0x5fffc, 0xfcc001f5 },
+ { 0x50278, 0x70003fb },
+ { 0x5fffc, 0xfe4001fb },
+ { 0x5027c, 0x34003fe },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT2TOPT4[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50280, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50284, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50288, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5028c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50290, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50294, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50298, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5029c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x502a0, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x502a4, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x502a8, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x502ac, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x502b0, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x502b4, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x502b8, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x502bc, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x502c0, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x502c4, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x502c8, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x502cc, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x502d0, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x502d4, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x502d8, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x502dc, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x502e0, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x502e4, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x502e8, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x502ec, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x502f0, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x502f4, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x502f8, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x502fc, 0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT4TOPT6[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50280, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50284, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50288, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5028c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50290, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50294, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50298, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5029c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x502a0, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x502a4, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x502a8, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x502ac, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x502b0, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x502b4, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x502b8, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x502bc, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x502c0, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x502c4, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x502c8, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x502cc, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x502d0, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x502d4, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x502d8, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x502dc, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x502e0, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x502e4, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x502e8, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x502ec, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x502f0, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x502f4, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x502f8, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x502fc, 0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT6TOPT8[] = {
+ { 0x5fffc, 0xfe000070 },
+ { 0x50280, 0x4bc00068 },
+ { 0x5fffc, 0xfe000078 },
+ { 0x50284, 0x4bc00060 },
+ { 0x5fffc, 0xfe000080 },
+ { 0x50288, 0x4b800059 },
+ { 0x5fffc, 0xfe000089 },
+ { 0x5028c, 0x4b000052 },
+ { 0x5fffc, 0xfe400091 },
+ { 0x50290, 0x4a80004b },
+ { 0x5fffc, 0xfe40009a },
+ { 0x50294, 0x4a000044 },
+ { 0x5fffc, 0xfe8000a3 },
+ { 0x50298, 0x4940003d },
+ { 0x5fffc, 0xfec000ac },
+ { 0x5029c, 0x48400037 },
+ { 0x5fffc, 0xff0000b4 },
+ { 0x502a0, 0x47800031 },
+ { 0x5fffc, 0xff8000bd },
+ { 0x502a4, 0x4640002b },
+ { 0x5fffc, 0xc5 },
+ { 0x502a8, 0x45000026 },
+ { 0x5fffc, 0x8000ce },
+ { 0x502ac, 0x43800021 },
+ { 0x5fffc, 0x10000d6 },
+ { 0x502b0, 0x4240001c },
+ { 0x5fffc, 0x18000df },
+ { 0x502b4, 0x40800018 },
+ { 0x5fffc, 0x24000e6 },
+ { 0x502b8, 0x3f000014 },
+ { 0x5fffc, 0x30000ee },
+ { 0x502bc, 0x3d400010 },
+ { 0x5fffc, 0x40000f5 },
+ { 0x502c0, 0x3b80000c },
+ { 0x5fffc, 0x50000fc },
+ { 0x502c4, 0x39800009 },
+ { 0x5fffc, 0x6000102 },
+ { 0x502c8, 0x37c00006 },
+ { 0x5fffc, 0x7000109 },
+ { 0x502cc, 0x35800004 },
+ { 0x5fffc, 0x840010e },
+ { 0x502d0, 0x33800002 },
+ { 0x5fffc, 0x9800114 },
+ { 0x502d4, 0x31400000 },
+ { 0x5fffc, 0xac00119 },
+ { 0x502d8, 0x2f4003fe },
+ { 0x5fffc, 0xc40011e },
+ { 0x502dc, 0x2d0003fc },
+ { 0x5fffc, 0xdc00121 },
+ { 0x502e0, 0x2b0003fb },
+ { 0x5fffc, 0xf400125 },
+ { 0x502e4, 0x28c003fa },
+ { 0x5fffc, 0x11000128 },
+ { 0x502e8, 0x268003f9 },
+ { 0x5fffc, 0x12c0012a },
+ { 0x502ec, 0x244003f9 },
+ { 0x5fffc, 0x1480012c },
+ { 0x502f0, 0x224003f8 },
+ { 0x5fffc, 0x1640012e },
+ { 0x502f4, 0x200003f8 },
+ { 0x5fffc, 0x1800012f },
+ { 0x502f8, 0x1e0003f8 },
+ { 0x5fffc, 0x1a00012f },
+ { 0x502fc, 0x1c0003f8 },
+};
+
+static struct mdp_table_entry mdp_downscale_x_table_PT8TO1[] = {
+ { 0x5fffc, 0x0 },
+ { 0x50280, 0x7fc00000 },
+ { 0x5fffc, 0xff80000d },
+ { 0x50284, 0x7ec003f9 },
+ { 0x5fffc, 0xfec0001c },
+ { 0x50288, 0x7d4003f3 },
+ { 0x5fffc, 0xfe40002b },
+ { 0x5028c, 0x7b8003ed },
+ { 0x5fffc, 0xfd80003c },
+ { 0x50290, 0x794003e8 },
+ { 0x5fffc, 0xfcc0004d },
+ { 0x50294, 0x76c003e4 },
+ { 0x5fffc, 0xfc40005f },
+ { 0x50298, 0x73c003e0 },
+ { 0x5fffc, 0xfb800071 },
+ { 0x5029c, 0x708003de },
+ { 0x5fffc, 0xfac00085 },
+ { 0x502a0, 0x6d0003db },
+ { 0x5fffc, 0xfa000098 },
+ { 0x502a4, 0x698003d9 },
+ { 0x5fffc, 0xf98000ac },
+ { 0x502a8, 0x654003d8 },
+ { 0x5fffc, 0xf8c000c1 },
+ { 0x502ac, 0x610003d7 },
+ { 0x5fffc, 0xf84000d5 },
+ { 0x502b0, 0x5c8003d7 },
+ { 0x5fffc, 0xf7c000e9 },
+ { 0x502b4, 0x580003d7 },
+ { 0x5fffc, 0xf74000fd },
+ { 0x502b8, 0x534003d8 },
+ { 0x5fffc, 0xf6c00112 },
+ { 0x502bc, 0x4e8003d8 },
+ { 0x5fffc, 0xf6800126 },
+ { 0x502c0, 0x494003da },
+ { 0x5fffc, 0xf600013a },
+ { 0x502c4, 0x448003db },
+ { 0x5fffc, 0xf600014d },
+ { 0x502c8, 0x3f4003dd },
+ { 0x5fffc, 0xf5c00160 },
+ { 0x502cc, 0x3a4003df },
+ { 0x5fffc, 0xf5c00172 },
+ { 0x502d0, 0x354003e1 },
+ { 0x5fffc, 0xf5c00184 },
+ { 0x502d4, 0x304003e3 },
+ { 0x5fffc, 0xf6000195 },
+ { 0x502d8, 0x2b0003e6 },
+ { 0x5fffc, 0xf64001a6 },
+ { 0x502dc, 0x260003e8 },
+ { 0x5fffc, 0xf6c001b4 },
+ { 0x502e0, 0x214003eb },
+ { 0x5fffc, 0xf78001c2 },
+ { 0x502e4, 0x1c4003ee },
+ { 0x5fffc, 0xf80001cf },
+ { 0x502e8, 0x17c003f1 },
+ { 0x5fffc, 0xf90001db },
+ { 0x502ec, 0x134003f3 },
+ { 0x5fffc, 0xfa0001e5 },
+ { 0x502f0, 0xf0003f6 },
+ { 0x5fffc, 0xfb4001ee },
+ { 0x502f4, 0xac003f9 },
+ { 0x5fffc, 0xfcc001f5 },
+ { 0x502f8, 0x70003fb },
+ { 0x5fffc, 0xfe4001fb },
+ { 0x502fc, 0x34003fe },
+};
+
+struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX] = {
+ [MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_x_table_PT2TOPT4,
+ [MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_x_table_PT4TOPT6,
+ [MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_x_table_PT6TOPT8,
+ [MDP_DOWNSCALE_PT8TO1] = mdp_downscale_x_table_PT8TO1,
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT2TOPT4[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50300, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50304, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50308, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5030c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50310, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50314, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50318, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5031c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x50320, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x50324, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x50328, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x5032c, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x50330, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x50334, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x50338, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x5033c, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x50340, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x50344, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x50348, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x5034c, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x50350, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x50354, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x50358, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x5035c, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x50360, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x50364, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x50368, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x5036c, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x50370, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x50374, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x50378, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x5037c, 0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT4TOPT6[] = {
+ { 0x5fffc, 0x740008c },
+ { 0x50300, 0x33800088 },
+ { 0x5fffc, 0x800008e },
+ { 0x50304, 0x33400084 },
+ { 0x5fffc, 0x8400092 },
+ { 0x50308, 0x33000080 },
+ { 0x5fffc, 0x9000094 },
+ { 0x5030c, 0x3300007b },
+ { 0x5fffc, 0x9c00098 },
+ { 0x50310, 0x32400077 },
+ { 0x5fffc, 0xa40009b },
+ { 0x50314, 0x32000073 },
+ { 0x5fffc, 0xb00009d },
+ { 0x50318, 0x31c0006f },
+ { 0x5fffc, 0xbc000a0 },
+ { 0x5031c, 0x3140006b },
+ { 0x5fffc, 0xc8000a2 },
+ { 0x50320, 0x31000067 },
+ { 0x5fffc, 0xd8000a5 },
+ { 0x50324, 0x30800062 },
+ { 0x5fffc, 0xe4000a8 },
+ { 0x50328, 0x2fc0005f },
+ { 0x5fffc, 0xec000aa },
+ { 0x5032c, 0x2fc0005b },
+ { 0x5fffc, 0xf8000ad },
+ { 0x50330, 0x2f400057 },
+ { 0x5fffc, 0x108000b0 },
+ { 0x50334, 0x2e400054 },
+ { 0x5fffc, 0x114000b2 },
+ { 0x50338, 0x2e000050 },
+ { 0x5fffc, 0x124000b4 },
+ { 0x5033c, 0x2d80004c },
+ { 0x5fffc, 0x130000b6 },
+ { 0x50340, 0x2d000049 },
+ { 0x5fffc, 0x140000b8 },
+ { 0x50344, 0x2c800045 },
+ { 0x5fffc, 0x150000b9 },
+ { 0x50348, 0x2c000042 },
+ { 0x5fffc, 0x15c000bd },
+ { 0x5034c, 0x2b40003e },
+ { 0x5fffc, 0x16c000bf },
+ { 0x50350, 0x2a80003b },
+ { 0x5fffc, 0x17c000bf },
+ { 0x50354, 0x2a000039 },
+ { 0x5fffc, 0x188000c2 },
+ { 0x50358, 0x29400036 },
+ { 0x5fffc, 0x19c000c4 },
+ { 0x5035c, 0x28800032 },
+ { 0x5fffc, 0x1ac000c5 },
+ { 0x50360, 0x2800002f },
+ { 0x5fffc, 0x1bc000c7 },
+ { 0x50364, 0x2740002c },
+ { 0x5fffc, 0x1cc000c8 },
+ { 0x50368, 0x26c00029 },
+ { 0x5fffc, 0x1dc000c9 },
+ { 0x5036c, 0x26000027 },
+ { 0x5fffc, 0x1ec000cc },
+ { 0x50370, 0x25000024 },
+ { 0x5fffc, 0x200000cc },
+ { 0x50374, 0x24800021 },
+ { 0x5fffc, 0x210000cd },
+ { 0x50378, 0x23800020 },
+ { 0x5fffc, 0x220000ce },
+ { 0x5037c, 0x2300001d },
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT6TOPT8[] = {
+ { 0x5fffc, 0xfe000070 },
+ { 0x50300, 0x4bc00068 },
+ { 0x5fffc, 0xfe000078 },
+ { 0x50304, 0x4bc00060 },
+ { 0x5fffc, 0xfe000080 },
+ { 0x50308, 0x4b800059 },
+ { 0x5fffc, 0xfe000089 },
+ { 0x5030c, 0x4b000052 },
+ { 0x5fffc, 0xfe400091 },
+ { 0x50310, 0x4a80004b },
+ { 0x5fffc, 0xfe40009a },
+ { 0x50314, 0x4a000044 },
+ { 0x5fffc, 0xfe8000a3 },
+ { 0x50318, 0x4940003d },
+ { 0x5fffc, 0xfec000ac },
+ { 0x5031c, 0x48400037 },
+ { 0x5fffc, 0xff0000b4 },
+ { 0x50320, 0x47800031 },
+ { 0x5fffc, 0xff8000bd },
+ { 0x50324, 0x4640002b },
+ { 0x5fffc, 0xc5 },
+ { 0x50328, 0x45000026 },
+ { 0x5fffc, 0x8000ce },
+ { 0x5032c, 0x43800021 },
+ { 0x5fffc, 0x10000d6 },
+ { 0x50330, 0x4240001c },
+ { 0x5fffc, 0x18000df },
+ { 0x50334, 0x40800018 },
+ { 0x5fffc, 0x24000e6 },
+ { 0x50338, 0x3f000014 },
+ { 0x5fffc, 0x30000ee },
+ { 0x5033c, 0x3d400010 },
+ { 0x5fffc, 0x40000f5 },
+ { 0x50340, 0x3b80000c },
+ { 0x5fffc, 0x50000fc },
+ { 0x50344, 0x39800009 },
+ { 0x5fffc, 0x6000102 },
+ { 0x50348, 0x37c00006 },
+ { 0x5fffc, 0x7000109 },
+ { 0x5034c, 0x35800004 },
+ { 0x5fffc, 0x840010e },
+ { 0x50350, 0x33800002 },
+ { 0x5fffc, 0x9800114 },
+ { 0x50354, 0x31400000 },
+ { 0x5fffc, 0xac00119 },
+ { 0x50358, 0x2f4003fe },
+ { 0x5fffc, 0xc40011e },
+ { 0x5035c, 0x2d0003fc },
+ { 0x5fffc, 0xdc00121 },
+ { 0x50360, 0x2b0003fb },
+ { 0x5fffc, 0xf400125 },
+ { 0x50364, 0x28c003fa },
+ { 0x5fffc, 0x11000128 },
+ { 0x50368, 0x268003f9 },
+ { 0x5fffc, 0x12c0012a },
+ { 0x5036c, 0x244003f9 },
+ { 0x5fffc, 0x1480012c },
+ { 0x50370, 0x224003f8 },
+ { 0x5fffc, 0x1640012e },
+ { 0x50374, 0x200003f8 },
+ { 0x5fffc, 0x1800012f },
+ { 0x50378, 0x1e0003f8 },
+ { 0x5fffc, 0x1a00012f },
+ { 0x5037c, 0x1c0003f8 },
+};
+
+static struct mdp_table_entry mdp_downscale_y_table_PT8TO1[] = {
+ { 0x5fffc, 0x0 },
+ { 0x50300, 0x7fc00000 },
+ { 0x5fffc, 0xff80000d },
+ { 0x50304, 0x7ec003f9 },
+ { 0x5fffc, 0xfec0001c },
+ { 0x50308, 0x7d4003f3 },
+ { 0x5fffc, 0xfe40002b },
+ { 0x5030c, 0x7b8003ed },
+ { 0x5fffc, 0xfd80003c },
+ { 0x50310, 0x794003e8 },
+ { 0x5fffc, 0xfcc0004d },
+ { 0x50314, 0x76c003e4 },
+ { 0x5fffc, 0xfc40005f },
+ { 0x50318, 0x73c003e0 },
+ { 0x5fffc, 0xfb800071 },
+ { 0x5031c, 0x708003de },
+ { 0x5fffc, 0xfac00085 },
+ { 0x50320, 0x6d0003db },
+ { 0x5fffc, 0xfa000098 },
+ { 0x50324, 0x698003d9 },
+ { 0x5fffc, 0xf98000ac },
+ { 0x50328, 0x654003d8 },
+ { 0x5fffc, 0xf8c000c1 },
+ { 0x5032c, 0x610003d7 },
+ { 0x5fffc, 0xf84000d5 },
+ { 0x50330, 0x5c8003d7 },
+ { 0x5fffc, 0xf7c000e9 },
+ { 0x50334, 0x580003d7 },
+ { 0x5fffc, 0xf74000fd },
+ { 0x50338, 0x534003d8 },
+ { 0x5fffc, 0xf6c00112 },
+ { 0x5033c, 0x4e8003d8 },
+ { 0x5fffc, 0xf6800126 },
+ { 0x50340, 0x494003da },
+ { 0x5fffc, 0xf600013a },
+ { 0x50344, 0x448003db },
+ { 0x5fffc, 0xf600014d },
+ { 0x50348, 0x3f4003dd },
+ { 0x5fffc, 0xf5c00160 },
+ { 0x5034c, 0x3a4003df },
+ { 0x5fffc, 0xf5c00172 },
+ { 0x50350, 0x354003e1 },
+ { 0x5fffc, 0xf5c00184 },
+ { 0x50354, 0x304003e3 },
+ { 0x5fffc, 0xf6000195 },
+ { 0x50358, 0x2b0003e6 },
+ { 0x5fffc, 0xf64001a6 },
+ { 0x5035c, 0x260003e8 },
+ { 0x5fffc, 0xf6c001b4 },
+ { 0x50360, 0x214003eb },
+ { 0x5fffc, 0xf78001c2 },
+ { 0x50364, 0x1c4003ee },
+ { 0x5fffc, 0xf80001cf },
+ { 0x50368, 0x17c003f1 },
+ { 0x5fffc, 0xf90001db },
+ { 0x5036c, 0x134003f3 },
+ { 0x5fffc, 0xfa0001e5 },
+ { 0x50370, 0xf0003f6 },
+ { 0x5fffc, 0xfb4001ee },
+ { 0x50374, 0xac003f9 },
+ { 0x5fffc, 0xfcc001f5 },
+ { 0x50378, 0x70003fb },
+ { 0x5fffc, 0xfe4001fb },
+ { 0x5037c, 0x34003fe },
+};
+
+struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX] = {
+ [MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_y_table_PT2TOPT4,
+ [MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_y_table_PT4TOPT6,
+ [MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_y_table_PT6TOPT8,
+ [MDP_DOWNSCALE_PT8TO1] = mdp_downscale_y_table_PT8TO1,
+};
+
+struct mdp_table_entry mdp_gaussian_blur_table[] = {
+ /* max variance */
+ { 0x5fffc, 0x20000080 },
+ { 0x50280, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50284, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50288, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5028c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50290, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50294, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50298, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5029c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502a0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502a4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502a8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502ac, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502b0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502b4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502b8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502bc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502c0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502c4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502c8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502cc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502d0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502d4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502d8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502dc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502e0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502e4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502e8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502ec, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502f0, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502f4, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502f8, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x502fc, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50300, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50304, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50308, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5030c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50310, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50314, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50318, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5031c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50320, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50324, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50328, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5032c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50330, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50334, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50338, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5033c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50340, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50344, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50348, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5034c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50350, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50354, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50358, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5035c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50360, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50364, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50368, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5036c, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50370, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50374, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x50378, 0x20000080 },
+ { 0x5fffc, 0x20000080 },
+ { 0x5037c, 0x20000080 },
+};
diff --git a/drivers/video/msm/mdp_scale_tables.h b/drivers/video/msm/mdp_scale_tables.h
new file mode 100644
index 00000000000..34077b1af60
--- /dev/null
+++ b/drivers/video/msm/mdp_scale_tables.h
@@ -0,0 +1,38 @@
+/* drivers/video/msm/mdp_scale_tables.h
+ *
+ * Copyright (C) 2007 QUALCOMM Incorporated
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MDP_SCALE_TABLES_H_
+#define _MDP_SCALE_TABLES_H_
+
+#include <linux/types.h>
+struct mdp_table_entry {
+ uint32_t reg;
+ uint32_t val;
+};
+
+extern struct mdp_table_entry mdp_upscale_table[64];
+
+enum {
+ MDP_DOWNSCALE_PT2TOPT4,
+ MDP_DOWNSCALE_PT4TOPT6,
+ MDP_DOWNSCALE_PT6TOPT8,
+ MDP_DOWNSCALE_PT8TO1,
+ MDP_DOWNSCALE_MAX,
+};
+
+extern struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX];
+extern struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX];
+extern struct mdp_table_entry mdp_gaussian_blur_table[];
+
+#endif
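
The tables added above are flat arrays of {reg, val} pairs, and mdp_scale_tables.h only exports the array symbols plus the MDP_DOWNSCALE_* index enum. A minimal sketch of how a caller might program one of these tables, not part of the patch: the MMIO base pointer, the mdp_load_table() helper and the explicit entry count (64 entries per downscale table in this file) are illustrative assumptions; the real driver uses whatever register accessor it already provides.

#include <linux/io.h>
#include "mdp_scale_tables.h"

/* Illustrative helper (assumption): write each {reg, val} pair of a scale
 * table into the MDP register window mapped at 'base'. */
static void mdp_load_table(void __iomem *base,
			   const struct mdp_table_entry *table, int entries)
{
	int i;

	for (i = 0; i < entries; i++)
		writel(table[i].val, base + table[i].reg);
}

/* e.g. mdp_load_table(mdp_base, mdp_downscale_y_table[MDP_DOWNSCALE_PT8TO1], 64); */
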
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
new file mode 100644
index 00000000000..49101dda45e
--- /dev/null
+++ b/drivers/video/msm/msm_fb.c
@@ -0,0 +1,636 @@
+/* drivers/video/msm/msm_fb.c
+ *
+ * Core MSM framebuffer driver.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+
+#include <linux/freezer.h>
+#include <linux/wait.h>
+#include <linux/msm_mdp.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <mach/msm_fb.h>
+#include <mach/board.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+
+#define PRINT_FPS 0
+#define PRINT_BLIT_TIME 0
+
+#define SLEEPING 0x4
+#define UPDATING 0x3
+#define FULL_UPDATE_DONE 0x2
+#define WAKING 0x1
+#define AWAKE 0x0
+
+#define NONE 0
+#define SUSPEND_RESUME 0x1
+#define FPS 0x2
+#define BLIT_TIME 0x4
+#define SHOW_UPDATES 0x8
+
+#define DLOG(mask, fmt, args...) \
+do { \
+ if (msmfb_debug_mask & mask) \
+ printk(KERN_INFO "msmfb: "fmt, ##args); \
+} while (0)
+
+static int msmfb_debug_mask;
+module_param_named(msmfb_debug_mask, msmfb_debug_mask, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+struct mdp_device *mdp;
+
+struct msmfb_info {
+ struct fb_info *fb;
+ struct msm_panel_data *panel;
+ int xres;
+ int yres;
+ unsigned output_format;
+ unsigned yoffset;
+ unsigned frame_requested;
+ unsigned frame_done;
+ int sleeping;
+ unsigned update_frame;
+ struct {
+ int left;
+ int top;
+ int eright; /* exclusive */
+ int ebottom; /* exclusive */
+ } update_info;
+ char *black;
+
+ spinlock_t update_lock;
+ struct mutex panel_init_lock;
+ wait_queue_head_t frame_wq;
+ struct workqueue_struct *resume_workqueue;
+ struct work_struct resume_work;
+ struct msmfb_callback dma_callback;
+ struct msmfb_callback vsync_callback;
+ struct hrtimer fake_vsync;
+ ktime_t vsync_request_time;
+};
+
+static int msmfb_open(struct fb_info *info, int user)
+{
+ return 0;
+}
+
+static int msmfb_release(struct fb_info *info, int user)
+{
+ return 0;
+}
+
+/* Called from dma interrupt handler, must not sleep */
+static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback)
+{
+ unsigned long irq_flags;
+ struct msmfb_info *msmfb = container_of(callback, struct msmfb_info,
+ dma_callback);
+
+ spin_lock_irqsave(&msmfb->update_lock, irq_flags);
+ msmfb->frame_done = msmfb->frame_requested;
+ if (msmfb->sleeping == UPDATING &&
+ msmfb->frame_done == msmfb->update_frame) {
+ DLOG(SUSPEND_RESUME, "full update completed\n");
+ queue_work(msmfb->resume_workqueue, &msmfb->resume_work);
+ }
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ wake_up(&msmfb->frame_wq);
+}
+
+static int msmfb_start_dma(struct msmfb_info *msmfb)
+{
+ uint32_t x, y, w, h;
+ unsigned addr;
+ unsigned long irq_flags;
+ uint32_t yoffset;
+ s64 time_since_request;
+ struct msm_panel_data *panel = msmfb->panel;
+
+ spin_lock_irqsave(&msmfb->update_lock, irq_flags);
+ time_since_request = ktime_to_ns(ktime_sub(ktime_get(),
+ msmfb->vsync_request_time));
+ if (time_since_request > 20 * NSEC_PER_MSEC) {
+ uint32_t us;
+ us = do_div(time_since_request, NSEC_PER_MSEC) / NSEC_PER_USEC;
+ printk(KERN_WARNING "msmfb_start_dma %lld.%03u ms after vsync "
+ "request\n", time_since_request, us);
+ }
+ if (msmfb->frame_done == msmfb->frame_requested) {
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ return -1;
+ }
+ if (msmfb->sleeping == SLEEPING) {
+ DLOG(SUSPEND_RESUME, "tried to start dma while asleep\n");
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ return -1;
+ }
+ x = msmfb->update_info.left;
+ y = msmfb->update_info.top;
+ w = msmfb->update_info.eright - x;
+ h = msmfb->update_info.ebottom - y;
+ yoffset = msmfb->yoffset;
+ msmfb->update_info.left = msmfb->xres + 1;
+ msmfb->update_info.top = msmfb->yres + 1;
+ msmfb->update_info.eright = 0;
+ msmfb->update_info.ebottom = 0;
+ if (unlikely(w > msmfb->xres || h > msmfb->yres ||
+ w == 0 || h == 0)) {
+ printk(KERN_INFO "invalid update: %d %d %d "
+ "%d\n", x, y, w, h);
+ msmfb->frame_done = msmfb->frame_requested;
+ goto error;
+ }
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+
+ addr = ((msmfb->xres * (yoffset + y) + x) * 2);
+ mdp->dma(mdp, addr + msmfb->fb->fix.smem_start,
+ msmfb->xres * 2, w, h, x, y, &msmfb->dma_callback,
+ panel->interface_type);
+ return 0;
+error:
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ /* some clients need to clear their vsync interrupt */
+ if (panel->clear_vsync)
+ panel->clear_vsync(panel);
+ wake_up(&msmfb->frame_wq);
+ return 0;
+}
+
+/* Called from vsync interrupt handler, must not sleep */
+static void msmfb_handle_vsync_interrupt(struct msmfb_callback *callback)
+{
+ struct msmfb_info *msmfb = container_of(callback, struct msmfb_info,
+ vsync_callback);
+ msmfb_start_dma(msmfb);
+}
+
+static enum hrtimer_restart msmfb_fake_vsync(struct hrtimer *timer)
+{
+ struct msmfb_info *msmfb = container_of(timer, struct msmfb_info,
+ fake_vsync);
+ msmfb_start_dma(msmfb);
+ return HRTIMER_NORESTART;
+}
+
+static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top,
+ uint32_t eright, uint32_t ebottom,
+ uint32_t yoffset, int pan_display)
+{
+ struct msmfb_info *msmfb = info->par;
+ struct msm_panel_data *panel = msmfb->panel;
+ unsigned long irq_flags;
+ int sleeping;
+ int retry = 1;
+
+ DLOG(SHOW_UPDATES, "update %d %d %d %d %d %d\n",
+ left, top, eright, ebottom, yoffset, pan_display);
+restart:
+ spin_lock_irqsave(&msmfb->update_lock, irq_flags);
+
+ /* if we are sleeping, on a pan_display wait up to 100ms (to throttle
+ * back drawing), otherwise return */
+ if (msmfb->sleeping == SLEEPING) {
+ DLOG(SUSPEND_RESUME, "drawing while asleep\n");
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ if (pan_display)
+ wait_event_interruptible_timeout(msmfb->frame_wq,
+ msmfb->sleeping != SLEEPING, HZ/10);
+ return;
+ }
+
+ sleeping = msmfb->sleeping;
+ /* on a full update, if the last frame has not completed, wait for it */
+ if (pan_display && (msmfb->frame_requested != msmfb->frame_done ||
+ sleeping == UPDATING)) {
+ int ret;
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ ret = wait_event_interruptible_timeout(msmfb->frame_wq,
+ msmfb->frame_done == msmfb->frame_requested &&
+ msmfb->sleeping != UPDATING, 5 * HZ);
+ if (ret <= 0 && (msmfb->frame_requested != msmfb->frame_done ||
+ msmfb->sleeping == UPDATING)) {
+ if (retry && panel->request_vsync &&
+ (sleeping == AWAKE)) {
+ panel->request_vsync(panel,
+ &msmfb->vsync_callback);
+ retry = 0;
+ printk(KERN_WARNING "msmfb_pan_display timeout "
+ "rerequest vsync\n");
+ } else {
+ printk(KERN_WARNING "msmfb_pan_display timeout "
+ "waiting for frame start, %d %d\n",
+ msmfb->frame_requested,
+ msmfb->frame_done);
+ return;
+ }
+ }
+ goto restart;
+ }
+
+
+ msmfb->frame_requested++;
+ /* if necessary, update the y offset; if this is the
+ * first full update on resume, set the sleeping state */
+ if (pan_display) {
+ msmfb->yoffset = yoffset;
+ if (left == 0 && top == 0 && eright == info->var.xres &&
+ ebottom == info->var.yres) {
+ if (sleeping == WAKING) {
+ msmfb->update_frame = msmfb->frame_requested;
+ DLOG(SUSPEND_RESUME, "full update starting\n");
+ msmfb->sleeping = UPDATING;
+ }
+ }
+ }
+
+ /* set the update request */
+ if (left < msmfb->update_info.left)
+ msmfb->update_info.left = left;
+ if (top < msmfb->update_info.top)
+ msmfb->update_info.top = top;
+ if (eright > msmfb->update_info.eright)
+ msmfb->update_info.eright = eright;
+ if (ebottom > msmfb->update_info.ebottom)
+ msmfb->update_info.ebottom = ebottom;
+ DLOG(SHOW_UPDATES, "update queued %d %d %d %d %d\n",
+ msmfb->update_info.left, msmfb->update_info.top,
+ msmfb->update_info.eright, msmfb->update_info.ebottom,
+ msmfb->yoffset);
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+
+ /* if the panel is all the way on, wait for vsync; otherwise sleep
+ * for 16 ms (long enough for the dma to the panel) and then begin dma */
+ msmfb->vsync_request_time = ktime_get();
+ if (panel->request_vsync && (sleeping == AWAKE)) {
+ panel->request_vsync(panel, &msmfb->vsync_callback);
+ } else {
+ if (!hrtimer_active(&msmfb->fake_vsync)) {
+ hrtimer_start(&msmfb->fake_vsync,
+ ktime_set(0, NSEC_PER_SEC/60),
+ HRTIMER_MODE_REL);
+ }
+ }
+}
+
+static void msmfb_update(struct fb_info *info, uint32_t left, uint32_t top,
+ uint32_t eright, uint32_t ebottom)
+{
+ msmfb_pan_update(info, left, top, eright, ebottom, 0, 0);
+}
+
+static void power_on_panel(struct work_struct *work)
+{
+ struct msmfb_info *msmfb =
+ container_of(work, struct msmfb_info, resume_work);
+ struct msm_panel_data *panel = msmfb->panel;
+ unsigned long irq_flags;
+
+ mutex_lock(&msmfb->panel_init_lock);
+ DLOG(SUSPEND_RESUME, "turning on panel\n");
+ if (msmfb->sleeping == UPDATING) {
+ if (panel->unblank(panel)) {
+ printk(KERN_INFO "msmfb: panel unblank failed,"
+ "not starting drawing\n");
+ goto error;
+ }
+ spin_lock_irqsave(&msmfb->update_lock, irq_flags);
+ msmfb->sleeping = AWAKE;
+ wake_up(&msmfb->frame_wq);
+ spin_unlock_irqrestore(&msmfb->update_lock, irq_flags);
+ }
+error:
+ mutex_unlock(&msmfb->panel_init_lock);
+}
+
+
+static int msmfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ if ((var->xres != info->var.xres) ||
+ (var->yres != info->var.yres) ||
+ (var->xres_virtual != info->var.xres_virtual) ||
+ (var->yres_virtual != info->var.yres_virtual) ||
+ (var->xoffset != info->var.xoffset) ||
+ (var->bits_per_pixel != info->var.bits_per_pixel) ||
+ (var->grayscale != info->var.grayscale))
+ return -EINVAL;
+ return 0;
+}
+
+int msmfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct msmfb_info *msmfb = info->par;
+ struct msm_panel_data *panel = msmfb->panel;
+
+ /* "UPDT" */
+ if ((panel->caps & MSMFB_CAP_PARTIAL_UPDATES) &&
+ (var->reserved[0] == 0x54445055)) {
+ msmfb_pan_update(info, var->reserved[1] & 0xffff,
+ var->reserved[1] >> 16,
+ var->reserved[2] & 0xffff,
+ var->reserved[2] >> 16, var->yoffset, 1);
+ } else {
+ msmfb_pan_update(info, 0, 0, info->var.xres, info->var.yres,
+ var->yoffset, 1);
+ }
+ return 0;
+}
+
+static void msmfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
+{
+ cfb_fillrect(p, rect);
+ msmfb_update(p, rect->dx, rect->dy, rect->dx + rect->width,
+ rect->dy + rect->height);
+}
+
+static void msmfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+{
+ cfb_copyarea(p, area);
+ msmfb_update(p, area->dx, area->dy, area->dx + area->width,
+ area->dy + area->height);
+}
+
+static void msmfb_imageblit(struct fb_info *p, const struct fb_image *image)
+{
+ cfb_imageblit(p, image);
+ msmfb_update(p, image->dx, image->dy, image->dx + image->width,
+ image->dy + image->height);
+}
+
+
+static int msmfb_blit(struct fb_info *info,
+ void __user *p)
+{
+ struct mdp_blit_req req;
+ struct mdp_blit_req_list req_list;
+ int i;
+ int ret;
+
+ if (copy_from_user(&req_list, p, sizeof(req_list)))
+ return -EFAULT;
+
+ for (i = 0; i < req_list.count; i++) {
+ struct mdp_blit_req_list *list =
+ (struct mdp_blit_req_list *)p;
+ if (copy_from_user(&req, &list->req[i], sizeof(req)))
+ return -EFAULT;
+ ret = mdp->blit(mdp, info, &req);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+
+DEFINE_MUTEX(mdp_ppp_lock);
+
+static int msmfb_ioctl(struct fb_info *p, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ switch (cmd) {
+ case MSMFB_GRP_DISP:
+ mdp->set_grp_disp(mdp, arg);
+ break;
+ case MSMFB_BLIT:
+ ret = msmfb_blit(p, argp);
+ if (ret)
+ return ret;
+ break;
+ default:
+ printk(KERN_INFO "msmfb unknown ioctl: %d\n", cmd);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct fb_ops msmfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = msmfb_open,
+ .fb_release = msmfb_release,
+ .fb_check_var = msmfb_check_var,
+ .fb_pan_display = msmfb_pan_display,
+ .fb_fillrect = msmfb_fillrect,
+ .fb_copyarea = msmfb_copyarea,
+ .fb_imageblit = msmfb_imageblit,
+ .fb_ioctl = msmfb_ioctl,
+};
+
+static unsigned PP[16];
+
+
+
+#define BITS_PER_PIXEL 16
+
+static void setup_fb_info(struct msmfb_info *msmfb)
+{
+ struct fb_info *fb_info = msmfb->fb;
+ int r;
+
+ /* finish setting up the fb_info struct */
+ strncpy(fb_info->fix.id, "msmfb", 16);
+ fb_info->fix.ypanstep = 1;
+
+ fb_info->fbops = &msmfb_ops;
+ fb_info->flags = FBINFO_DEFAULT;
+
+ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+ fb_info->fix.line_length = msmfb->xres * 2;
+
+ fb_info->var.xres = msmfb->xres;
+ fb_info->var.yres = msmfb->yres;
+ fb_info->var.width = msmfb->panel->fb_data->width;
+ fb_info->var.height = msmfb->panel->fb_data->height;
+ fb_info->var.xres_virtual = msmfb->xres;
+ fb_info->var.yres_virtual = msmfb->yres * 2;
+ fb_info->var.bits_per_pixel = BITS_PER_PIXEL;
+ fb_info->var.accel_flags = 0;
+
+ fb_info->var.yoffset = 0;
+
+ if (msmfb->panel->caps & MSMFB_CAP_PARTIAL_UPDATES) {
+ fb_info->var.reserved[0] = 0x54445055;
+ fb_info->var.reserved[1] = 0;
+ fb_info->var.reserved[2] = (uint16_t)msmfb->xres |
+ ((uint32_t)msmfb->yres << 16);
+ }
+
+ fb_info->var.red.offset = 11;
+ fb_info->var.red.length = 5;
+ fb_info->var.red.msb_right = 0;
+ fb_info->var.green.offset = 5;
+ fb_info->var.green.length = 6;
+ fb_info->var.green.msb_right = 0;
+ fb_info->var.blue.offset = 0;
+ fb_info->var.blue.length = 5;
+ fb_info->var.blue.msb_right = 0;
+
+ r = fb_alloc_cmap(&fb_info->cmap, 16, 0);
+ fb_info->pseudo_palette = PP;
+
+ PP[0] = 0;
+ for (r = 1; r < 16; r++)
+ PP[r] = 0xffffffff;
+}
+
+static int setup_fbmem(struct msmfb_info *msmfb, struct platform_device *pdev)
+{
+ struct fb_info *fb = msmfb->fb;
+ struct resource *resource;
+ unsigned long size = msmfb->xres * msmfb->yres *
+ (BITS_PER_PIXEL >> 3) * 2;
+ unsigned char *fbram;
+
+ /* board file might have attached a resource describing an fb */
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!resource)
+ return -EINVAL;
+
+ /* check the resource is large enough to fit the fb */
+ if (resource->end - resource->start < size) {
+ printk(KERN_ERR "allocated resource is too small for "
+ "fb\n");
+ return -ENOMEM;
+ }
+ fb->fix.smem_start = resource->start;
+ fb->fix.smem_len = resource->end - resource->start;
+ fbram = ioremap(resource->start,
+ resource->end - resource->start);
+ if (fbram == 0) {
+ printk(KERN_ERR "msmfb: cannot allocate fbram!\n");
+ return -ENOMEM;
+ }
+ fb->screen_base = fbram;
+ return 0;
+}
+
+static int msmfb_probe(struct platform_device *pdev)
+{
+ struct fb_info *fb;
+ struct msmfb_info *msmfb;
+ struct msm_panel_data *panel = pdev->dev.platform_data;
+ int ret;
+
+ if (!panel) {
+ pr_err("msmfb_probe: no platform data\n");
+ return -EINVAL;
+ }
+ if (!panel->fb_data) {
+ pr_err("msmfb_probe: no fb_data\n");
+ return -EINVAL;
+ }
+
+ fb = framebuffer_alloc(sizeof(struct msmfb_info), &pdev->dev);
+ if (!fb)
+ return -ENOMEM;
+ msmfb = fb->par;
+ msmfb->fb = fb;
+ msmfb->panel = panel;
+ msmfb->xres = panel->fb_data->xres;
+ msmfb->yres = panel->fb_data->yres;
+
+ ret = setup_fbmem(msmfb, pdev);
+ if (ret)
+ goto error_setup_fbmem;
+
+ setup_fb_info(msmfb);
+
+ spin_lock_init(&msmfb->update_lock);
+ mutex_init(&msmfb->panel_init_lock);
+ init_waitqueue_head(&msmfb->frame_wq);
+ msmfb->resume_workqueue = create_workqueue("panel_on");
+ if (msmfb->resume_workqueue == NULL) {
+ printk(KERN_ERR "failed to create panel_on workqueue\n");
+ ret = -ENOMEM;
+ goto error_create_workqueue;
+ }
+ INIT_WORK(&msmfb->resume_work, power_on_panel);
+ msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres,
+ GFP_KERNEL);
+
+ printk(KERN_INFO "msmfb_probe() installing %d x %d panel\n",
+ msmfb->xres, msmfb->yres);
+
+ msmfb->dma_callback.func = msmfb_handle_dma_interrupt;
+ msmfb->vsync_callback.func = msmfb_handle_vsync_interrupt;
+ hrtimer_init(&msmfb->fake_vsync, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+
+
+ msmfb->fake_vsync.function = msmfb_fake_vsync;
+
+ ret = register_framebuffer(fb);
+ if (ret)
+ goto error_register_framebuffer;
+
+ msmfb->sleeping = WAKING;
+
+ return 0;
+
+error_register_framebuffer:
+ destroy_workqueue(msmfb->resume_workqueue);
+error_create_workqueue:
+ iounmap(fb->screen_base);
+error_setup_fbmem:
+ framebuffer_release(msmfb->fb);
+ return ret;
+}
+
+static struct platform_driver msm_panel_driver = {
+ /* TODO: a remove callback still needs to be written */
+ .probe = msmfb_probe,
+ .driver = {.name = "msm_panel"},
+};
+
+
+static int msmfb_add_mdp_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ /* might need locking if multiple mdp devices */
+ if (mdp)
+ return 0;
+ mdp = container_of(dev, struct mdp_device, dev);
+ return platform_driver_register(&msm_panel_driver);
+}
+
+static void msmfb_remove_mdp_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ /* might need locking if multiple mdp devices */
+ if (dev != &mdp->dev)
+ return;
+ platform_driver_unregister(&msm_panel_driver);
+ mdp = NULL;
+}
+
+static struct class_interface msm_fb_interface = {
+ .add_dev = &msmfb_add_mdp_device,
+ .remove_dev = &msmfb_remove_mdp_device,
+};
+
+static int __init msmfb_init(void)
+{
+ return register_mdp_client(&msm_fb_interface);
+}
+
+module_init(msmfb_init);
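
For reference, the partial-update path above (msmfb_pan_display() together with setup_fb_info()) decodes an update rectangle from fb_var_screeninfo.reserved[] when the panel reports MSMFB_CAP_PARTIAL_UPDATES: reserved[0] carries the 0x54445055 ("UPDT") magic, reserved[1] packs left and top, and reserved[2] packs the exclusive right and bottom edges. A hedged userspace sketch, not part of the patch; the device node and the rectangle values in the usage comment are illustrative only.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

/* Request a partial update in the form msmfb_pan_display() expects. */
static int msmfb_partial_update(int fd, uint32_t left, uint32_t top,
				uint32_t eright, uint32_t ebottom)
{
	struct fb_var_screeninfo var;

	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return -1;

	var.reserved[0] = 0x54445055;			/* "UPDT" magic */
	var.reserved[1] = left | (top << 16);
	var.reserved[2] = eright | (ebottom << 16);	/* exclusive edges */

	return ioctl(fd, FBIOPAN_DISPLAY, &var);
}

/* e.g. msmfb_partial_update(open("/dev/fb0", O_RDWR), 0, 0, 240, 64); */
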
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 44408850e2e..551e3e9c4cb 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -7,6 +7,69 @@ config FB_OMAP
help
Frame buffer driver for OMAP based boards.
+config FB_OMAP_LCD_VGA
+ bool "Use LCD in VGA mode"
+ depends on MACH_OMAP_3430SDP || MACH_OMAP_LDP
+
+choice
+ depends on FB_OMAP && MACH_OVERO
+ prompt "Screen resolution"
+ default FB_OMAP_079M3R
+ help
+ Select the desired screen resolution.
+
+config FB_OMAP_031M3R
+ boolean "640 x 480 @ 60 Hz Reduced blanking"
+
+config FB_OMAP_048M3R
+ boolean "800 x 600 @ 60 Hz Reduced blanking"
+
+config FB_OMAP_079M3R
+ boolean "1024 x 768 @ 60 Hz Reduced blanking"
+
+config FB_OMAP_092M9R
+ boolean "1280 x 720 @ 60 Hz Reduced blanking"
+
+endchoice
+
+config FB_OMAP_LCDC_EXTERNAL
+ bool "External LCD controller support"
+ depends on FB_OMAP
+ help
+ Say Y here, if you want to have support for boards with an
+ external LCD controller connected to the SoSSI/RFBI interface.
+
+config FB_OMAP_LCDC_HWA742
+ bool "Epson HWA742 LCD controller support"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here if you want to have support for the external
+ Epson HWA742 LCD controller.
+
+config FB_OMAP_LCDC_BLIZZARD
+ bool "Epson Blizzard LCD controller support"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here if you want to have support for the external
+ Epson Blizzard LCD controller.
+
+config FB_OMAP_MANUAL_UPDATE
+ bool "Default to manual update mode"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here, if your user-space applications are capable of
+ notifying the frame buffer driver when a change has occurred in
+ the frame buffer content and thus a reload of the image data to
+ the external frame buffer is required. If unsure, say N.
+
+config FB_OMAP_LCD_MIPID
+ bool "MIPI DBI-C/DCS compatible LCD support"
+ depends on FB_OMAP && SPI_MASTER
+ help
+ Say Y here if you want to have support for LCDs compatible with
+ the Mobile Industry Processor Interface DBI-C/DCS
+ specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
+
config FB_OMAP_BOOTLOADER_INIT
bool "Check bootloader initialization"
depends on FB_OMAP
@@ -36,23 +99,4 @@ config FB_OMAP_DMA_TUNE
answer yes. Answer no if you have a dedicated video
memory, or don't use any of the accelerated features.
-config FB_OMAP_LCDC_EXTERNAL
- bool "External LCD controller support"
- depends on FB_OMAP
- help
- Say Y here, if you want to have support for boards with an
- external LCD controller connected to the SoSSI/RFBI interface.
-
-config FB_OMAP_LCDC_HWA742
- bool "Epson HWA742 LCD controller support"
- depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
- help
- Say Y here if you want to have support for the external
- Epson HWA742 LCD controller.
-config FB_OMAP_LCDC_BLIZZARD
- bool "Epson Blizzard LCD controller support"
- depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
- help
- Say Y here if you want to have support for the external
- Epson Blizzard LCD controller.
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
index ed13889c116..b63b198d1f0 100644
--- a/drivers/video/omap/Makefile
+++ b/drivers/video/omap/Makefile
@@ -8,6 +8,7 @@ objs-yy := omapfb_main.o
objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o
objs-y$(CONFIG_ARCH_OMAP2) += dispc.o
+objs-y$(CONFIG_ARCH_OMAP3) += dispc.o
objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
@@ -15,6 +16,7 @@ objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
+objs-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
@@ -24,5 +26,15 @@ objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
+objs-y$(CONFIG_MACH_OMAP_APOLLON) += lcd_apollon.o
+objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o
+objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o
+objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o
+objs-y$(CONFIG_MACH_OMAP2EVM) += lcd_omap2evm.o
+objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o
+objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o
+objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
+objs-y$(CONFIG_MACH_OVERO) += lcd_overo.o
+
omapfb-objs := $(objs-yy)
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index 9dfcf39d336..d5e59556f9e 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -44,6 +44,7 @@
#define BLIZZARD_CLK_SRC 0x0e
#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
#define BLIZZARD_MEM_BANK0_STATUS 0x14
+#define BLIZZARD_PANEL_CONFIGURATION 0x28
#define BLIZZARD_HDISP 0x2a
#define BLIZZARD_HNDP 0x2c
#define BLIZZARD_VDISP0 0x2e
@@ -162,6 +163,10 @@ struct blizzard_struct {
int vid_scaled;
int last_color_mode;
int zoom_on;
+ int zoom_area_gx1;
+ int zoom_area_gx2;
+ int zoom_area_gy1;
+ int zoom_area_gy2;
int screen_width;
int screen_height;
unsigned te_connected:1;
@@ -513,6 +518,13 @@ static int do_full_screen_update(struct blizzard_request *req)
return REQ_PENDING;
}
+static int check_1d_intersect(int a1, int a2, int b1, int b2)
+{
+ if (a2 <= b1 || b2 <= a1)
+ return 0;
+ return 1;
+}
+
/* Setup all planes with an overlapping area with the update window. */
static int do_partial_update(struct blizzard_request *req, int plane,
int x, int y, int w, int h,
@@ -525,6 +537,7 @@ static int do_partial_update(struct blizzard_request *req, int plane,
int color_mode;
int flags;
int zoom_off;
+ int have_zoom_for_this_update = 0;
/* Global coordinates, relative to pixel 0,0 of the LCD */
gx1 = x + blizzard.plane[plane].pos_x;
@@ -544,10 +557,6 @@ static int do_partial_update(struct blizzard_request *req, int plane,
gx2_out = gx1_out + w_out;
gy2_out = gy1_out + h_out;
}
- zoom_off = blizzard.zoom_on && gx1 == 0 && gy1 == 0 &&
- w == blizzard.screen_width && h == blizzard.screen_height;
- blizzard.zoom_on = (!zoom_off && blizzard.zoom_on) ||
- (w < w_out || h < h_out);
for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
struct plane_info *p = &blizzard.plane[i];
@@ -653,8 +662,49 @@ static int do_partial_update(struct blizzard_request *req, int plane,
else
disable_tearsync();
+ if ((gx2_out - gx1_out) != (gx2 - gx1) ||
+ (gy2_out - gy1_out) != (gy2 - gy1))
+ have_zoom_for_this_update = 1;
+
+ /* 'background' type of screen update (as opposed to 'destructive')
+ * can be used to disable scaling if scaling is active */
+ zoom_off = blizzard.zoom_on && !have_zoom_for_this_update &&
+ (gx1_out == 0) && (gx2_out == blizzard.screen_width) &&
+ (gy1_out == 0) && (gy2_out == blizzard.screen_height) &&
+ (gx1 == 0) && (gy1 == 0);
+
+ if (blizzard.zoom_on && !have_zoom_for_this_update && !zoom_off &&
+ check_1d_intersect(blizzard.zoom_area_gx1, blizzard.zoom_area_gx2,
+ gx1_out, gx2_out) &&
+ check_1d_intersect(blizzard.zoom_area_gy1, blizzard.zoom_area_gy2,
+ gy1_out, gy2_out)) {
+ /* Previous screen update was using scaling, current update
+ * is not using it. Additionally, current screen update is
+ * going to overlap with the scaled area. Scaling needs to be
+ * disabled in order to avoid 'magnifying glass' effect.
+ * Dummy setup of background window can be used for this.
+ */
+ set_window_regs(0, 0, blizzard.screen_width,
+ blizzard.screen_height,
+ 0, 0, blizzard.screen_width,
+ blizzard.screen_height,
+ BLIZZARD_COLOR_RGB565, 1, flags);
+ blizzard.zoom_on = 0;
+ }
+
+ /* remember the scaling settings if we have a scaled update */
+ if (have_zoom_for_this_update) {
+ blizzard.zoom_on = 1;
+ blizzard.zoom_area_gx1 = gx1_out;
+ blizzard.zoom_area_gx2 = gx2_out;
+ blizzard.zoom_area_gy1 = gy1_out;
+ blizzard.zoom_area_gy2 = gy2_out;
+ }
+
set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
color_mode, zoom_off, flags);
+ if (zoom_off)
+ blizzard.zoom_on = 0;
blizzard.extif->set_bits_per_cycle(16);
/* set_window_regs has left the register index at the right
@@ -908,6 +958,35 @@ static int blizzard_set_scale(int plane, int orig_w, int orig_h,
return 0;
}
+static int blizzard_set_rotate(int angle)
+{
+ u32 l;
+
+ l = blizzard_read_reg(BLIZZARD_PANEL_CONFIGURATION);
+ l &= ~0x03;
+
+ switch (angle) {
+ case 0:
+ l = l | 0x00;
+ break;
+ case 90:
+ l = l | 0x03;
+ break;
+ case 180:
+ l = l | 0x02;
+ break;
+ case 270:
+ l = l | 0x01;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ blizzard_write_reg(BLIZZARD_PANEL_CONFIGURATION, l);
+
+ return 0;
+}
+
static int blizzard_enable_plane(int plane, int enable)
{
if (enable)
@@ -1285,7 +1364,8 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE |
OMAPFB_CAPS_WINDOW_SCALE |
- OMAPFB_CAPS_WINDOW_OVERLAY;
+ OMAPFB_CAPS_WINDOW_OVERLAY |
+ OMAPFB_CAPS_WINDOW_ROTATE;
if (blizzard.te_connected)
caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
@@ -1560,6 +1640,7 @@ struct lcd_ctrl blizzard_ctrl = {
.setup_plane = blizzard_setup_plane,
.set_scale = blizzard_set_scale,
.enable_plane = blizzard_enable_plane,
+ .set_rotate = blizzard_set_rotate,
.update_window = blizzard_update_window_async,
.sync = blizzard_sync,
.suspend = blizzard_suspend,
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 915439dc05a..80a11d078df 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -155,6 +155,8 @@ struct resmap {
unsigned long *map;
};
+#define MAX_IRQ_HANDLERS 4
+
static struct {
void __iomem *base;
@@ -167,9 +169,11 @@ static struct {
int ext_mode;
- unsigned long enabled_irqs;
- void (*irq_callback)(void *);
- void *irq_callback_data;
+ struct {
+ u32 irq_mask;
+ void (*callback)(void *);
+ void *data;
+ } irq_handlers[MAX_IRQ_HANDLERS];
struct completion frame_done;
int fir_hinc[OMAPFB_PLANE_NUM];
@@ -286,7 +290,7 @@ static void setup_plane_fifo(int plane, int ext_mode)
BUG_ON(plane > 2);
l = dispc_read_reg(fsz_reg[plane]);
- l &= FLD_MASK(0, 9);
+ l &= FLD_MASK(0, 11);
if (ext_mode) {
low = l * 3 / 4;
high = l;
@@ -294,7 +298,7 @@ static void setup_plane_fifo(int plane, int ext_mode)
low = l / 4;
high = l * 3 / 4;
}
- MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 9) | FLD_MASK(0, 9),
+ MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 12) | FLD_MASK(0, 12),
(high << 16) | low);
}
@@ -809,57 +813,74 @@ static void set_lcd_timings(void)
panel->pixel_clock = fck / lck_div / pck_div / 1000;
}
-int omap_dispc_request_irq(void (*callback)(void *data), void *data)
+static void recalc_irq_mask(void)
{
- int r = 0;
+ int i;
+ unsigned long irq_mask = DISPC_IRQ_MASK_ERROR;
- BUG_ON(callback == NULL);
+ for (i = 0; i < MAX_IRQ_HANDLERS; i++) {
+ if (!dispc.irq_handlers[i].callback)
+ continue;
- if (dispc.irq_callback)
- r = -EBUSY;
- else {
- dispc.irq_callback = callback;
- dispc.irq_callback_data = data;
+ irq_mask |= dispc.irq_handlers[i].irq_mask;
}
- return r;
-}
-EXPORT_SYMBOL(omap_dispc_request_irq);
-
-void omap_dispc_enable_irqs(int irq_mask)
-{
enable_lcd_clocks(1);
- dispc.enabled_irqs = irq_mask;
- irq_mask |= DISPC_IRQ_MASK_ERROR;
MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
enable_lcd_clocks(0);
}
-EXPORT_SYMBOL(omap_dispc_enable_irqs);
-void omap_dispc_disable_irqs(int irq_mask)
+int omap_dispc_request_irq(unsigned long irq_mask, void (*callback)(void *data),
+ void *data)
{
- enable_lcd_clocks(1);
- dispc.enabled_irqs &= ~irq_mask;
- irq_mask &= ~DISPC_IRQ_MASK_ERROR;
- MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
- enable_lcd_clocks(0);
+ int i;
+
+ BUG_ON(callback == NULL);
+
+ for (i = 0; i < MAX_IRQ_HANDLERS; i++) {
+ if (dispc.irq_handlers[i].callback)
+ continue;
+
+ dispc.irq_handlers[i].irq_mask = irq_mask;
+ dispc.irq_handlers[i].callback = callback;
+ dispc.irq_handlers[i].data = data;
+ recalc_irq_mask();
+
+ return 0;
+ }
+
+ return -EBUSY;
}
-EXPORT_SYMBOL(omap_dispc_disable_irqs);
+EXPORT_SYMBOL(omap_dispc_request_irq);
-void omap_dispc_free_irq(void)
+void omap_dispc_free_irq(unsigned long irq_mask, void (*callback)(void *data),
+ void *data)
{
- enable_lcd_clocks(1);
- omap_dispc_disable_irqs(DISPC_IRQ_MASK_ALL);
- dispc.irq_callback = NULL;
- dispc.irq_callback_data = NULL;
- enable_lcd_clocks(0);
+ int i;
+
+ for (i = 0; i < MAX_IRQ_HANDLERS; i++) {
+ if (dispc.irq_handlers[i].callback == callback &&
+ dispc.irq_handlers[i].data == data) {
+ dispc.irq_handlers[i].irq_mask = 0;
+ dispc.irq_handlers[i].callback = NULL;
+ dispc.irq_handlers[i].data = NULL;
+ recalc_irq_mask();
+ return;
+ }
+ }
+
+ BUG();
}
EXPORT_SYMBOL(omap_dispc_free_irq);
static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
{
- u32 stat = dispc_read_reg(DISPC_IRQSTATUS);
+ u32 stat;
+ int i = 0;
+
+ enable_lcd_clocks(1);
+ stat = dispc_read_reg(DISPC_IRQSTATUS);
if (stat & DISPC_IRQ_FRAMEMASK)
complete(&dispc.frame_done);
@@ -870,11 +891,17 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
}
}
- if ((stat & dispc.enabled_irqs) && dispc.irq_callback)
- dispc.irq_callback(dispc.irq_callback_data);
+ for (i = 0; i < MAX_IRQ_HANDLERS; i++) {
+ if (unlikely(dispc.irq_handlers[i].callback &&
+ (stat & dispc.irq_handlers[i].irq_mask)))
+ dispc.irq_handlers[i].callback(
+ dispc.irq_handlers[i].data);
+ }
dispc_write_reg(DISPC_IRQSTATUS, stat);
+ enable_lcd_clocks(0);
+
return IRQ_HANDLED;
}
@@ -913,18 +940,13 @@ static void put_dss_clocks(void)
static void enable_lcd_clocks(int enable)
{
- if (enable)
+ if (enable) {
+ clk_enable(dispc.dss_ick);
clk_enable(dispc.dss1_fck);
- else
+ } else {
clk_disable(dispc.dss1_fck);
-}
-
-static void enable_interface_clocks(int enable)
-{
- if (enable)
- clk_enable(dispc.dss_ick);
- else
clk_disable(dispc.dss_ick);
+ }
}
static void enable_digit_clocks(int enable)
@@ -1365,7 +1387,6 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
if ((r = get_dss_clocks()) < 0)
goto fail0;
- enable_interface_clocks(1);
enable_lcd_clocks(1);
#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
@@ -1396,10 +1417,10 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
enable_digit_clocks(0);
}
- /* Enable smart idle and autoidle */
- l = dispc_read_reg(DISPC_CONTROL);
+ /* Enable smart standby/idle, autoidle and wakeup */
+ l = dispc_read_reg(DISPC_SYSCONFIG);
l &= ~((3 << 12) | (3 << 3));
- l |= (2 << 12) | (2 << 3) | (1 << 0);
+ l |= (2 << 12) | (2 << 3) | (1 << 2) | (1 << 0);
dispc_write_reg(DISPC_SYSCONFIG, l);
omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);
@@ -1409,10 +1430,9 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
dispc_write_reg(DISPC_CONFIG, l);
l = dispc_read_reg(DISPC_IRQSTATUS);
- dispc_write_reg(l, DISPC_IRQSTATUS);
+ dispc_write_reg(DISPC_IRQSTATUS, l);
- /* Enable those that we handle always */
- omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);
+ recalc_irq_mask();
if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
0, MODULE_NAME, fbdev)) < 0) {
@@ -1469,7 +1489,6 @@ fail2:
free_irq(INT_24XX_DSS_IRQ, fbdev);
fail1:
enable_lcd_clocks(0);
- enable_interface_clocks(0);
put_dss_clocks();
fail0:
iounmap(dispc.base);
@@ -1487,7 +1506,6 @@ static void omap_dispc_cleanup(void)
cleanup_fbmem();
free_palette_ram();
free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
- enable_interface_clocks(0);
put_dss_clocks();
iounmap(dispc.base);
}
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h
index ef720a78f6d..c15ea77f060 100644
--- a/drivers/video/omap/dispc.h
+++ b/drivers/video/omap/dispc.h
@@ -37,9 +37,10 @@ extern void omap_dispc_set_lcd_size(int width, int height);
extern void omap_dispc_enable_lcd_out(int enable);
extern void omap_dispc_enable_digit_out(int enable);
-extern int omap_dispc_request_irq(void (*callback)(void *data), void *data);
-extern void omap_dispc_free_irq(void);
+extern int omap_dispc_request_irq(unsigned long irq_mask,
+ void (*callback)(void *data), void *data);
+extern void omap_dispc_free_irq(unsigned long irq_mask,
+ void (*callback)(void *data), void *data);
extern const struct lcd_ctrl omap2_int_ctrl;
-
#endif
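
The dispc.c changes in this patch replace the single stored IRQ callback with an array of up to MAX_IRQ_HANDLERS {irq_mask, callback, data} slots, and dispc.h now exports the widened request/free prototypes shown above. A hedged sketch of a client of the reworked interface, not part of the patch: my_irq_mask is left for the caller to set to one of the driver's DISPC_IRQ_* mask bits, and my_irq_cb()/my_data are illustrative names.

#include "dispc.h"

static unsigned long my_irq_mask;	/* set to a DISPC_IRQ_* bit (assumption) */
static void *my_data;

static void my_irq_cb(void *data)
{
	/* invoked from omap_dispc_irq_handler() when a bit in my_irq_mask
	 * is set in DISPC_IRQSTATUS */
}

static int my_client_init(void)
{
	/* returns -EBUSY once all MAX_IRQ_HANDLERS slots are taken */
	return omap_dispc_request_irq(my_irq_mask, my_irq_cb, my_data);
}

static void my_client_exit(void)
{
	/* the free path matches on the callback/data pair that was registered */
	omap_dispc_free_irq(my_irq_mask, my_irq_cb, my_data);
}
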
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 5d4f34887a2..ca51583ec98 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -131,7 +131,7 @@ struct {
struct omapfb_device *fbdev;
struct lcd_ctrl_extif *extif;
- struct lcd_ctrl *int_ctrl;
+ const struct lcd_ctrl *int_ctrl;
struct clk *sys_ck;
} hwa742;
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c
new file mode 100644
index 00000000000..393712b6f36
--- /dev/null
+++ b/drivers/video/omap/lcd_2430sdp.c
@@ -0,0 +1,202 @@
+/*
+ * LCD panel support for the TI 2430SDP board
+ *
+ * Copyright (C) 2007 MontaVista
+ * Author: Hunyue Yau <hyau@mvista.com>
+ *
+ * Derived from drivers/video/omap/lcd-apollon.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl4030.h>
+
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+#include <asm/mach-types.h>
+
+#define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91
+#define SDP2430_LCD_PANEL_ENABLE_GPIO 154
+#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24
+#define SDP3430_LCD_PANEL_ENABLE_GPIO 28
+
+static unsigned backlight_gpio;
+static unsigned enable_gpio;
+
+#define LCD_PIXCLOCK_MAX 5400 /* freq 5.4 MHz */
+#define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define ENABLE_VAUX2_DEDICATED 0x09
+#define ENABLE_VAUX2_DEV_GRP 0x20
+#define ENABLE_VAUX3_DEDICATED 0x03
+#define ENABLE_VAUX3_DEV_GRP 0x20
+
+#define ENABLE_VPLL2_DEDICATED 0x05
+#define ENABLE_VPLL2_DEV_GRP 0xE0
+#define TWL4030_VPLL2_DEV_GRP 0x33
+#define TWL4030_VPLL2_DEDICATED 0x36
+
+#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+
+
+static int sdp2430_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ if (machine_is_omap_3430sdp()) {
+ enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO;
+ backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO;
+ } else {
+ enable_gpio = SDP2430_LCD_PANEL_ENABLE_GPIO;
+ backlight_gpio = SDP2430_LCD_PANEL_BACKLIGHT_GPIO;
+ }
+
+ gpio_request(enable_gpio, "LCD enable"); /* LCD panel */
+ gpio_request(backlight_gpio, "LCD bl"); /* LCD backlight */
+ gpio_direction_output(enable_gpio, 0);
+ gpio_direction_output(backlight_gpio, 0);
+
+ return 0;
+}
+
+static void sdp2430_panel_cleanup(struct lcd_panel *panel)
+{
+ gpio_free(backlight_gpio);
+ gpio_free(enable_gpio);
+}
+
+static int sdp2430_panel_enable(struct lcd_panel *panel)
+{
+ u8 ded_val, ded_reg;
+ u8 grp_val, grp_reg;
+
+ if (machine_is_omap_3430sdp()) {
+ ded_reg = TWL4030_VAUX3_DEDICATED;
+ ded_val = ENABLE_VAUX3_DEDICATED;
+ grp_reg = TWL4030_VAUX3_DEV_GRP;
+ grp_val = ENABLE_VAUX3_DEV_GRP;
+
+ if (omap_rev() > OMAP3430_REV_ES1_0) {
+ t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED,
+ TWL4030_VPLL2_DEDICATED);
+ t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP,
+ TWL4030_VPLL2_DEV_GRP);
+ }
+ } else {
+ ded_reg = TWL4030_VAUX2_DEDICATED;
+ ded_val = ENABLE_VAUX2_DEDICATED;
+ grp_reg = TWL4030_VAUX2_DEV_GRP;
+ grp_val = ENABLE_VAUX2_DEV_GRP;
+ }
+
+ gpio_set_value(enable_gpio, 1);
+ gpio_set_value(backlight_gpio, 1);
+
+ if (0 != t2_out(PM_RECEIVER, ded_val, ded_reg))
+ return -EIO;
+ if (0 != t2_out(PM_RECEIVER, grp_val, grp_reg))
+ return -EIO;
+
+ return 0;
+}
+
+static void sdp2430_panel_disable(struct lcd_panel *panel)
+{
+ gpio_set_value(enable_gpio, 0);
+ gpio_set_value(backlight_gpio, 0);
+ if (omap_rev() > OMAP3430_REV_ES1_0) {
+ t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED);
+ t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP);
+ msleep(4);
+ }
+}
+
+static unsigned long sdp2430_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel sdp2430_panel = {
+ .name = "sdp2430",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .hsw = 3, /* hsync_len (4) - 1 */
+ .hfp = 3, /* right_margin (4) - 1 */
+ .hbp = 39, /* left_margin (40) - 1 */
+ .vsw = 1, /* vsync_len (2) - 1 */
+ .vfp = 2, /* lower_margin */
+ .vbp = 7, /* upper_margin (8) - 1 */
+
+ .pixel_clock = LCD_PIXCLOCK_MAX,
+
+ .init = sdp2430_panel_init,
+ .cleanup = sdp2430_panel_cleanup,
+ .enable = sdp2430_panel_enable,
+ .disable = sdp2430_panel_disable,
+ .get_caps = sdp2430_panel_get_caps,
+};
+
+static int sdp2430_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&sdp2430_panel);
+ return 0;
+}
+
+static int sdp2430_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int sdp2430_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int sdp2430_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver sdp2430_panel_driver = {
+ .probe = sdp2430_panel_probe,
+ .remove = sdp2430_panel_remove,
+ .suspend = sdp2430_panel_suspend,
+ .resume = sdp2430_panel_resume,
+ .driver = {
+ .name = "sdp2430_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sdp2430_panel_drv_init(void)
+{
+ return platform_driver_register(&sdp2430_panel_driver);
+}
+
+static void __exit sdp2430_panel_drv_exit(void)
+{
+ platform_driver_unregister(&sdp2430_panel_driver);
+}
+
+module_init(sdp2430_panel_drv_init);
+module_exit(sdp2430_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
new file mode 100644
index 00000000000..1f7439955e0
--- /dev/null
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -0,0 +1,137 @@
+/*
+ * Based on drivers/video/omap/lcd_inn1510.c
+ *
+ * LCD panel support for the Amstrad E3 (Delta) videophone.
+ *
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <mach/board-ams-delta.h>
+#include <mach/hardware.h>
+#include <mach/omapfb.h>
+
+#define AMS_DELTA_DEFAULT_CONTRAST 112
+
+static int ams_delta_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void ams_delta_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int ams_delta_panel_enable(struct lcd_panel *panel)
+{
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_NDISP,
+ AMS_DELTA_LATCH2_LCD_NDISP);
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_VBLEN,
+ AMS_DELTA_LATCH2_LCD_VBLEN);
+
+ omap_writeb(1, OMAP_PWL_CLK_ENABLE);
+ omap_writeb(AMS_DELTA_DEFAULT_CONTRAST, OMAP_PWL_ENABLE);
+
+ return 0;
+}
+
+static void ams_delta_panel_disable(struct lcd_panel *panel)
+{
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_VBLEN, 0);
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_NDISP, 0);
+}
+
+static unsigned long ams_delta_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static struct lcd_panel ams_delta_panel = {
+ .name = "ams-delta",
+ .config = 0,
+
+ .bpp = 12,
+ .data_lines = 16,
+ .x_res = 480,
+ .y_res = 320,
+ .pixel_clock = 4687,
+ .hsw = 3,
+ .hfp = 1,
+ .hbp = 1,
+ .vsw = 1,
+ .vfp = 0,
+ .vbp = 0,
+ .pcd = 0,
+ .acb = 37,
+
+ .init = ams_delta_panel_init,
+ .cleanup = ams_delta_panel_cleanup,
+ .enable = ams_delta_panel_enable,
+ .disable = ams_delta_panel_disable,
+ .get_caps = ams_delta_panel_get_caps,
+};
+
+static int ams_delta_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&ams_delta_panel);
+ return 0;
+}
+
+static int ams_delta_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int ams_delta_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int ams_delta_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver ams_delta_panel_driver = {
+ .probe = ams_delta_panel_probe,
+ .remove = ams_delta_panel_remove,
+ .suspend = ams_delta_panel_suspend,
+ .resume = ams_delta_panel_resume,
+ .driver = {
+ .name = "lcd_ams_delta",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int ams_delta_panel_drv_init(void)
+{
+ return platform_driver_register(&ams_delta_panel_driver);
+}
+
+static void ams_delta_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&ams_delta_panel_driver);
+}
+
+module_init(ams_delta_panel_drv_init);
+module_exit(ams_delta_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
new file mode 100644
index 00000000000..626ae3a532f
--- /dev/null
+++ b/drivers/video/omap/lcd_apollon.c
@@ -0,0 +1,138 @@
+/*
+ * LCD panel support for the Samsung OMAP2 Apollon board
+ *
+ * Copyright (C) 2005,2006 Samsung Electronics
+ * Author: Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Derived from drivers/video/omap/lcd-h4.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <mach/gpio.h>
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+
+/* #define USE_35INCH_LCD 1 */
+
+static int apollon_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ /* configure LCD PWR_EN */
+ omap_cfg_reg(M21_242X_GPIO11);
+ return 0;
+}
+
+static void apollon_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int apollon_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void apollon_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long apollon_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel apollon_panel = {
+ .name = "apollon",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC,
+
+ .bpp = 16,
+ .data_lines = 18,
+#ifdef USE_35INCH_LCD
+ .x_res = 240,
+ .y_res = 320,
+ .hsw = 2,
+ .hfp = 3,
+ .hbp = 9,
+ .vsw = 4,
+ .vfp = 3,
+ .vbp = 5,
+#else
+ .x_res = 480,
+ .y_res = 272,
+ .hsw = 41,
+ .hfp = 2,
+ .hbp = 2,
+ .vsw = 10,
+ .vfp = 2,
+ .vbp = 2,
+#endif
+ .pixel_clock = 6250,
+
+ .init = apollon_panel_init,
+ .cleanup = apollon_panel_cleanup,
+ .enable = apollon_panel_enable,
+ .disable = apollon_panel_disable,
+ .get_caps = apollon_panel_get_caps,
+};
+
+static int apollon_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&apollon_panel);
+ return 0;
+}
+
+static int apollon_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int apollon_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int apollon_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver apollon_panel_driver = {
+ .probe = apollon_panel_probe,
+ .remove = apollon_panel_remove,
+ .suspend = apollon_panel_suspend,
+ .resume = apollon_panel_resume,
+ .driver = {
+ .name = "apollon_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init apollon_panel_drv_init(void)
+{
+ return platform_driver_register(&apollon_panel_driver);
+}
+
+static void __exit apollon_panel_drv_exit(void)
+{
+ platform_driver_unregister(&apollon_panel_driver);
+}
+
+module_init(apollon_panel_drv_init);
+module_exit(apollon_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 2486237ebba..417ae5efa8b 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -124,12 +124,12 @@ struct platform_driver h3_panel_driver = {
},
};
-static int h3_panel_drv_init(void)
+static int __init h3_panel_drv_init(void)
{
return platform_driver_register(&h3_panel_driver);
}
-static void h3_panel_drv_cleanup(void)
+static void __exit h3_panel_drv_cleanup(void)
{
platform_driver_unregister(&h3_panel_driver);
}
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
index 6ff56430341..0c398bda760 100644
--- a/drivers/video/omap/lcd_h4.c
+++ b/drivers/video/omap/lcd_h4.c
@@ -102,12 +102,12 @@ static struct platform_driver h4_panel_driver = {
},
};
-static int h4_panel_drv_init(void)
+static int __init h4_panel_drv_init(void)
{
return platform_driver_register(&h4_panel_driver);
}
-static void h4_panel_drv_cleanup(void)
+static void __exit h4_panel_drv_cleanup(void)
{
platform_driver_unregister(&h4_panel_driver);
}
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index 6953ed4b582..cdbd8bb607b 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -109,12 +109,12 @@ struct platform_driver innovator1510_panel_driver = {
},
};
-static int innovator1510_panel_drv_init(void)
+static int __init innovator1510_panel_drv_init(void)
{
return platform_driver_register(&innovator1510_panel_driver);
}
-static void innovator1510_panel_drv_cleanup(void)
+static void __exit innovator1510_panel_drv_cleanup(void)
{
platform_driver_unregister(&innovator1510_panel_driver);
}
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 4c4f7ee6d73..268f7f808a4 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -133,12 +133,12 @@ struct platform_driver innovator1610_panel_driver = {
},
};
-static int innovator1610_panel_drv_init(void)
+static int __init innovator1610_panel_drv_init(void)
{
return platform_driver_register(&innovator1610_panel_driver);
}
-static void innovator1610_panel_drv_cleanup(void)
+static void __exit innovator1610_panel_drv_cleanup(void)
{
platform_driver_unregister(&innovator1610_panel_driver);
}
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
new file mode 100644
index 00000000000..dbfe8974fb9
--- /dev/null
+++ b/drivers/video/omap/lcd_ldp.c
@@ -0,0 +1,200 @@
+/*
+ * LCD panel support for the TI LDP board
+ *
+ * Copyright (C) 2007 WindRiver
+ * Author: Stanley Miao <stanley.miao@windriver.com>
+ *
+ * Derived from drivers/video/omap/lcd-2430sdp.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/i2c/twl4030.h>
+
+#include <mach/gpio.h>
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+#include <asm/mach-types.h>
+
+#define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES)
+#define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES)
+
+#define LCD_PANEL_RESET_GPIO 55
+#define LCD_PANEL_QVGA_GPIO 56
+
+#ifdef CONFIG_FB_OMAP_LCD_VGA
+#define LCD_XRES 480
+#define LCD_YRES 640
+#define LCD_PIXCLOCK_MAX 41700
+#else
+#define LCD_XRES 240
+#define LCD_YRES 320
+#define LCD_PIXCLOCK_MAX 185186
+#endif
+
+#define PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define ENABLE_VAUX2_DEDICATED 0x09
+#define ENABLE_VAUX2_DEV_GRP 0x20
+#define ENABLE_VAUX3_DEDICATED 0x03
+#define ENABLE_VAUX3_DEV_GRP 0x20
+
+#define ENABLE_VPLL2_DEDICATED 0x05
+#define ENABLE_VPLL2_DEV_GRP 0xE0
+#define TWL4030_VPLL2_DEV_GRP 0x33
+#define TWL4030_VPLL2_DEDICATED 0x36
+
+#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+
+
+static int ldp_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ gpio_request(LCD_PANEL_RESET_GPIO, "lcd reset");
+ gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga");
+ gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd panel");
+ gpio_request(LCD_PANEL_BACKLIGHT_GPIO, "lcd backlight");
+
+ gpio_direction_output(LCD_PANEL_QVGA_GPIO, 0);
+ gpio_direction_output(LCD_PANEL_RESET_GPIO, 0);
+ gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
+ gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 0);
+
+#ifdef CONFIG_FB_OMAP_LCD_VGA
+ gpio_set_value(LCD_PANEL_QVGA_GPIO, 0);
+#else
+ gpio_set_value(LCD_PANEL_QVGA_GPIO, 1);
+#endif
+ gpio_set_value(LCD_PANEL_RESET_GPIO, 1);
+
+ return 0;
+}
+
+static void ldp_panel_cleanup(struct lcd_panel *panel)
+{
+ gpio_free(LCD_PANEL_BACKLIGHT_GPIO);
+ gpio_free(LCD_PANEL_ENABLE_GPIO);
+ gpio_free(LCD_PANEL_QVGA_GPIO);
+ gpio_free(LCD_PANEL_RESET_GPIO);
+}
+
+static int ldp_panel_enable(struct lcd_panel *panel)
+{
+ if (0 != t2_out(PM_RECEIVER, ENABLE_VPLL2_DEDICATED,
+ TWL4030_VPLL2_DEDICATED))
+ return -EIO;
+ if (0 != t2_out(PM_RECEIVER, ENABLE_VPLL2_DEV_GRP,
+ TWL4030_VPLL2_DEV_GRP))
+ return -EIO;
+
+ gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1);
+ gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 1);
+
+ if (0 != t2_out(PM_RECEIVER, ENABLE_VAUX3_DEDICATED,
+ TWL4030_VAUX3_DEDICATED))
+ return -EIO;
+ if (0 != t2_out(PM_RECEIVER, ENABLE_VAUX3_DEV_GRP,
+ TWL4030_VAUX3_DEV_GRP))
+ return -EIO;
+
+ return 0;
+}
+
+static void ldp_panel_disable(struct lcd_panel *panel)
+{
+ gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
+ gpio_direction_output(LCD_PANEL_BACKLIGHT_GPIO, 0);
+
+ t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEDICATED);
+ t2_out(PM_RECEIVER, 0x0, TWL4030_VPLL2_DEV_GRP);
+ msleep(4);
+}
+
+static unsigned long ldp_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel ldp_panel = {
+ .name = "ldp",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC,
+
+ .bpp = 16,
+ .data_lines = 18,
+ .x_res = LCD_XRES,
+ .y_res = LCD_YRES,
+ .hsw = 3, /* hsync_len (4) - 1 */
+ .hfp = 3, /* right_margin (4) - 1 */
+ .hbp = 39, /* left_margin (40) - 1 */
+ .vsw = 1, /* vsync_len (2) - 1 */
+ .vfp = 2, /* lower_margin */
+ .vbp = 7, /* upper_margin (8) - 1 */
+
+ .pixel_clock = LCD_PIXCLOCK_MAX,
+
+ .init = ldp_panel_init,
+ .cleanup = ldp_panel_cleanup,
+ .enable = ldp_panel_enable,
+ .disable = ldp_panel_disable,
+ .get_caps = ldp_panel_get_caps,
+};
+
+static int ldp_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&ldp_panel);
+ return 0;
+}
+
+static int ldp_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int ldp_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int ldp_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver ldp_panel_driver = {
+ .probe = ldp_panel_probe,
+ .remove = ldp_panel_remove,
+ .suspend = ldp_panel_suspend,
+ .resume = ldp_panel_resume,
+ .driver = {
+ .name = "ldp_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ldp_panel_drv_init(void)
+{
+ return platform_driver_register(&ldp_panel_driver);
+}
+
+static void __exit ldp_panel_drv_exit(void)
+{
+ platform_driver_unregister(&ldp_panel_driver);
+}
+
+module_init(ldp_panel_drv_init);
+module_exit(ldp_panel_drv_exit);
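
The timing comments in ldp_panel ("hsync_len (4) - 1" and so on) spell out the convention used throughout these panel definitions: hsw/hfp/hbp/vsw/vbp hold the corresponding fbdev timing minus one, while vfp is stored as-is. A small illustrative helper (not part of the driver, names hypothetical) that recovers the fbdev-style numbers from the ldp entry, assuming exactly that convention:

	struct fb_timings {
		unsigned int hsync_len, right_margin, left_margin;
		unsigned int vsync_len, upper_margin, lower_margin;
	};

	static struct fb_timings ldp_like_timings(void)
	{
		struct fb_timings fb;

		fb.hsync_len	= 3 + 1;	/* .hsw = 3  -> hsync_len 4 */
		fb.right_margin	= 3 + 1;	/* .hfp = 3  -> right_margin 4 */
		fb.left_margin	= 39 + 1;	/* .hbp = 39 -> left_margin 40 */
		fb.vsync_len	= 1 + 1;	/* .vsw = 1  -> vsync_len 2 */
		fb.upper_margin	= 7 + 1;	/* .vbp = 7  -> upper_margin 8 */
		fb.lower_margin	= 2;		/* .vfp = 2  -> lower_margin 2 */

		return fb;
	}
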
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
new file mode 100644
index 00000000000..918ee893419
--- /dev/null
+++ b/drivers/video/omap/lcd_mipid.c
@@ -0,0 +1,625 @@
+/*
+ * LCD driver for MIPI DBI-C / DCS compatible LCDs
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+
+#include <mach/omapfb.h>
+#include <mach/lcd_mipid.h>
+
+#define MIPID_MODULE_NAME "lcd_mipid"
+
+#define MIPID_CMD_READ_DISP_ID 0x04
+#define MIPID_CMD_READ_RED 0x06
+#define MIPID_CMD_READ_GREEN 0x07
+#define MIPID_CMD_READ_BLUE 0x08
+#define MIPID_CMD_READ_DISP_STATUS 0x09
+#define MIPID_CMD_RDDSDR 0x0F
+#define MIPID_CMD_SLEEP_IN 0x10
+#define MIPID_CMD_SLEEP_OUT 0x11
+#define MIPID_CMD_DISP_OFF 0x28
+#define MIPID_CMD_DISP_ON 0x29
+
+#define MIPID_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
+
+#define to_mipid_device(p) container_of(p, struct mipid_device, \
+ panel)
+struct mipid_device {
+ int enabled;
+ int revision;
+ unsigned int saved_bklight_level;
+ unsigned long hw_guard_end; /* next value of jiffies
+ when we can issue the
+ next sleep in/out command */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ struct omapfb_device *fbdev;
+ struct spi_device *spi;
+ struct mutex mutex;
+ struct lcd_panel panel;
+
+ struct workqueue_struct *esd_wq;
+ struct delayed_work esd_work;
+ void (*esd_check)(struct mipid_device *m);
+};
+
+static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf,
+ int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[4];
+ u16 w;
+ int r;
+
+ BUG_ON(md->spi == NULL);
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
+ x++;
+ x->rx_buf = &w;
+ x->len = 1;
+ spi_message_add_tail(x, &m);
+
+ if (rlen > 1) {
+ /* Arrange for the extra clock before the first
+ * data bit.
+ */
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ x++;
+ x->rx_buf = &rbuf[1];
+ x->len = rlen - 1;
+ spi_message_add_tail(x, &m);
+ }
+ }
+
+ r = spi_sync(md->spi, &m);
+ if (r < 0)
+ dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
+
+ if (rlen)
+ rbuf[0] = w & 0xff;
+}
+
+static inline void mipid_cmd(struct mipid_device *md, int cmd)
+{
+ mipid_transfer(md, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void mipid_write(struct mipid_device *md,
+ int reg, const u8 *buf, int len)
+{
+ mipid_transfer(md, reg, buf, len, NULL, 0);
+}
+
+static inline void mipid_read(struct mipid_device *md,
+ int reg, u8 *buf, int len)
+{
+ mipid_transfer(md, reg, NULL, 0, buf, len);
+}
+
+static void set_data_lines(struct mipid_device *md, int data_lines)
+{
+ u16 par;
+
+ switch (data_lines) {
+ case 16:
+ par = 0x150;
+ break;
+ case 18:
+ par = 0x160;
+ break;
+ case 24:
+ par = 0x170;
+ break;
+ }
+ mipid_write(md, 0x3a, (u8 *)&par, 2);
+}
+
+static void send_init_string(struct mipid_device *md)
+{
+ u16 initpar[] = { 0x0102, 0x0100, 0x0100 };
+
+ mipid_write(md, 0xc2, (u8 *)initpar, sizeof(initpar));
+ set_data_lines(md, md->panel.data_lines);
+}
+
+static void hw_guard_start(struct mipid_device *md, int guard_msec)
+{
+ md->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ md->hw_guard_end = jiffies + md->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct mipid_device *md)
+{
+ unsigned long wait = md->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= md->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static void set_sleep_mode(struct mipid_device *md, int on)
+{
+ int cmd, sleep_time = 50;
+
+ if (on)
+ cmd = MIPID_CMD_SLEEP_IN;
+ else
+ cmd = MIPID_CMD_SLEEP_OUT;
+ hw_guard_wait(md);
+ mipid_cmd(md, cmd);
+ hw_guard_start(md, 120);
+ /*
+ * When we enable the panel, it seems we _have_ to sleep
+ * 120 ms before sending the init string. When disabling the
+ * panel we'll sleep for the duration of 2 frames, so that the
+ * controller can still provide the PCLK,HS,VS signals.
+ */
+ if (!on)
+ sleep_time = 120;
+ msleep(sleep_time);
+}
+
+static void set_display_state(struct mipid_device *md, int enabled)
+{
+ int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
+
+ mipid_cmd(md, cmd);
+}
+
+static int mipid_set_bklight_level(struct lcd_panel *panel, unsigned int level)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+ struct mipid_platform_data *pd = md->spi->dev.platform_data;
+
+ if (pd->get_bklight_max == NULL || pd->set_bklight_level == NULL)
+ return -ENODEV;
+ if (level > pd->get_bklight_max(pd))
+ return -EINVAL;
+ if (!md->enabled) {
+ md->saved_bklight_level = level;
+ return 0;
+ }
+ pd->set_bklight_level(pd, level);
+
+ return 0;
+}
+
+static unsigned int mipid_get_bklight_level(struct lcd_panel *panel)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+ struct mipid_platform_data *pd = md->spi->dev.platform_data;
+
+ if (pd->get_bklight_level == NULL)
+ return -ENODEV;
+ return pd->get_bklight_level(pd);
+}
+
+static unsigned int mipid_get_bklight_max(struct lcd_panel *panel)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+ struct mipid_platform_data *pd = md->spi->dev.platform_data;
+
+ if (pd->get_bklight_max == NULL)
+ return -ENODEV;
+
+ return pd->get_bklight_max(pd);
+}
+
+static unsigned long mipid_get_caps(struct lcd_panel *panel)
+{
+ return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+static u16 read_first_pixel(struct mipid_device *md)
+{
+ u16 pixel;
+ u8 red, green, blue;
+
+ mutex_lock(&md->mutex);
+ mipid_read(md, MIPID_CMD_READ_RED, &red, 1);
+ mipid_read(md, MIPID_CMD_READ_GREEN, &green, 1);
+ mipid_read(md, MIPID_CMD_READ_BLUE, &blue, 1);
+ mutex_unlock(&md->mutex);
+
+ switch (md->panel.data_lines) {
+ case 16:
+ pixel = ((red >> 1) << 11) | (green << 5) | (blue >> 1);
+ break;
+ case 24:
+ /* 24 bit -> 16 bit */
+ pixel = ((red >> 3) << 11) | ((green >> 2) << 5) |
+ (blue >> 3);
+ break;
+ default:
+ pixel = 0;
+ BUG();
+ }
+
+ return pixel;
+}
+
+static int mipid_run_test(struct lcd_panel *panel, int test_num)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+ static const u16 test_values[4] = {
+ 0x0000, 0xffff, 0xaaaa, 0x5555,
+ };
+ int i;
+
+ if (test_num != MIPID_TEST_RGB_LINES)
+ return MIPID_TEST_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(test_values); i++) {
+ int delay;
+ unsigned long tmo;
+
+ omapfb_write_first_pixel(md->fbdev, test_values[i]);
+ tmo = jiffies + msecs_to_jiffies(100);
+ delay = 25;
+ while (1) {
+ u16 pixel;
+
+ msleep(delay);
+ pixel = read_first_pixel(md);
+ if (pixel == test_values[i])
+ break;
+ if (time_after(jiffies, tmo)) {
+ dev_err(&md->spi->dev,
+ "MIPI LCD RGB I/F test failed: "
+ "expecting %04x, got %04x\n",
+ test_values[i], pixel);
+ return MIPID_TEST_FAILED;
+ }
+ delay = 10;
+ }
+ }
+
+ return 0;
+}
+
+static void ls041y3_esd_recover(struct mipid_device *md)
+{
+ dev_err(&md->spi->dev, "performing LCD ESD recovery\n");
+ set_sleep_mode(md, 1);
+ set_sleep_mode(md, 0);
+}
+
+static void ls041y3_esd_check_mode1(struct mipid_device *md)
+{
+ u8 state1, state2;
+
+ mipid_read(md, MIPID_CMD_RDDSDR, &state1, 1);
+ set_sleep_mode(md, 0);
+ mipid_read(md, MIPID_CMD_RDDSDR, &state2, 1);
+ dev_dbg(&md->spi->dev, "ESD mode 1 state1 %02x state2 %02x\n",
+ state1, state2);
+ /* Each sleep out command will trigger a self diagnostic and flip
+ * Bit6 if the test passes.
+ */
+ if (!((state1 ^ state2) & (1 << 6)))
+ ls041y3_esd_recover(md);
+}
+
+static void ls041y3_esd_check_mode2(struct mipid_device *md)
+{
+ int i;
+ u8 rbuf[2];
+ static const struct {
+ int cmd;
+ int wlen;
+ u16 wbuf[3];
+ } *rd, rd_ctrl[7] = {
+ { 0xb0, 4, { 0x0101, 0x01fe, } },
+ { 0xb1, 4, { 0x01de, 0x0121, } },
+ { 0xc2, 4, { 0x0100, 0x0100, } },
+ { 0xbd, 2, { 0x0100, } },
+ { 0xc2, 4, { 0x01fc, 0x0103, } },
+ { 0xb4, 0, },
+ { 0x00, 0, },
+ };
+
+ rd = rd_ctrl;
+ for (i = 0; i < 3; i++, rd++)
+ mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen);
+
+ udelay(10);
+ mipid_read(md, rd->cmd, rbuf, 2);
+ rd++;
+
+ for (i = 0; i < 3; i++, rd++) {
+ udelay(10);
+ mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen);
+ }
+
+ dev_dbg(&md->spi->dev, "ESD mode 2 state %02x\n", rbuf[1]);
+ if (rbuf[1] == 0x00)
+ ls041y3_esd_recover(md);
+}
+
+static void ls041y3_esd_check(struct mipid_device *md)
+{
+ ls041y3_esd_check_mode1(md);
+ if (md->revision >= 0x88)
+ ls041y3_esd_check_mode2(md);
+}
+
+static void mipid_esd_start_check(struct mipid_device *md)
+{
+ if (md->esd_check != NULL)
+ queue_delayed_work(md->esd_wq, &md->esd_work,
+ MIPID_ESD_CHECK_PERIOD);
+}
+
+static void mipid_esd_stop_check(struct mipid_device *md)
+{
+ if (md->esd_check != NULL)
+ cancel_rearming_delayed_workqueue(md->esd_wq, &md->esd_work);
+}
+
+static void mipid_esd_work(struct work_struct *work)
+{
+ struct mipid_device *md = container_of(work, struct mipid_device,
+ esd_work.work);
+
+ mutex_lock(&md->mutex);
+ md->esd_check(md);
+ mutex_unlock(&md->mutex);
+ mipid_esd_start_check(md);
+}
+
+static int mipid_enable(struct lcd_panel *panel)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+
+ mutex_lock(&md->mutex);
+
+ if (md->enabled) {
+ mutex_unlock(&md->mutex);
+ return 0;
+ }
+ set_sleep_mode(md, 0);
+ md->enabled = 1;
+ send_init_string(md);
+ set_display_state(md, 1);
+ mipid_set_bklight_level(panel, md->saved_bklight_level);
+ mipid_esd_start_check(md);
+
+ mutex_unlock(&md->mutex);
+ return 0;
+}
+
+static void mipid_disable(struct lcd_panel *panel)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+
+ /*
+ * A final ESD work might be called before returning,
+ * so do this without holding the lock.
+ */
+ mipid_esd_stop_check(md);
+ mutex_lock(&md->mutex);
+
+ if (!md->enabled) {
+ mutex_unlock(&md->mutex);
+ return;
+ }
+ md->saved_bklight_level = mipid_get_bklight_level(panel);
+ mipid_set_bklight_level(panel, 0);
+ set_display_state(md, 0);
+ set_sleep_mode(md, 1);
+ md->enabled = 0;
+
+ mutex_unlock(&md->mutex);
+}
+
+static int panel_enabled(struct mipid_device *md)
+{
+ u32 disp_status;
+ int enabled;
+
+ mipid_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
+ disp_status = __be32_to_cpu(disp_status);
+ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
+ dev_dbg(&md->spi->dev,
+ "LCD panel %senabled by bootloader (status 0x%04x)\n",
+ enabled ? "" : "not ", disp_status);
+ return enabled;
+}
+
+static int mipid_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+
+ md->fbdev = fbdev;
+ md->esd_wq = create_singlethread_workqueue("mipid_esd");
+ if (md->esd_wq == NULL) {
+ dev_err(&md->spi->dev, "can't create ESD workqueue\n");
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work);
+ mutex_init(&md->mutex);
+
+ md->enabled = panel_enabled(md);
+
+ if (md->enabled)
+ mipid_esd_start_check(md);
+ else
+ md->saved_bklight_level = mipid_get_bklight_level(panel);
+
+ return 0;
+}
+
+static void mipid_cleanup(struct lcd_panel *panel)
+{
+ struct mipid_device *md = to_mipid_device(panel);
+
+ if (md->enabled)
+ mipid_esd_stop_check(md);
+ destroy_workqueue(md->esd_wq);
+}
+
+static struct lcd_panel mipid_panel = {
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .x_res = 800,
+ .y_res = 480,
+ .pixel_clock = 21940,
+ .hsw = 50,
+ .hfp = 20,
+ .hbp = 15,
+ .vsw = 2,
+ .vfp = 1,
+ .vbp = 3,
+
+ .init = mipid_init,
+ .cleanup = mipid_cleanup,
+ .enable = mipid_enable,
+ .disable = mipid_disable,
+ .get_caps = mipid_get_caps,
+ .set_bklight_level = mipid_set_bklight_level,
+ .get_bklight_level = mipid_get_bklight_level,
+ .get_bklight_max = mipid_get_bklight_max,
+ .run_test = mipid_run_test,
+};
+
+static int mipid_detect(struct mipid_device *md)
+{
+ struct mipid_platform_data *pdata;
+ u8 display_id[3];
+
+ pdata = md->spi->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&md->spi->dev, "missing platform data\n");
+ return -ENOENT;
+ }
+
+ mipid_read(md, MIPID_CMD_READ_DISP_ID, display_id, 3);
+ dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ display_id[0], display_id[1], display_id[2]);
+
+ switch (display_id[0]) {
+ case 0x45:
+ md->panel.name = "lph8923";
+ break;
+ case 0x83:
+ md->panel.name = "ls041y3";
+ md->esd_check = ls041y3_esd_check;
+ break;
+ default:
+ md->panel.name = "unknown";
+ dev_err(&md->spi->dev, "invalid display ID\n");
+ return -ENODEV;
+ }
+
+ md->revision = display_id[1];
+ md->panel.data_lines = pdata->data_lines;
+ pr_info("omapfb: %s rev %02x LCD detected, %d data lines\n",
+ md->panel.name, md->revision, md->panel.data_lines);
+
+ return 0;
+}
+
+static int mipid_spi_probe(struct spi_device *spi)
+{
+ struct mipid_device *md;
+ int r;
+
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
+ if (md == NULL) {
+ dev_err(&spi->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ spi->mode = SPI_MODE_0;
+ md->spi = spi;
+ dev_set_drvdata(&spi->dev, md);
+ md->panel = mipid_panel;
+
+ r = mipid_detect(md);
+ if (r < 0)
+ return r;
+
+ omapfb_register_panel(&md->panel);
+
+ return 0;
+}
+
+static int mipid_spi_remove(struct spi_device *spi)
+{
+ struct mipid_device *md = dev_get_drvdata(&spi->dev);
+
+ mipid_disable(&md->panel);
+ kfree(md);
+
+ return 0;
+}
+
+static struct spi_driver mipid_spi_driver = {
+ .driver = {
+ .name = MIPID_MODULE_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = mipid_spi_probe,
+ .remove = __devexit_p(mipid_spi_remove),
+};
+
+static int mipid_drv_init(void)
+{
+ spi_register_driver(&mipid_spi_driver);
+
+ return 0;
+}
+module_init(mipid_drv_init);
+
+static void mipid_drv_cleanup(void)
+{
+ spi_unregister_driver(&mipid_spi_driver);
+}
+module_exit(mipid_drv_cleanup);
+
+MODULE_DESCRIPTION("MIPI display driver");
+MODULE_LICENSE("GPL");
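
lcd_mipid.c keeps its ESD check alive by having mipid_esd_work() re-queue itself on a private single-threaded workqueue every MIPID_ESD_CHECK_PERIOD, until mipid_esd_stop_check() cancels it. The self-rearming pattern in isolation, as a rough sketch with illustrative names:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define CHECK_PERIOD	msecs_to_jiffies(5000)

	static struct workqueue_struct *check_wq;
	static struct delayed_work check_work;

	/* The handler re-queues itself, so once started the check repeats
	 * every CHECK_PERIOD until it is cancelled (the driver above uses
	 * cancel_rearming_delayed_workqueue() for that). */
	static void check_fn(struct work_struct *work)
	{
		/* ... run the periodic check here ... */
		queue_delayed_work(check_wq, &check_work, CHECK_PERIOD);
	}

	static int start_check(void)
	{
		check_wq = create_singlethread_workqueue("example_esd");
		if (check_wq == NULL)
			return -ENOMEM;
		INIT_DELAYED_WORK(&check_work, check_fn);
		queue_delayed_work(check_wq, &check_work, CHECK_PERIOD);
		return 0;
	}
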
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
new file mode 100644
index 00000000000..7a2bbe2ecec
--- /dev/null
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -0,0 +1,191 @@
+/*
+ * LCD panel support for the MISTRAL OMAP2EVM board
+ *
+ * Author: Arun C <arunedarath@mistralsolutions.com>
+ *
+ * Derived from drivers/video/omap/lcd_omap3evm.c
+ * Derived from drivers/video/omap/lcd-apollon.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl4030.h>
+
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+#include <asm/mach-types.h>
+
+#define LCD_PANEL_ENABLE_GPIO 154
+#define LCD_PANEL_LR 128
+#define LCD_PANEL_UD 129
+#define LCD_PANEL_INI 152
+#define LCD_PANEL_QVGA 148
+#define LCD_PANEL_RESB 153
+
+#define TWL_LED_LEDEN 0x00
+#define TWL_PWMA_PWMAON 0x00
+#define TWL_PWMA_PWMAOFF 0x01
+
+static unsigned int bklight_level;
+
+static int omap2evm_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable");
+ gpio_request(LCD_PANEL_LR, "LCD lr");
+ gpio_request(LCD_PANEL_UD, "LCD ud");
+ gpio_request(LCD_PANEL_INI, "LCD ini");
+ gpio_request(LCD_PANEL_QVGA, "LCD qvga");
+ gpio_request(LCD_PANEL_RESB, "LCD resb");
+
+ gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1);
+ gpio_direction_output(LCD_PANEL_RESB, 1);
+ gpio_direction_output(LCD_PANEL_INI, 1);
+ gpio_direction_output(LCD_PANEL_QVGA, 0);
+ gpio_direction_output(LCD_PANEL_LR, 1);
+ gpio_direction_output(LCD_PANEL_UD, 1);
+
+ twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+ twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+ twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+ bklight_level = 100;
+
+ return 0;
+}
+
+static void omap2evm_panel_cleanup(struct lcd_panel *panel)
+{
+ gpio_free(LCD_PANEL_RESB);
+ gpio_free(LCD_PANEL_QVGA);
+ gpio_free(LCD_PANEL_INI);
+ gpio_free(LCD_PANEL_UD);
+ gpio_free(LCD_PANEL_LR);
+ gpio_free(LCD_PANEL_ENABLE_GPIO);
+}
+
+static int omap2evm_panel_enable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
+ return 0;
+}
+
+static void omap2evm_panel_disable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
+}
+
+static unsigned long omap2evm_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
+ unsigned int level)
+{
+ u8 c;
+ if ((level >= 0) && (level <= 100)) {
+ c = (125 * (100 - level)) / 100 + 2;
+ twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+ bklight_level = level;
+ }
+ return 0;
+}
+
+static unsigned int omap2evm_bklight_getlevel(struct lcd_panel *panel)
+{
+ return bklight_level;
+}
+
+static unsigned int omap2evm_bklight_getmaxlevel(struct lcd_panel *panel)
+{
+ return 100;
+}
+
+struct lcd_panel omap2evm_panel = {
+ .name = "omap2evm",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC,
+
+ .bpp = 16,
+ .data_lines = 18,
+ .x_res = 480,
+ .y_res = 640,
+ .hsw = 3,
+ .hfp = 0,
+ .hbp = 28,
+ .vsw = 2,
+ .vfp = 1,
+ .vbp = 0,
+
+ .pixel_clock = 20000,
+
+ .init = omap2evm_panel_init,
+ .cleanup = omap2evm_panel_cleanup,
+ .enable = omap2evm_panel_enable,
+ .disable = omap2evm_panel_disable,
+ .get_caps = omap2evm_panel_get_caps,
+ .set_bklight_level = omap2evm_bklight_setlevel,
+ .get_bklight_level = omap2evm_bklight_getlevel,
+ .get_bklight_max = omap2evm_bklight_getmaxlevel,
+};
+
+static int omap2evm_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&omap2evm_panel);
+ return 0;
+}
+
+static int omap2evm_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int omap2evm_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int omap2evm_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver omap2evm_panel_driver = {
+ .probe = omap2evm_panel_probe,
+ .remove = omap2evm_panel_remove,
+ .suspend = omap2evm_panel_suspend,
+ .resume = omap2evm_panel_resume,
+ .driver = {
+ .name = "omap2evm_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap2evm_panel_drv_init(void)
+{
+ return platform_driver_register(&omap2evm_panel_driver);
+}
+
+static void __exit omap2evm_panel_drv_exit(void)
+{
+ platform_driver_unregister(&omap2evm_panel_driver);
+}
+
+module_init(omap2evm_panel_drv_init);
+module_exit(omap2evm_panel_drv_exit);
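
The backlight path in omap2evm_bklight_setlevel() (and the identical omap3evm version further down) maps a 0-100 level onto the TWL4030 PWMA off register with c = (125 * (100 - level)) / 100 + 2. A quick worked check of the endpoints and midpoint, shown standalone:

	/* Same arithmetic as the driver. */
	static unsigned int level_to_pwmaoff(unsigned int level)
	{
		return (125 * (100 - level)) / 100 + 2;
	}

	/* level = 100  ->  (125 *   0) / 100 + 2 =   2
	 * level =  50  ->  (125 *  50) / 100 + 2 =  64
	 * level =   0  ->  (125 * 100) / 100 + 2 = 127
	 * so the 0-100 range spreads across roughly 2..127 on PWMAOFF,
	 * while PWMAON stays at the fixed value written during panel init. */
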
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
new file mode 100644
index 00000000000..4011910123b
--- /dev/null
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -0,0 +1,130 @@
+/*
+ * LCD panel support for the TI OMAP3 Beagle board
+ *
+ * Author: Koen Kooi <koen@openembedded.org>
+ *
+ * Derived from drivers/video/omap/lcd-omap3evm.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl4030.h>
+
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+#include <asm/mach-types.h>
+
+#define LCD_PANEL_ENABLE_GPIO 170
+
+static int omap3beagle_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable");
+ return 0;
+}
+
+static void omap3beagle_panel_cleanup(struct lcd_panel *panel)
+{
+ gpio_free(LCD_PANEL_ENABLE_GPIO);
+}
+
+static int omap3beagle_panel_enable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
+ return 0;
+}
+
+static void omap3beagle_panel_disable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
+}
+
+static unsigned long omap3beagle_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel omap3beagle_panel = {
+ .name = "omap3beagle",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 24,
+ .x_res = 1024,
+ .y_res = 768,
+ .hsw = 3, /* hsync_len (4) - 1 */
+ .hfp = 3, /* right_margin (4) - 1 */
+ .hbp = 39, /* left_margin (40) - 1 */
+ .vsw = 1, /* vsync_len (2) - 1 */
+ .vfp = 2, /* lower_margin */
+ .vbp = 7, /* upper_margin (8) - 1 */
+
+ .pixel_clock = 64000,
+
+ .init = omap3beagle_panel_init,
+ .cleanup = omap3beagle_panel_cleanup,
+ .enable = omap3beagle_panel_enable,
+ .disable = omap3beagle_panel_disable,
+ .get_caps = omap3beagle_panel_get_caps,
+};
+
+static int omap3beagle_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&omap3beagle_panel);
+ return 0;
+}
+
+static int omap3beagle_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int omap3beagle_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int omap3beagle_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver omap3beagle_panel_driver = {
+ .probe = omap3beagle_panel_probe,
+ .remove = omap3beagle_panel_remove,
+ .suspend = omap3beagle_panel_suspend,
+ .resume = omap3beagle_panel_resume,
+ .driver = {
+ .name = "omap3beagle_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap3beagle_panel_drv_init(void)
+{
+ return platform_driver_register(&omap3beagle_panel_driver);
+}
+
+static void __exit omap3beagle_panel_drv_exit(void)
+{
+ platform_driver_unregister(&omap3beagle_panel_driver);
+}
+
+module_init(omap3beagle_panel_drv_init);
+module_exit(omap3beagle_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
new file mode 100644
index 00000000000..b6a4c2c57a2
--- /dev/null
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -0,0 +1,192 @@
+/*
+ * LCD panel support for the TI OMAP3 EVM board
+ *
+ * Author: Steve Sakoman <steve@sakoman.com>
+ *
+ * Derived from drivers/video/omap/lcd-apollon.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl4030.h>
+
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+#include <asm/mach-types.h>
+
+#define LCD_PANEL_ENABLE_GPIO 153
+#define LCD_PANEL_LR 2
+#define LCD_PANEL_UD 3
+#define LCD_PANEL_INI 152
+#define LCD_PANEL_QVGA 154
+#define LCD_PANEL_RESB 155
+
+#define ENABLE_VDAC_DEDICATED 0x03
+#define ENABLE_VDAC_DEV_GRP 0x20
+#define ENABLE_VPLL2_DEDICATED 0x05
+#define ENABLE_VPLL2_DEV_GRP 0xE0
+
+#define TWL_LED_LEDEN 0x00
+#define TWL_PWMA_PWMAON 0x00
+#define TWL_PWMA_PWMAOFF 0x01
+
+static unsigned int bklight_level;
+
+static int omap3evm_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ gpio_request(LCD_PANEL_LR, "LCD lr");
+ gpio_request(LCD_PANEL_UD, "LCD ud");
+ gpio_request(LCD_PANEL_INI, "LCD ini");
+ gpio_request(LCD_PANEL_RESB, "LCD resb");
+ gpio_request(LCD_PANEL_QVGA, "LCD qvga");
+
+ gpio_direction_output(LCD_PANEL_RESB, 1);
+ gpio_direction_output(LCD_PANEL_INI, 1);
+ gpio_direction_output(LCD_PANEL_QVGA, 0);
+ gpio_direction_output(LCD_PANEL_LR, 1);
+ gpio_direction_output(LCD_PANEL_UD, 1);
+
+ twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+ twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+ twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+ bklight_level = 100;
+
+ return 0;
+}
+
+static void omap3evm_panel_cleanup(struct lcd_panel *panel)
+{
+ gpio_free(LCD_PANEL_QVGA);
+ gpio_free(LCD_PANEL_RESB);
+ gpio_free(LCD_PANEL_INI);
+ gpio_free(LCD_PANEL_UD);
+ gpio_free(LCD_PANEL_LR);
+}
+
+static int omap3evm_panel_enable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
+ return 0;
+}
+
+static void omap3evm_panel_disable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
+}
+
+static unsigned long omap3evm_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static int omap3evm_bklight_setlevel(struct lcd_panel *panel,
+ unsigned int level)
+{
+ u8 c;
+ if ((level >= 0) && (level <= 100)) {
+ c = (125 * (100 - level)) / 100 + 2;
+ twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+ bklight_level = level;
+ }
+ return 0;
+}
+
+static unsigned int omap3evm_bklight_getlevel(struct lcd_panel *panel)
+{
+ return bklight_level;
+}
+
+static unsigned int omap3evm_bklight_getmaxlevel(struct lcd_panel *panel)
+{
+ return 100;
+}
+
+struct lcd_panel omap3evm_panel = {
+ .name = "omap3evm",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC,
+
+ .bpp = 16,
+ .data_lines = 18,
+ .x_res = 480,
+ .y_res = 640,
+ .hsw = 3, /* hsync_len (4) - 1 */
+ .hfp = 3, /* right_margin (4) - 1 */
+ .hbp = 39, /* left_margin (40) - 1 */
+ .vsw = 1, /* vsync_len (2) - 1 */
+ .vfp = 2, /* lower_margin */
+ .vbp = 7, /* upper_margin (8) - 1 */
+
+ .pixel_clock = 26000,
+
+ .init = omap3evm_panel_init,
+ .cleanup = omap3evm_panel_cleanup,
+ .enable = omap3evm_panel_enable,
+ .disable = omap3evm_panel_disable,
+ .get_caps = omap3evm_panel_get_caps,
+ .set_bklight_level = omap3evm_bklight_setlevel,
+ .get_bklight_level = omap3evm_bklight_getlevel,
+ .get_bklight_max = omap3evm_bklight_getmaxlevel,
+};
+
+static int omap3evm_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&omap3evm_panel);
+ return 0;
+}
+
+static int omap3evm_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int omap3evm_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int omap3evm_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver omap3evm_panel_driver = {
+ .probe = omap3evm_panel_probe,
+ .remove = omap3evm_panel_remove,
+ .suspend = omap3evm_panel_suspend,
+ .resume = omap3evm_panel_resume,
+ .driver = {
+ .name = "omap3evm_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap3evm_panel_drv_init(void)
+{
+ return platform_driver_register(&omap3evm_panel_driver);
+}
+
+static void __exit omap3evm_panel_drv_exit(void)
+{
+ platform_driver_unregister(&omap3evm_panel_driver);
+}
+
+module_init(omap3evm_panel_drv_init);
+module_exit(omap3evm_panel_drv_exit);
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index 379c96d36da..b3fa88bc626 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -127,12 +127,12 @@ struct platform_driver osk_panel_driver = {
},
};
-static int osk_panel_drv_init(void)
+static int __init osk_panel_drv_init(void)
{
return platform_driver_register(&osk_panel_driver);
}
-static void osk_panel_drv_cleanup(void)
+static void __exit osk_panel_drv_cleanup(void)
{
platform_driver_unregister(&osk_panel_driver);
}
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
new file mode 100644
index 00000000000..2bc5c9268e5
--- /dev/null
+++ b/drivers/video/omap/lcd_overo.c
@@ -0,0 +1,179 @@
+/*
+ * LCD panel support for the Gumstix Overo
+ *
+ * Author: Steve Sakoman <steve@sakoman.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl4030.h>
+
+#include <mach/gpio.h>
+#include <mach/mux.h>
+#include <mach/omapfb.h>
+#include <asm/mach-types.h>
+
+#define LCD_ENABLE 144
+
+static int overo_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ if ((gpio_request(LCD_ENABLE, "LCD_ENABLE") == 0) &&
+ (gpio_direction_output(LCD_ENABLE, 1) == 0))
+ gpio_export(LCD_ENABLE, 0);
+ else
+ printk(KERN_ERR "could not obtain gpio for LCD_ENABLE\n");
+
+ return 0;
+}
+
+static void overo_panel_cleanup(struct lcd_panel *panel)
+{
+ gpio_free(LCD_ENABLE);
+}
+
+static int overo_panel_enable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_ENABLE, 1);
+ return 0;
+}
+
+static void overo_panel_disable(struct lcd_panel *panel)
+{
+ gpio_set_value(LCD_ENABLE, 0);
+}
+
+static unsigned long overo_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel overo_panel = {
+ .name = "overo",
+ .config = OMAP_LCDC_PANEL_TFT,
+ .bpp = 16,
+ .data_lines = 24,
+
+#if defined CONFIG_FB_OMAP_031M3R
+
+ /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
+ .x_res = 640,
+ .y_res = 480,
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 7,
+ .pixel_clock = 23500,
+
+#elif defined CONFIG_FB_OMAP_048M3R
+
+ /* 800 x 600 @ 60 Hz Reduced blanking VESA CVT 0.48M3-R */
+ .x_res = 800,
+ .y_res = 600,
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 11,
+ .pixel_clock = 35500,
+
+#elif defined CONFIG_FB_OMAP_079M3R
+
+ /* 1024 x 768 @ 60 Hz Reduced blanking VESA CVT 0.79M3-R */
+ .x_res = 1024,
+ .y_res = 768,
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 15,
+ .pixel_clock = 56000,
+
+#elif defined CONFIG_FB_OMAP_092M9R
+
+ /* 1280 x 720 @ 60 Hz Reduced blanking VESA CVT 0.92M9-R */
+ .x_res = 1280,
+ .y_res = 720,
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+ .vfp = 3,
+ .vsw = 5,
+ .vbp = 13,
+ .pixel_clock = 64000,
+
+#else
+
+ /* use 640 x 480 if no config option */
+ /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
+ .x_res = 640,
+ .y_res = 480,
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 7,
+ .pixel_clock = 23500,
+
+#endif
+
+ .init = overo_panel_init,
+ .cleanup = overo_panel_cleanup,
+ .enable = overo_panel_enable,
+ .disable = overo_panel_disable,
+ .get_caps = overo_panel_get_caps,
+};
+
+static int overo_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&overo_panel);
+ return 0;
+}
+
+static int overo_panel_remove(struct platform_device *pdev)
+{
+ /* omapfb does not have unregister_panel */
+ return 0;
+}
+
+static struct platform_driver overo_panel_driver = {
+ .probe = overo_panel_probe,
+ .remove = overo_panel_remove,
+ .driver = {
+ .name = "overo_lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init overo_panel_drv_init(void)
+{
+ return platform_driver_register(&overo_panel_driver);
+}
+
+static void __exit overo_panel_drv_exit(void)
+{
+ platform_driver_unregister(&overo_panel_driver);
+}
+
+module_init(overo_panel_drv_init);
+module_exit(overo_panel_drv_exit);
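
Each #if branch in overo_panel pairs VESA CVT reduced-blanking timings with a pixel clock given in kHz; the value is roughly htotal * vtotal * 60 Hz. A small sanity check against the 1024x768 entry (illustrative helper, not driver code):

	/* pixel clock (kHz) ~= htotal * vtotal * refresh / 1000 */
	static unsigned long mode_pixclock_khz(unsigned long xres, unsigned long hfp,
					       unsigned long hsw, unsigned long hbp,
					       unsigned long yres, unsigned long vfp,
					       unsigned long vsw, unsigned long vbp,
					       unsigned long refresh)
	{
		return (xres + hfp + hsw + hbp) * (yres + vfp + vsw + vbp)
			* refresh / 1000;
	}

	/* 1024x768 entry: (1024+48+32+80) * (768+3+4+15) * 60 / 1000
	 *		 = 1184 * 790 * 60 / 1000 = 56121 kHz,
	 * in line with the .pixel_clock = 56000 used above. */
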
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index 218317366e6..4bf3c79f3cc 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -108,12 +108,12 @@ struct platform_driver palmte_panel_driver = {
},
};
-static int palmte_panel_drv_init(void)
+static int __init palmte_panel_drv_init(void)
{
return platform_driver_register(&palmte_panel_driver);
}
-static void palmte_panel_drv_cleanup(void)
+static void __exit palmte_panel_drv_cleanup(void)
{
platform_driver_unregister(&palmte_panel_driver);
}
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index 57b0f6cf6a5..48ea1f9f2cb 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -113,12 +113,12 @@ struct platform_driver palmtt_panel_driver = {
},
};
-static int palmtt_panel_drv_init(void)
+static int __init palmtt_panel_drv_init(void)
{
return platform_driver_register(&palmtt_panel_driver);
}
-static void palmtt_panel_drv_cleanup(void)
+static void __exit palmtt_panel_drv_cleanup(void)
{
platform_driver_unregister(&palmtt_panel_driver);
}
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index d33d78b1172..0697d29b4d3 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -109,12 +109,12 @@ struct platform_driver palmz71_panel_driver = {
},
};
-static int palmz71_panel_drv_init(void)
+static int __init palmz71_panel_drv_init(void)
{
return platform_driver_register(&palmz71_panel_driver);
}
-static void palmz71_panel_drv_cleanup(void)
+static void __exit palmz71_panel_drv_cleanup(void)
{
platform_driver_unregister(&palmz71_panel_driver);
}
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 8862233d57b..125e605b8c6 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -67,6 +67,7 @@ static struct caps_table_struct ctrl_caps[] = {
{ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE, "pixel double window" },
{ OMAPFB_CAPS_WINDOW_SCALE, "scale window" },
{ OMAPFB_CAPS_WINDOW_OVERLAY, "overlay window" },
+ { OMAPFB_CAPS_WINDOW_ROTATE, "rotate window" },
{ OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" },
};
@@ -215,6 +216,15 @@ static int ctrl_change_mode(struct fb_info *fbi)
offset, var->xres_virtual,
plane->info.pos_x, plane->info.pos_y,
var->xres, var->yres, plane->color_mode);
+ if (r < 0)
+ return r;
+
+ if (fbdev->ctrl->set_rotate != NULL) {
+ r = fbdev->ctrl->set_rotate(var->rotate);
+ if (r < 0)
+ return r;
+ }
+
if (fbdev->ctrl->set_scale != NULL)
r = fbdev->ctrl->set_scale(plane->idx,
var->xres, var->yres,
@@ -554,7 +564,6 @@ static int set_fb_var(struct fb_info *fbi,
var->xoffset = var->xres_virtual - var->xres;
if (var->yres + var->yoffset > var->yres_virtual)
var->yoffset = var->yres_virtual - var->yres;
- line_size = var->xres * bpp / 8;
if (plane->color_mode == OMAPFB_COLOR_RGB444) {
var->red.offset = 8; var->red.length = 4;
@@ -600,7 +609,7 @@ static void omapfb_rotate(struct fb_info *fbi, int rotate)
struct omapfb_device *fbdev = plane->fbdev;
omapfb_rqueue_lock(fbdev);
- if (cpu_is_omap15xx() && rotate != fbi->var.rotate) {
+ if (rotate != fbi->var.rotate) {
struct fb_var_screeninfo *new_var = &fbdev->new_var;
memcpy(new_var, &fbi->var, sizeof(*new_var));
@@ -707,28 +716,42 @@ int omapfb_update_window_async(struct fb_info *fbi,
void (*callback)(void *),
void *callback_data)
{
+ int xres, yres;
struct omapfb_plane_struct *plane = fbi->par;
struct omapfb_device *fbdev = plane->fbdev;
- struct fb_var_screeninfo *var;
+ struct fb_var_screeninfo *var = &fbi->var;
+
+ switch (var->rotate) {
+ case 0:
+ case 180:
+ xres = fbdev->panel->x_res;
+ yres = fbdev->panel->y_res;
+ break;
+ case 90:
+ case 270:
+ xres = fbdev->panel->y_res;
+ yres = fbdev->panel->x_res;
+ break;
+ default:
+ return -EINVAL;
+ }
- var = &fbi->var;
- if (win->x >= var->xres || win->y >= var->yres ||
- win->out_x > var->xres || win->out_y >= var->yres)
+ if (win->x >= xres || win->y >= yres ||
+ win->out_x > xres || win->out_y > yres)
return -EINVAL;
if (!fbdev->ctrl->update_window ||
fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
return -ENODEV;
- if (win->x + win->width >= var->xres)
- win->width = var->xres - win->x;
- if (win->y + win->height >= var->yres)
- win->height = var->yres - win->y;
- /* The out sizes should be cropped to the LCD size */
- if (win->out_x + win->out_width > fbdev->panel->x_res)
- win->out_width = fbdev->panel->x_res - win->out_x;
- if (win->out_y + win->out_height > fbdev->panel->y_res)
- win->out_height = fbdev->panel->y_res - win->out_y;
+ if (win->x + win->width > xres)
+ win->width = xres - win->x;
+ if (win->y + win->height > yres)
+ win->height = yres - win->y;
+ if (win->out_x + win->out_width > xres)
+ win->out_width = xres - win->out_x;
+ if (win->out_y + win->out_height > yres)
+ win->out_height = yres - win->out_y;
if (!win->width || !win->height || !win->out_width || !win->out_height)
return 0;
@@ -1699,8 +1722,8 @@ static int omapfb_do_probe(struct platform_device *pdev,
pr_info("omapfb: configured for panel %s\n", fbdev->panel->name);
- def_vxres = def_vxres ? : fbdev->panel->x_res;
- def_vyres = def_vyres ? : fbdev->panel->y_res;
+ def_vxres = def_vxres ? def_vxres : fbdev->panel->x_res;
+ def_vyres = def_vyres ? def_vyres : fbdev->panel->y_res;
init_state++;
@@ -1822,8 +1845,8 @@ static int omapfb_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct omapfb_device *fbdev = platform_get_drvdata(pdev);
- omapfb_blank(FB_BLANK_POWERDOWN, fbdev->fb_info[0]);
-
+ if (fbdev != NULL)
+ omapfb_blank(FB_BLANK_POWERDOWN, fbdev->fb_info[0]);
return 0;
}
@@ -1832,7 +1855,8 @@ static int omapfb_resume(struct platform_device *pdev)
{
struct omapfb_device *fbdev = platform_get_drvdata(pdev);
- omapfb_blank(FB_BLANK_UNBLANK, fbdev->fb_info[0]);
+ if (fbdev != NULL)
+ omapfb_blank(FB_BLANK_UNBLANK, fbdev->fb_info[0]);
return 0;
}
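
The omapfb_update_window_async() change above sizes the clip rectangle against the rotated panel dimensions instead of the var resolution: for 90/270 the panel's x_res/y_res swap roles, then both the source and output windows are clamped to that size. A compact sketch of the same clamping rule, with hypothetical names:

	#include <linux/errno.h>

	struct update_rect {
		unsigned int x, y, width, height;
	};

	/* Clamp an update window to the rotated panel size; -EINVAL if the
	 * origin falls outside the panel or the rotation is unsupported. */
	static int clamp_to_rotated_panel(struct update_rect *win,
					  unsigned int panel_x,
					  unsigned int panel_y,
					  unsigned int rotate)
	{
		unsigned int xres, yres;

		switch (rotate) {
		case 0:
		case 180:
			xres = panel_x;
			yres = panel_y;
			break;
		case 90:
		case 270:
			xres = panel_y;
			yres = panel_x;
			break;
		default:
			return -EINVAL;
		}

		if (win->x >= xres || win->y >= yres)
			return -EINVAL;
		if (win->x + win->width > xres)
			win->width = xres - win->x;
		if (win->y + win->height > yres)
			win->height = yres - win->y;
		return 0;
	}
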
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index 9332d6ca645..ee01e84e19c 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -57,6 +57,7 @@
#define DISPC_BASE 0x48050400
#define DISPC_CONTROL 0x0040
+#define DISPC_IRQ_FRAMEMASK 0x0001
static struct {
void __iomem *base;
@@ -553,7 +554,9 @@ static int rfbi_init(struct omapfb_device *fbdev)
l = (0x01 << 2);
rfbi_write_reg(RFBI_CONTROL, l);
- if ((r = omap_dispc_request_irq(rfbi_dma_callback, NULL)) < 0) {
+ r = omap_dispc_request_irq(DISPC_IRQ_FRAMEMASK, rfbi_dma_callback,
+ NULL);
+ if (r < 0) {
dev_err(fbdev->dev, "can't get DISPC irq\n");
rfbi_enable_clocks(0);
return r;
@@ -570,7 +573,7 @@ static int rfbi_init(struct omapfb_device *fbdev)
static void rfbi_cleanup(void)
{
- omap_dispc_free_irq();
+ omap_dispc_free_irq(DISPC_IRQ_FRAMEMASK, rfbi_dma_callback, NULL);
rfbi_put_clocks();
iounmap(rfbi.base);
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index bacfabd9ce1..0a366d86f08 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -223,10 +223,14 @@ static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static inline int platinum_vram_reqd(int video_mode, int color_mode)
{
- return vmode_attrs[video_mode-1].vres *
- (vmode_attrs[video_mode-1].hres * (1<<color_mode) +
- ((video_mode == VMODE_832_624_75) &&
- (color_mode > CMODE_8)) ? 0x10 : 0x20) + 0x1000;
+ int baseval = vmode_attrs[video_mode-1].hres * (1<<color_mode);
+
+ if ((video_mode == VMODE_832_624_75) && (color_mode > CMODE_8))
+ baseval += 0x10;
+ else
+ baseval += 0x20;
+
+ return vmode_attrs[video_mode-1].vres * baseval + 0x1000;
}
#define STORE_D2(a, d) { \
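
The platinumfb hunk above is an operator-precedence fix: in the old return statement the ?: operator bound to the whole preceding sum (a + cond ? x : y parses as (a + cond) ? x : y), so the per-line slack collapsed to a constant 0x10 and the size calculation was wrong. A worked comparison, assuming the 832x624@75 mode at 16 bpp (1 << color_mode == 2):

	/* New, intended formula:
	 *   baseval = 832 * 2 + 0x10          = 1680
	 *   vram    = 624 * 1680 + 0x1000     = 1,052,416 bytes (~1 MiB)
	 *
	 * Old expression, as actually parsed:
	 *   (832 * 2 + 1) ? 0x10 : 0x20       = 0x10
	 *   vram    = 624 * 0x10 + 0x1000     = 14,080 bytes (far too small) */
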
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 6506117c134..1820c4a2443 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1638,24 +1638,26 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
* Power management hooks. Note that we won't be called from IRQ context,
* unlike the blank functions above, so we may sleep.
*/
-static int pxafb_suspend(struct platform_device *dev, pm_message_t state)
+static int pxafb_suspend(struct device *dev)
{
- struct pxafb_info *fbi = platform_get_drvdata(dev);
+ struct pxafb_info *fbi = dev_get_drvdata(dev);
set_ctrlr_state(fbi, C_DISABLE_PM);
return 0;
}
-static int pxafb_resume(struct platform_device *dev)
+static int pxafb_resume(struct device *dev)
{
- struct pxafb_info *fbi = platform_get_drvdata(dev);
+ struct pxafb_info *fbi = dev_get_drvdata(dev);
set_ctrlr_state(fbi, C_ENABLE_PM);
return 0;
}
-#else
-#define pxafb_suspend NULL
-#define pxafb_resume NULL
+
+static struct dev_pm_ops pxafb_pm_ops = {
+ .suspend = pxafb_suspend,
+ .resume = pxafb_resume,
+};
#endif
static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi)
@@ -2081,6 +2083,9 @@ static int __devinit pxafb_probe(struct platform_device *dev)
goto failed;
}
+ if (cpu_is_pxa3xx() && inf->acceleration_enabled)
+ fbi->fb.fix.accel = FB_ACCEL_PXA3XX;
+
fbi->backlight_power = inf->pxafb_backlight_power;
fbi->lcd_power = inf->pxafb_lcd_power;
@@ -2091,14 +2096,14 @@ static int __devinit pxafb_probe(struct platform_device *dev)
goto failed_fbi;
}
- r = request_mem_region(r->start, r->end - r->start + 1, dev->name);
+ r = request_mem_region(r->start, resource_size(r), dev->name);
if (r == NULL) {
dev_err(&dev->dev, "failed to request I/O memory\n");
ret = -EBUSY;
goto failed_fbi;
}
- fbi->mmio_base = ioremap(r->start, r->end - r->start + 1);
+ fbi->mmio_base = ioremap(r->start, resource_size(r));
if (fbi->mmio_base == NULL) {
dev_err(&dev->dev, "failed to map I/O memory\n");
ret = -EBUSY;
@@ -2197,7 +2202,7 @@ failed_free_dma:
failed_free_io:
iounmap(fbi->mmio_base);
failed_free_res:
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
failed_fbi:
clk_put(fbi->clk);
platform_set_drvdata(dev, NULL);
@@ -2237,7 +2242,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
iounmap(fbi->mmio_base);
r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
clk_put(fbi->clk);
kfree(fbi);
@@ -2248,11 +2253,12 @@ static int __devexit pxafb_remove(struct platform_device *dev)
static struct platform_driver pxafb_driver = {
.probe = pxafb_probe,
.remove = __devexit_p(pxafb_remove),
- .suspend = pxafb_suspend,
- .resume = pxafb_resume,
.driver = {
.owner = THIS_MODULE,
.name = "pxa2xx-fb",
+#ifdef CONFIG_PM
+ .pm = &pxafb_pm_ops,
+#endif
},
};
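
The pxafb hunks above make two independent cleanups: the suspend/resume hooks move from the legacy platform_driver entries to a dev_pm_ops table referenced from .driver.pm (so they now receive a struct device *), and the open-coded r->end - r->start + 1 length calculations become resource_size(r). A minimal sketch of the dev_pm_ops shape for a hypothetical driver:

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM
	/* dev_pm_ops callbacks take a struct device, not a platform_device. */
	static int example_fb_suspend(struct device *dev)
	{
		/* quiesce the controller, e.g. via dev_get_drvdata(dev) */
		return 0;
	}

	static int example_fb_resume(struct device *dev)
	{
		/* bring the controller back up */
		return 0;
	}

	static struct dev_pm_ops example_fb_pm_ops = {
		.suspend	= example_fb_suspend,
		.resume		= example_fb_resume,
	};
	#endif

	static struct platform_driver example_fb_driver = {
		.driver = {
			.name	= "example-fb",
			.owner	= THIS_MODULE,
	#ifdef CONFIG_PM
			.pm	= &example_fb_pm_ops,
	#endif
		},
	};
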
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 5a72083dc67..adf9632c6b1 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -1036,7 +1036,7 @@ static int s3c_fb_resume(struct platform_device *pdev)
static struct platform_driver s3c_fb_driver = {
.probe = s3c_fb_probe,
- .remove = s3c_fb_remove,
+ .remove = __devexit_p(s3c_fb_remove),
.suspend = s3c_fb_suspend,
.resume = s3c_fb_resume,
.driver = {
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 7da0027e240..aac661225c7 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -369,7 +369,9 @@ static void s3c2410fb_activate_var(struct fb_info *info)
void __iomem *regs = fbi->io;
int type = fbi->regs.lcdcon1 & S3C2410_LCDCON1_TFT;
struct fb_var_screeninfo *var = &info->var;
- int clkdiv = s3c2410fb_calc_pixclk(fbi, var->pixclock) / 2;
+ int clkdiv;
+
+ clkdiv = DIV_ROUND_UP(s3c2410fb_calc_pixclk(fbi, var->pixclock), 2);
dprintk("%s: var->xres = %d\n", __func__, var->xres);
dprintk("%s: var->yres = %d\n", __func__, var->yres);
@@ -1119,7 +1121,7 @@ int __init s3c2410fb_init(void)
int ret = platform_driver_register(&s3c2410fb_driver);
if (ret == 0)
- ret = platform_driver_register(&s3c2412fb_driver);;
+ ret = platform_driver_register(&s3c2412fb_driver);
return ret;
}
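
The s3c2410fb change replaces a plain /2 with DIV_ROUND_UP(..., 2) when deriving clkdiv: rounding the divider down would give a pixel clock faster than requested, while rounding up keeps it at or below the target. DIV_ROUND_UP(n, d) is simply ((n) + (d) - 1) / (d), so for an odd intermediate value:

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/* Suppose s3c2410fb_calc_pixclk() returns 7:
	 *   old:  7 / 2              = 3  -> divider too small, clock too fast
	 *   new:  DIV_ROUND_UP(7, 2) = 4  -> divider rounded up, clock <= target
	 * For even values the two expressions agree. */
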
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 4a067f0d0ce..a4e05e4d750 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -698,8 +698,8 @@ sisfb_search_refresh_rate(struct sis_video_info *ivideo, unsigned int rate, int
rate, sisfb_vrate[i].refresh);
ivideo->rate_idx = sisfb_vrate[i].idx;
ivideo->refresh_rate = sisfb_vrate[i].refresh;
- } else if(((rate - sisfb_vrate[i-1].refresh) <= 2)
- && (sisfb_vrate[i].idx != 1)) {
+ } else if((sisfb_vrate[i].idx != 1) &&
+ ((rate - sisfb_vrate[i-1].refresh) <= 2)) {
DPRINTK("sisfb: Adjusting rate from %d down to %d\n",
rate, sisfb_vrate[i-1].refresh);
ivideo->rate_idx = sisfb_vrate[i-1].idx;
diff --git a/drivers/video/sis/vstruct.h b/drivers/video/sis/vstruct.h
index 705c8536052..bef4aae388d 100644
--- a/drivers/video/sis/vstruct.h
+++ b/drivers/video/sis/vstruct.h
@@ -342,7 +342,7 @@ struct SiS_Private
unsigned short SiS_RY4COE;
unsigned short SiS_LCDHDES;
unsigned short SiS_LCDVDES;
- unsigned short SiS_DDC_Port;
+ SISIOADDRESS SiS_DDC_Port;
unsigned short SiS_DDC_Index;
unsigned short SiS_DDC_Data;
unsigned short SiS_DDC_NData;
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index a1eb0862255..6913fe168c2 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -974,7 +974,7 @@ static int tmiofb_resume(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
struct mfd_cell *cell = dev->dev.platform_data;
- int retval;
+ int retval = 0;
acquire_console_sem();
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index 45c54bfe99b..9d4f3a49ba4 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -20,229 +20,430 @@
*/
#include "global.h"
-void viafb_init_accel(void)
+static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
+ u8 dst_bpp, u32 dst_addr, u32 dst_pitch, u32 dst_x, u32 dst_y,
+ u32 *src_mem, u32 src_addr, u32 src_pitch, u32 src_x, u32 src_y,
+ u32 fg_color, u32 bg_color, u8 fill_rop)
{
- viaparinfo->fbmem_free -= CURSOR_SIZE;
- viaparinfo->cursor_start = viaparinfo->fbmem_free;
- viaparinfo->fbmem_used += CURSOR_SIZE;
+ u32 ge_cmd = 0, tmp, i;
- /* Reverse 8*1024 memory space for cursor image */
- viaparinfo->fbmem_free -= (CURSOR_SIZE + VQ_SIZE);
- viaparinfo->VQ_start = viaparinfo->fbmem_free;
- viaparinfo->VQ_end = viaparinfo->VQ_start + VQ_SIZE - 1;
- viaparinfo->fbmem_used += (CURSOR_SIZE + VQ_SIZE); }
-
-void viafb_init_2d_engine(void)
-{
- u32 dwVQStartAddr, dwVQEndAddr;
- u32 dwVQLen, dwVQStartL, dwVQEndL, dwVQStartEndH;
-
- /* init 2D engine regs to reset 2D engine */
- writel(0x0, viaparinfo->io_virt + VIA_REG_GEMODE);
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCPOS);
- writel(0x0, viaparinfo->io_virt + VIA_REG_DSTPOS);
- writel(0x0, viaparinfo->io_virt + VIA_REG_DIMENSION);
- writel(0x0, viaparinfo->io_virt + VIA_REG_PATADDR);
- writel(0x0, viaparinfo->io_virt + VIA_REG_FGCOLOR);
- writel(0x0, viaparinfo->io_virt + VIA_REG_BGCOLOR);
- writel(0x0, viaparinfo->io_virt + VIA_REG_CLIPTL);
- writel(0x0, viaparinfo->io_virt + VIA_REG_CLIPBR);
- writel(0x0, viaparinfo->io_virt + VIA_REG_OFFSET);
- writel(0x0, viaparinfo->io_virt + VIA_REG_KEYCONTROL);
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCBASE);
- writel(0x0, viaparinfo->io_virt + VIA_REG_DSTBASE);
- writel(0x0, viaparinfo->io_virt + VIA_REG_PITCH);
- writel(0x0, viaparinfo->io_virt + VIA_REG_MONOPAT1);
-
- /* Init AGP and VQ regs */
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_K8M890:
- case UNICHROME_P4M900:
- writel(0x00100000, viaparinfo->io_virt + VIA_REG_CR_TRANSET);
- writel(0x680A0000, viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- writel(0x02000000, viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- break;
+ if (!op || op > 3) {
+ printk(KERN_WARNING "hw_bitblt_1: Invalid operation: %d\n", op);
+ return -EINVAL;
+ }
- default:
- writel(0x00100000, viaparinfo->io_virt + VIA_REG_TRANSET);
- writel(0x00000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x00333004, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x60000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x61000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x62000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x63000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x64000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x7D000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
-
- writel(0xFE020000, viaparinfo->io_virt + VIA_REG_TRANSET);
- writel(0x00000000, viaparinfo->io_virt + VIA_REG_TRANSPACE);
- break;
+ if (op != VIA_BITBLT_FILL && !src_mem && src_addr == dst_addr) {
+ if (src_x < dst_x) {
+ ge_cmd |= 0x00008000;
+ src_x += width - 1;
+ dst_x += width - 1;
+ }
+ if (src_y < dst_y) {
+ ge_cmd |= 0x00004000;
+ src_y += height - 1;
+ dst_y += height - 1;
+ }
}
- if (viaparinfo->VQ_start != 0) {
- /* Enable VQ */
- dwVQStartAddr = viaparinfo->VQ_start;
- dwVQEndAddr = viaparinfo->VQ_end;
-
- dwVQStartL = 0x50000000 | (dwVQStartAddr & 0xFFFFFF);
- dwVQEndL = 0x51000000 | (dwVQEndAddr & 0xFFFFFF);
- dwVQStartEndH = 0x52000000 |
- ((dwVQStartAddr & 0xFF000000) >> 24) |
- ((dwVQEndAddr & 0xFF000000) >> 16);
- dwVQLen = 0x53000000 | (VQ_SIZE >> 3);
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_K8M890:
- case UNICHROME_P4M900:
- dwVQStartL |= 0x20000000;
- dwVQEndL |= 0x20000000;
- dwVQStartEndH |= 0x20000000;
- dwVQLen |= 0x20000000;
+
+ if (op == VIA_BITBLT_FILL) {
+ switch (fill_rop) {
+ case 0x00: /* blackness */
+ case 0x5A: /* pattern inversion */
+ case 0xF0: /* pattern copy */
+ case 0xFF: /* whiteness */
break;
default:
- break;
+ printk(KERN_WARNING "hw_bitblt_1: Invalid fill rop: "
+ "%u\n", fill_rop);
+ return -EINVAL;
}
+ }
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_K8M890:
- case UNICHROME_P4M900:
- writel(0x00100000,
- viaparinfo->io_virt + VIA_REG_CR_TRANSET);
- writel(dwVQStartEndH,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- writel(dwVQStartL,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- writel(dwVQEndL,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- writel(dwVQLen,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- writel(0x74301001,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- writel(0x00000000,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- break;
- default:
- writel(0x00FE0000,
- viaparinfo->io_virt + VIA_REG_TRANSET);
- writel(0x080003FE,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x0A00027C,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x0B000260,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x0C000274,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x0D000264,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x0E000000,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x0F000020,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x1000027E,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x110002FE,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x200F0060,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
-
- writel(0x00000006,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x40008C0F,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x44000000,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x45080C04,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x46800408,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
-
- writel(dwVQStartEndH,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(dwVQStartL,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(dwVQEndL,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(dwVQLen,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- break;
+ switch (dst_bpp) {
+ case 8:
+ tmp = 0x00000000;
+ break;
+ case 16:
+ tmp = 0x00000100;
+ break;
+ case 32:
+ tmp = 0x00000300;
+ break;
+ default:
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported bpp %d\n",
+ dst_bpp);
+ return -EINVAL;
+ }
+ writel(tmp, engine + 0x04);
+
+ if (op != VIA_BITBLT_FILL) {
+ if (src_x & (op == VIA_BITBLT_MONO ? 0xFFFF8000 : 0xFFFFF000)
+ || src_y & 0xFFFFF000) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
+ "x/y %d %d\n", src_x, src_y);
+ return -EINVAL;
}
- } else {
- /* Disable VQ */
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_K8M890:
- case UNICHROME_P4M900:
- writel(0x00100000,
- viaparinfo->io_virt + VIA_REG_CR_TRANSET);
- writel(0x74301000,
- viaparinfo->io_virt + VIA_REG_CR_TRANSPACE);
- break;
- default:
- writel(0x00FE0000,
- viaparinfo->io_virt + VIA_REG_TRANSET);
- writel(0x00000004,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x40008C0F,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x44000000,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x45080C04,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- writel(0x46800408,
- viaparinfo->io_virt + VIA_REG_TRANSPACE);
- break;
+ tmp = src_x | (src_y << 16);
+ writel(tmp, engine + 0x08);
+ }
+
+ if (dst_x & 0xFFFFF000 || dst_y & 0xFFFFF000) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported destination x/y "
+ "%d %d\n", dst_x, dst_y);
+ return -EINVAL;
+ }
+ tmp = dst_x | (dst_y << 16);
+ writel(tmp, engine + 0x0C);
+
+ if ((width - 1) & 0xFFFFF000 || (height - 1) & 0xFFFFF000) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported width/height "
+ "%d %d\n", width, height);
+ return -EINVAL;
+ }
+ tmp = (width - 1) | ((height - 1) << 16);
+ writel(tmp, engine + 0x10);
+
+ if (op != VIA_BITBLT_COLOR)
+ writel(fg_color, engine + 0x18);
+
+ if (op == VIA_BITBLT_MONO)
+ writel(bg_color, engine + 0x1C);
+
+ if (op != VIA_BITBLT_FILL) {
+ tmp = src_mem ? 0 : src_addr;
+ if (tmp & 0xE0000007) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
+ "address %X\n", tmp);
+ return -EINVAL;
}
+ tmp >>= 3;
+ writel(tmp, engine + 0x30);
+ }
+
+ if (dst_addr & 0xE0000007) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported destination "
+ "address %X\n", dst_addr);
+ return -EINVAL;
}
+ tmp = dst_addr >> 3;
+ writel(tmp, engine + 0x34);
- viafb_set_2d_color_depth(viaparinfo->bpp);
+ if (op == VIA_BITBLT_FILL)
+ tmp = 0;
+ else
+ tmp = src_pitch;
+ if (tmp & 0xFFFFC007 || dst_pitch & 0xFFFFC007) {
+ printk(KERN_WARNING "hw_bitblt_1: Unsupported pitch %X %X\n",
+ tmp, dst_pitch);
+ return -EINVAL;
+ }
+ tmp = (tmp >> 3) | (dst_pitch << (16 - 3));
+ writel(tmp, engine + 0x38);
+
+ if (op == VIA_BITBLT_FILL)
+ ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
+ else {
+ ge_cmd |= 0xCC000000; /* ROP=SRCCOPY */
+ if (src_mem)
+ ge_cmd |= 0x00000040;
+ if (op == VIA_BITBLT_MONO)
+ ge_cmd |= 0x00000002 | 0x00000100 | 0x00020000;
+ else
+ ge_cmd |= 0x00000001;
+ }
+ writel(ge_cmd, engine);
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCBASE);
- writel(0x0, viaparinfo->io_virt + VIA_REG_DSTBASE);
+ if (op == VIA_BITBLT_FILL || !src_mem)
+ return 0;
- writel(VIA_PITCH_ENABLE |
- (((viaparinfo->hres *
- viaparinfo->bpp >> 3) >> 3) | (((viaparinfo->hres *
- viaparinfo->
- bpp >> 3) >> 3) << 16)),
- viaparinfo->io_virt + VIA_REG_PITCH);
+ tmp = (width * height * (op == VIA_BITBLT_MONO ? 1 : (dst_bpp >> 3)) +
+ 3) >> 2;
+
+ for (i = 0; i < tmp; i++)
+ writel(src_mem[i], engine + VIA_MMIO_BLTBASE);
+
+ return 0;
}
-void viafb_set_2d_color_depth(int bpp)
+static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
+ u8 dst_bpp, u32 dst_addr, u32 dst_pitch, u32 dst_x, u32 dst_y,
+ u32 *src_mem, u32 src_addr, u32 src_pitch, u32 src_x, u32 src_y,
+ u32 fg_color, u32 bg_color, u8 fill_rop)
{
- u32 dwGEMode;
+ u32 ge_cmd = 0, tmp, i;
+
+ if (!op || op > 3) {
+ printk(KERN_WARNING "hw_bitblt_2: Invalid operation: %d\n", op);
+ return -EINVAL;
+ }
- dwGEMode = readl(viaparinfo->io_virt + 0x04) & 0xFFFFFCFF;
+ if (op != VIA_BITBLT_FILL && !src_mem && src_addr == dst_addr) {
+ if (src_x < dst_x) {
+ ge_cmd |= 0x00008000;
+ src_x += width - 1;
+ dst_x += width - 1;
+ }
+ if (src_y < dst_y) {
+ ge_cmd |= 0x00004000;
+ src_y += height - 1;
+ dst_y += height - 1;
+ }
+ }
- switch (bpp) {
+ if (op == VIA_BITBLT_FILL) {
+ switch (fill_rop) {
+ case 0x00: /* blackness */
+ case 0x5A: /* pattern inversion */
+ case 0xF0: /* pattern copy */
+ case 0xFF: /* whiteness */
+ break;
+ default:
+ printk(KERN_WARNING "hw_bitblt_2: Invalid fill rop: "
+ "%u\n", fill_rop);
+ return -EINVAL;
+ }
+ }
+
+ switch (dst_bpp) {
+ case 8:
+ tmp = 0x00000000;
+ break;
case 16:
- dwGEMode |= VIA_GEM_16bpp;
+ tmp = 0x00000100;
break;
case 32:
- dwGEMode |= VIA_GEM_32bpp;
+ tmp = 0x00000300;
break;
default:
- dwGEMode |= VIA_GEM_8bpp;
- break;
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported bpp %d\n",
+ dst_bpp);
+ return -EINVAL;
+ }
+ writel(tmp, engine + 0x04);
+
+ if (op == VIA_BITBLT_FILL)
+ tmp = 0;
+ else
+ tmp = src_pitch;
+ if (tmp & 0xFFFFC007 || dst_pitch & 0xFFFFC007) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported pitch %X %X\n",
+ tmp, dst_pitch);
+ return -EINVAL;
+ }
+ tmp = (tmp >> 3) | (dst_pitch << (16 - 3));
+ writel(tmp, engine + 0x08);
+
+ if ((width - 1) & 0xFFFFF000 || (height - 1) & 0xFFFFF000) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported width/height "
+ "%d %d\n", width, height);
+ return -EINVAL;
+ }
+ tmp = (width - 1) | ((height - 1) << 16);
+ writel(tmp, engine + 0x0C);
+
+ if (dst_x & 0xFFFFF000 || dst_y & 0xFFFFF000) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported destination x/y "
+ "%d %d\n", dst_x, dst_y);
+ return -EINVAL;
+ }
+ tmp = dst_x | (dst_y << 16);
+ writel(tmp, engine + 0x10);
+
+ if (dst_addr & 0xE0000007) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported destination "
+ "address %X\n", dst_addr);
+ return -EINVAL;
+ }
+ tmp = dst_addr >> 3;
+ writel(tmp, engine + 0x14);
+
+ if (op != VIA_BITBLT_FILL) {
+ if (src_x & (op == VIA_BITBLT_MONO ? 0xFFFF8000 : 0xFFFFF000)
+ || src_y & 0xFFFFF000) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
+ "x/y %d %d\n", src_x, src_y);
+ return -EINVAL;
+ }
+ tmp = src_x | (src_y << 16);
+ writel(tmp, engine + 0x18);
+
+ tmp = src_mem ? 0 : src_addr;
+ if (tmp & 0xE0000007) {
+ printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
+ "address %X\n", tmp);
+ return -EINVAL;
+ }
+ tmp >>= 3;
+ writel(tmp, engine + 0x1C);
}
- /* Set BPP and Pitch */
- writel(dwGEMode, viaparinfo->io_virt + VIA_REG_GEMODE);
+ if (op != VIA_BITBLT_COLOR)
+ writel(fg_color, engine + 0x4C);
+
+ if (op == VIA_BITBLT_MONO)
+ writel(bg_color, engine + 0x50);
+
+ if (op == VIA_BITBLT_FILL)
+ ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001;
+ else {
+ ge_cmd |= 0xCC000000; /* ROP=SRCCOPY */
+ if (src_mem)
+ ge_cmd |= 0x00000040;
+ if (op == VIA_BITBLT_MONO)
+ ge_cmd |= 0x00000002 | 0x00000100 | 0x00020000;
+ else
+ ge_cmd |= 0x00000001;
+ }
+ writel(ge_cmd, engine);
+
+ if (op == VIA_BITBLT_FILL || !src_mem)
+ return 0;
+
+ tmp = (width * height * (op == VIA_BITBLT_MONO ? 1 : (dst_bpp >> 3)) +
+ 3) >> 2;
+
+ for (i = 0; i < tmp; i++)
+ writel(src_mem[i], engine + VIA_MMIO_BLTBASE);
+
+ return 0;
}
-void viafb_hw_cursor_init(void)
+int viafb_init_engine(struct fb_info *info)
{
+ struct viafb_par *viapar = info->par;
+ void __iomem *engine;
+ u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
+ vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
+
+ engine = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ viapar->shared->engine_mmio = engine;
+ if (!engine) {
+ printk(KERN_WARNING "viafb_init_accel: ioremap failed, "
+ "hardware acceleration disabled\n");
+ return -ENOMEM;
+ }
+
+ switch (chip_name) {
+ case UNICHROME_CLE266:
+ case UNICHROME_K400:
+ case UNICHROME_K800:
+ case UNICHROME_PM800:
+ case UNICHROME_CN700:
+ case UNICHROME_CX700:
+ case UNICHROME_CN750:
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M890:
+ case UNICHROME_P4M900:
+ viapar->shared->hw_bitblt = hw_bitblt_1;
+ break;
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
+ viapar->shared->hw_bitblt = hw_bitblt_2;
+ break;
+ default:
+ viapar->shared->hw_bitblt = NULL;
+ }
+
+ viapar->fbmem_free -= CURSOR_SIZE;
+ viapar->shared->cursor_vram_addr = viapar->fbmem_free;
+ viapar->fbmem_used += CURSOR_SIZE;
+
+ viapar->fbmem_free -= VQ_SIZE;
+ viapar->shared->vq_vram_addr = viapar->fbmem_free;
+ viapar->fbmem_used += VQ_SIZE;
+
+ /* Init AGP and VQ regs */
+ switch (chip_name) {
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M900:
+ writel(0x00100000, engine + VIA_REG_CR_TRANSET);
+ writel(0x680A0000, engine + VIA_REG_CR_TRANSPACE);
+ writel(0x02000000, engine + VIA_REG_CR_TRANSPACE);
+ break;
+
+ default:
+ writel(0x00100000, engine + VIA_REG_TRANSET);
+ writel(0x00000000, engine + VIA_REG_TRANSPACE);
+ writel(0x00333004, engine + VIA_REG_TRANSPACE);
+ writel(0x60000000, engine + VIA_REG_TRANSPACE);
+ writel(0x61000000, engine + VIA_REG_TRANSPACE);
+ writel(0x62000000, engine + VIA_REG_TRANSPACE);
+ writel(0x63000000, engine + VIA_REG_TRANSPACE);
+ writel(0x64000000, engine + VIA_REG_TRANSPACE);
+ writel(0x7D000000, engine + VIA_REG_TRANSPACE);
+
+ writel(0xFE020000, engine + VIA_REG_TRANSET);
+ writel(0x00000000, engine + VIA_REG_TRANSPACE);
+ break;
+ }
+
+ /* Enable VQ */
+ vq_start_addr = viapar->shared->vq_vram_addr;
+ vq_end_addr = viapar->shared->vq_vram_addr + VQ_SIZE - 1;
+
+ vq_start_low = 0x50000000 | (vq_start_addr & 0xFFFFFF);
+ vq_end_low = 0x51000000 | (vq_end_addr & 0xFFFFFF);
+ vq_high = 0x52000000 | ((vq_start_addr & 0xFF000000) >> 24) |
+ ((vq_end_addr & 0xFF000000) >> 16);
+ vq_len = 0x53000000 | (VQ_SIZE >> 3);
+
+ switch (chip_name) {
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M900:
+ vq_start_low |= 0x20000000;
+ vq_end_low |= 0x20000000;
+ vq_high |= 0x20000000;
+ vq_len |= 0x20000000;
+
+ writel(0x00100000, engine + VIA_REG_CR_TRANSET);
+ writel(vq_high, engine + VIA_REG_CR_TRANSPACE);
+ writel(vq_start_low, engine + VIA_REG_CR_TRANSPACE);
+ writel(vq_end_low, engine + VIA_REG_CR_TRANSPACE);
+ writel(vq_len, engine + VIA_REG_CR_TRANSPACE);
+ writel(0x74301001, engine + VIA_REG_CR_TRANSPACE);
+ writel(0x00000000, engine + VIA_REG_CR_TRANSPACE);
+ break;
+ default:
+ writel(0x00FE0000, engine + VIA_REG_TRANSET);
+ writel(0x080003FE, engine + VIA_REG_TRANSPACE);
+ writel(0x0A00027C, engine + VIA_REG_TRANSPACE);
+ writel(0x0B000260, engine + VIA_REG_TRANSPACE);
+ writel(0x0C000274, engine + VIA_REG_TRANSPACE);
+ writel(0x0D000264, engine + VIA_REG_TRANSPACE);
+ writel(0x0E000000, engine + VIA_REG_TRANSPACE);
+ writel(0x0F000020, engine + VIA_REG_TRANSPACE);
+ writel(0x1000027E, engine + VIA_REG_TRANSPACE);
+ writel(0x110002FE, engine + VIA_REG_TRANSPACE);
+ writel(0x200F0060, engine + VIA_REG_TRANSPACE);
+
+ writel(0x00000006, engine + VIA_REG_TRANSPACE);
+ writel(0x40008C0F, engine + VIA_REG_TRANSPACE);
+ writel(0x44000000, engine + VIA_REG_TRANSPACE);
+ writel(0x45080C04, engine + VIA_REG_TRANSPACE);
+ writel(0x46800408, engine + VIA_REG_TRANSPACE);
+
+ writel(vq_high, engine + VIA_REG_TRANSPACE);
+ writel(vq_start_low, engine + VIA_REG_TRANSPACE);
+ writel(vq_end_low, engine + VIA_REG_TRANSPACE);
+ writel(vq_len, engine + VIA_REG_TRANSPACE);
+ break;
+ }
+
/* Set Cursor Image Base Address */
- writel(viaparinfo->cursor_start,
- viaparinfo->io_virt + VIA_REG_CURSOR_MODE);
- writel(0x0, viaparinfo->io_virt + VIA_REG_CURSOR_POS);
- writel(0x0, viaparinfo->io_virt + VIA_REG_CURSOR_ORG);
- writel(0x0, viaparinfo->io_virt + VIA_REG_CURSOR_BG);
- writel(0x0, viaparinfo->io_virt + VIA_REG_CURSOR_FG);
+ writel(viapar->shared->cursor_vram_addr, engine + VIA_REG_CURSOR_MODE);
+ writel(0x0, engine + VIA_REG_CURSOR_POS);
+ writel(0x0, engine + VIA_REG_CURSOR_ORG);
+ writel(0x0, engine + VIA_REG_CURSOR_BG);
+ writel(0x0, engine + VIA_REG_CURSOR_FG);
+ return 0;
}
void viafb_show_hw_cursor(struct fb_info *info, int Status)
{
- u32 temp;
- u32 iga_path = ((struct viafb_par *)(info->par))->iga_path;
+ struct viafb_par *viapar = info->par;
+ u32 temp, iga_path = viapar->iga_path;
- temp = readl(viaparinfo->io_virt + VIA_REG_CURSOR_MODE);
+ temp = readl(viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
switch (Status) {
case HW_Cursor_ON:
temp |= 0x1;
@@ -259,25 +460,27 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
default:
temp &= 0x7FFFFFFF;
}
- writel(temp, viaparinfo->io_virt + VIA_REG_CURSOR_MODE);
+ writel(temp, viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
}
-int viafb_wait_engine_idle(void)
+void viafb_wait_engine_idle(struct fb_info *info)
{
+ struct viafb_par *viapar = info->par;
int loop = 0;
- while (!(readl(viaparinfo->io_virt + VIA_REG_STATUS) &
+ while (!(readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
loop++;
cpu_relax();
}
- while ((readl(viaparinfo->io_virt + VIA_REG_STATUS) &
+ while ((readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)) &&
(loop < MAXLOOP)) {
loop++;
cpu_relax();
}
- return loop >= MAXLOOP;
+ if (loop >= MAXLOOP)
+ printk(KERN_ERR "viafb_wait_engine_idle: not syncing\n");
}
diff --git a/drivers/video/via/accel.h b/drivers/video/via/accel.h
index 29bf854e8cc..615c84ad0f0 100644
--- a/drivers/video/via/accel.h
+++ b/drivers/video/via/accel.h
@@ -159,11 +159,12 @@
#define MAXLOOP 0xFFFFFF
-void viafb_init_accel(void);
-void viafb_init_2d_engine(void);
-void set_2d_color_depth(int);
-void viafb_hw_cursor_init(void);
-void viafb_show_hw_cursor(struct fb_info *info, int Status); int
-viafb_wait_engine_idle(void); void viafb_set_2d_color_depth(int bpp);
+#define VIA_BITBLT_COLOR 1
+#define VIA_BITBLT_MONO 2
+#define VIA_BITBLT_FILL 3
+
+int viafb_init_engine(struct fb_info *info);
+void viafb_show_hw_cursor(struct fb_info *info, int Status);
+void viafb_wait_engine_idle(struct fb_info *info);
#endif /* __ACCEL_H__ */
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index dde95edc387..474f428aea9 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -68,6 +68,9 @@
#define UNICHROME_VX800 11
#define UNICHROME_VX800_DID 0x1122
+#define UNICHROME_VX855 12
+#define UNICHROME_VX855_DID 0x5122
+
/**************************************************/
/* Definition TMDS Trasmitter Information */
/**************************************************/
@@ -122,7 +125,6 @@ struct lvds_chip_information {
struct chip_information {
int gfx_chip_name;
int gfx_chip_revision;
- int chip_on_slot;
struct tmds_chip_information tmds_chip_info;
struct lvds_chip_information lvds_chip_info;
struct lvds_chip_information lvds_chip_info2;
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index d6965447ca6..c5c32b6b6e6 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -160,7 +160,7 @@ int viafb_tmds_trasmitter_identify(void)
static void tmds_register_write(int index, u8 data)
{
- viaparinfo->i2c_stuff.i2c_port =
+ viaparinfo->shared->i2c_stuff.i2c_port =
viaparinfo->chip_info->tmds_chip_info.i2c_port;
viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.
@@ -172,7 +172,7 @@ static int tmds_register_read(int index)
{
u8 data;
- viaparinfo->i2c_stuff.i2c_port =
+ viaparinfo->shared->i2c_stuff.i2c_port =
viaparinfo->chip_info->tmds_chip_info.i2c_port;
viafb_i2c_readbyte((u8) viaparinfo->chip_info->
tmds_chip_info.tmds_chip_slave_addr,
@@ -182,7 +182,7 @@ static int tmds_register_read(int index)
static int tmds_register_read_bytes(int index, u8 *buff, int buff_len)
{
- viaparinfo->i2c_stuff.i2c_port =
+ viaparinfo->shared->i2c_stuff.i2c_port =
viaparinfo->chip_info->tmds_chip_info.i2c_port;
viafb_i2c_readbytes((u8) viaparinfo->chip_info->tmds_chip_info.
tmds_chip_slave_addr, (u8) index, buff, buff_len);
diff --git a/drivers/video/via/global.c b/drivers/video/via/global.c
index 468be2425af..b675cdbb03a 100644
--- a/drivers/video/via/global.c
+++ b/drivers/video/via/global.c
@@ -32,7 +32,6 @@ int viafb_lcd_dsp_method = LCD_EXPANDSION;
int viafb_lcd_mode = LCD_OPENLDI;
int viafb_bpp = 32;
int viafb_bpp1 = 32;
-int viafb_accel = 1;
int viafb_CRT_ON = 1;
int viafb_DVI_ON;
int viafb_LCD_ON ;
@@ -46,13 +45,11 @@ int viafb_hotplug_refresh = 60;
unsigned int viafb_second_offset;
int viafb_second_size;
int viafb_primary_dev = None_Device;
-void __iomem *viafb_FB_MM;
unsigned int viafb_second_xres = 640;
unsigned int viafb_second_yres = 480;
unsigned int viafb_second_virtual_xres;
unsigned int viafb_second_virtual_yres;
int viafb_lcd_panel_id = LCD_PANEL_ID_MAXIMUM + 1;
-struct fb_cursor viacursor;
struct fb_info *viafbinfo;
struct fb_info *viafbinfo1;
struct viafb_par *viaparinfo;
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 7543d5f7e30..d69d0ca99c2 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -77,8 +77,6 @@ extern int viafb_hotplug_Yres;
extern int viafb_hotplug_bpp;
extern int viafb_hotplug_refresh;
extern int viafb_primary_dev;
-extern void __iomem *viafb_FB_MM;
-extern struct fb_cursor viacursor;
extern unsigned int viafb_second_xres;
extern unsigned int viafb_second_yres;
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index c8960003f47..3e083ff67ae 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -21,125 +21,143 @@
#include "global.h"
-static const struct pci_device_id_info pciidlist[] = {
- {PCI_VIA_VENDOR_ID, UNICHROME_CLE266_DID, UNICHROME_CLE266},
- {PCI_VIA_VENDOR_ID, UNICHROME_PM800_DID, UNICHROME_PM800},
- {PCI_VIA_VENDOR_ID, UNICHROME_K400_DID, UNICHROME_K400},
- {PCI_VIA_VENDOR_ID, UNICHROME_K800_DID, UNICHROME_K800},
- {PCI_VIA_VENDOR_ID, UNICHROME_CN700_DID, UNICHROME_CN700},
- {PCI_VIA_VENDOR_ID, UNICHROME_P4M890_DID, UNICHROME_P4M890},
- {PCI_VIA_VENDOR_ID, UNICHROME_K8M890_DID, UNICHROME_K8M890},
- {PCI_VIA_VENDOR_ID, UNICHROME_CX700_DID, UNICHROME_CX700},
- {PCI_VIA_VENDOR_ID, UNICHROME_P4M900_DID, UNICHROME_P4M900},
- {PCI_VIA_VENDOR_ID, UNICHROME_CN750_DID, UNICHROME_CN750},
- {PCI_VIA_VENDOR_ID, UNICHROME_VX800_DID, UNICHROME_VX800},
- {0, 0, 0}
-};
-
-struct offset offset_reg = {
- /* IGA1 Offset Register */
- {IGA1_OFFSET_REG_NUM, {{CR13, 0, 7}, {CR35, 5, 7} } },
- /* IGA2 Offset Register */
- {IGA2_OFFSET_REG_NUM, {{CR66, 0, 7}, {CR67, 0, 1} } }
-};
-
static struct pll_map pll_value[] = {
- {CLK_25_175M, CLE266_PLL_25_175M, K800_PLL_25_175M, CX700_25_175M},
- {CLK_29_581M, CLE266_PLL_29_581M, K800_PLL_29_581M, CX700_29_581M},
- {CLK_26_880M, CLE266_PLL_26_880M, K800_PLL_26_880M, CX700_26_880M},
- {CLK_31_490M, CLE266_PLL_31_490M, K800_PLL_31_490M, CX700_31_490M},
- {CLK_31_500M, CLE266_PLL_31_500M, K800_PLL_31_500M, CX700_31_500M},
- {CLK_31_728M, CLE266_PLL_31_728M, K800_PLL_31_728M, CX700_31_728M},
- {CLK_32_668M, CLE266_PLL_32_668M, K800_PLL_32_668M, CX700_32_668M},
- {CLK_36_000M, CLE266_PLL_36_000M, K800_PLL_36_000M, CX700_36_000M},
- {CLK_40_000M, CLE266_PLL_40_000M, K800_PLL_40_000M, CX700_40_000M},
- {CLK_41_291M, CLE266_PLL_41_291M, K800_PLL_41_291M, CX700_41_291M},
- {CLK_43_163M, CLE266_PLL_43_163M, K800_PLL_43_163M, CX700_43_163M},
- {CLK_45_250M, CLE266_PLL_45_250M, K800_PLL_45_250M, CX700_45_250M},
- {CLK_46_000M, CLE266_PLL_46_000M, K800_PLL_46_000M, CX700_46_000M},
- {CLK_46_996M, CLE266_PLL_46_996M, K800_PLL_46_996M, CX700_46_996M},
- {CLK_48_000M, CLE266_PLL_48_000M, K800_PLL_48_000M, CX700_48_000M},
- {CLK_48_875M, CLE266_PLL_48_875M, K800_PLL_48_875M, CX700_48_875M},
- {CLK_49_500M, CLE266_PLL_49_500M, K800_PLL_49_500M, CX700_49_500M},
- {CLK_52_406M, CLE266_PLL_52_406M, K800_PLL_52_406M, CX700_52_406M},
- {CLK_52_977M, CLE266_PLL_52_977M, K800_PLL_52_977M, CX700_52_977M},
- {CLK_56_250M, CLE266_PLL_56_250M, K800_PLL_56_250M, CX700_56_250M},
- {CLK_60_466M, CLE266_PLL_60_466M, K800_PLL_60_466M, CX700_60_466M},
- {CLK_61_500M, CLE266_PLL_61_500M, K800_PLL_61_500M, CX700_61_500M},
- {CLK_65_000M, CLE266_PLL_65_000M, K800_PLL_65_000M, CX700_65_000M},
- {CLK_65_178M, CLE266_PLL_65_178M, K800_PLL_65_178M, CX700_65_178M},
- {CLK_66_750M, CLE266_PLL_66_750M, K800_PLL_66_750M, CX700_66_750M},
- {CLK_68_179M, CLE266_PLL_68_179M, K800_PLL_68_179M, CX700_68_179M},
- {CLK_69_924M, CLE266_PLL_69_924M, K800_PLL_69_924M, CX700_69_924M},
- {CLK_70_159M, CLE266_PLL_70_159M, K800_PLL_70_159M, CX700_70_159M},
- {CLK_72_000M, CLE266_PLL_72_000M, K800_PLL_72_000M, CX700_72_000M},
- {CLK_78_750M, CLE266_PLL_78_750M, K800_PLL_78_750M, CX700_78_750M},
- {CLK_80_136M, CLE266_PLL_80_136M, K800_PLL_80_136M, CX700_80_136M},
- {CLK_83_375M, CLE266_PLL_83_375M, K800_PLL_83_375M, CX700_83_375M},
- {CLK_83_950M, CLE266_PLL_83_950M, K800_PLL_83_950M, CX700_83_950M},
- {CLK_84_750M, CLE266_PLL_84_750M, K800_PLL_84_750M, CX700_84_750M},
- {CLK_85_860M, CLE266_PLL_85_860M, K800_PLL_85_860M, CX700_85_860M},
- {CLK_88_750M, CLE266_PLL_88_750M, K800_PLL_88_750M, CX700_88_750M},
- {CLK_94_500M, CLE266_PLL_94_500M, K800_PLL_94_500M, CX700_94_500M},
- {CLK_97_750M, CLE266_PLL_97_750M, K800_PLL_97_750M, CX700_97_750M},
+ {CLK_25_175M, CLE266_PLL_25_175M, K800_PLL_25_175M,
+ CX700_25_175M, VX855_25_175M},
+ {CLK_29_581M, CLE266_PLL_29_581M, K800_PLL_29_581M,
+ CX700_29_581M, VX855_29_581M},
+ {CLK_26_880M, CLE266_PLL_26_880M, K800_PLL_26_880M,
+ CX700_26_880M, VX855_26_880M},
+ {CLK_31_490M, CLE266_PLL_31_490M, K800_PLL_31_490M,
+ CX700_31_490M, VX855_31_490M},
+ {CLK_31_500M, CLE266_PLL_31_500M, K800_PLL_31_500M,
+ CX700_31_500M, VX855_31_500M},
+ {CLK_31_728M, CLE266_PLL_31_728M, K800_PLL_31_728M,
+ CX700_31_728M, VX855_31_728M},
+ {CLK_32_668M, CLE266_PLL_32_668M, K800_PLL_32_668M,
+ CX700_32_668M, VX855_32_668M},
+ {CLK_36_000M, CLE266_PLL_36_000M, K800_PLL_36_000M,
+ CX700_36_000M, VX855_36_000M},
+ {CLK_40_000M, CLE266_PLL_40_000M, K800_PLL_40_000M,
+ CX700_40_000M, VX855_40_000M},
+ {CLK_41_291M, CLE266_PLL_41_291M, K800_PLL_41_291M,
+ CX700_41_291M, VX855_41_291M},
+ {CLK_43_163M, CLE266_PLL_43_163M, K800_PLL_43_163M,
+ CX700_43_163M, VX855_43_163M},
+ {CLK_45_250M, CLE266_PLL_45_250M, K800_PLL_45_250M,
+ CX700_45_250M, VX855_45_250M},
+ {CLK_46_000M, CLE266_PLL_46_000M, K800_PLL_46_000M,
+ CX700_46_000M, VX855_46_000M},
+ {CLK_46_996M, CLE266_PLL_46_996M, K800_PLL_46_996M,
+ CX700_46_996M, VX855_46_996M},
+ {CLK_48_000M, CLE266_PLL_48_000M, K800_PLL_48_000M,
+ CX700_48_000M, VX855_48_000M},
+ {CLK_48_875M, CLE266_PLL_48_875M, K800_PLL_48_875M,
+ CX700_48_875M, VX855_48_875M},
+ {CLK_49_500M, CLE266_PLL_49_500M, K800_PLL_49_500M,
+ CX700_49_500M, VX855_49_500M},
+ {CLK_52_406M, CLE266_PLL_52_406M, K800_PLL_52_406M,
+ CX700_52_406M, VX855_52_406M},
+ {CLK_52_977M, CLE266_PLL_52_977M, K800_PLL_52_977M,
+ CX700_52_977M, VX855_52_977M},
+ {CLK_56_250M, CLE266_PLL_56_250M, K800_PLL_56_250M,
+ CX700_56_250M, VX855_56_250M},
+ {CLK_60_466M, CLE266_PLL_60_466M, K800_PLL_60_466M,
+ CX700_60_466M, VX855_60_466M},
+ {CLK_61_500M, CLE266_PLL_61_500M, K800_PLL_61_500M,
+ CX700_61_500M, VX855_61_500M},
+ {CLK_65_000M, CLE266_PLL_65_000M, K800_PLL_65_000M,
+ CX700_65_000M, VX855_65_000M},
+ {CLK_65_178M, CLE266_PLL_65_178M, K800_PLL_65_178M,
+ CX700_65_178M, VX855_65_178M},
+ {CLK_66_750M, CLE266_PLL_66_750M, K800_PLL_66_750M,
+ CX700_66_750M, VX855_66_750M},
+ {CLK_68_179M, CLE266_PLL_68_179M, K800_PLL_68_179M,
+ CX700_68_179M, VX855_68_179M},
+ {CLK_69_924M, CLE266_PLL_69_924M, K800_PLL_69_924M,
+ CX700_69_924M, VX855_69_924M},
+ {CLK_70_159M, CLE266_PLL_70_159M, K800_PLL_70_159M,
+ CX700_70_159M, VX855_70_159M},
+ {CLK_72_000M, CLE266_PLL_72_000M, K800_PLL_72_000M,
+ CX700_72_000M, VX855_72_000M},
+ {CLK_78_750M, CLE266_PLL_78_750M, K800_PLL_78_750M,
+ CX700_78_750M, VX855_78_750M},
+ {CLK_80_136M, CLE266_PLL_80_136M, K800_PLL_80_136M,
+ CX700_80_136M, VX855_80_136M},
+ {CLK_83_375M, CLE266_PLL_83_375M, K800_PLL_83_375M,
+ CX700_83_375M, VX855_83_375M},
+ {CLK_83_950M, CLE266_PLL_83_950M, K800_PLL_83_950M,
+ CX700_83_950M, VX855_83_950M},
+ {CLK_84_750M, CLE266_PLL_84_750M, K800_PLL_84_750M,
+ CX700_84_750M, VX855_84_750M},
+ {CLK_85_860M, CLE266_PLL_85_860M, K800_PLL_85_860M,
+ CX700_85_860M, VX855_85_860M},
+ {CLK_88_750M, CLE266_PLL_88_750M, K800_PLL_88_750M,
+ CX700_88_750M, VX855_88_750M},
+ {CLK_94_500M, CLE266_PLL_94_500M, K800_PLL_94_500M,
+ CX700_94_500M, VX855_94_500M},
+ {CLK_97_750M, CLE266_PLL_97_750M, K800_PLL_97_750M,
+ CX700_97_750M, VX855_97_750M},
{CLK_101_000M, CLE266_PLL_101_000M, K800_PLL_101_000M,
- CX700_101_000M},
+ CX700_101_000M, VX855_101_000M},
{CLK_106_500M, CLE266_PLL_106_500M, K800_PLL_106_500M,
- CX700_106_500M},
+ CX700_106_500M, VX855_106_500M},
{CLK_108_000M, CLE266_PLL_108_000M, K800_PLL_108_000M,
- CX700_108_000M},
+ CX700_108_000M, VX855_108_000M},
{CLK_113_309M, CLE266_PLL_113_309M, K800_PLL_113_309M,
- CX700_113_309M},
+ CX700_113_309M, VX855_113_309M},
{CLK_118_840M, CLE266_PLL_118_840M, K800_PLL_118_840M,
- CX700_118_840M},
+ CX700_118_840M, VX855_118_840M},
{CLK_119_000M, CLE266_PLL_119_000M, K800_PLL_119_000M,
- CX700_119_000M},
+ CX700_119_000M, VX855_119_000M},
{CLK_121_750M, CLE266_PLL_121_750M, K800_PLL_121_750M,
- CX700_121_750M},
+ CX700_121_750M, 0},
{CLK_125_104M, CLE266_PLL_125_104M, K800_PLL_125_104M,
- CX700_125_104M},
+ CX700_125_104M, 0},
{CLK_133_308M, CLE266_PLL_133_308M, K800_PLL_133_308M,
- CX700_133_308M},
+ CX700_133_308M, 0},
{CLK_135_000M, CLE266_PLL_135_000M, K800_PLL_135_000M,
- CX700_135_000M},
+ CX700_135_000M, VX855_135_000M},
{CLK_136_700M, CLE266_PLL_136_700M, K800_PLL_136_700M,
- CX700_136_700M},
+ CX700_136_700M, VX855_136_700M},
{CLK_138_400M, CLE266_PLL_138_400M, K800_PLL_138_400M,
- CX700_138_400M},
+ CX700_138_400M, VX855_138_400M},
{CLK_146_760M, CLE266_PLL_146_760M, K800_PLL_146_760M,
- CX700_146_760M},
+ CX700_146_760M, VX855_146_760M},
{CLK_153_920M, CLE266_PLL_153_920M, K800_PLL_153_920M,
- CX700_153_920M},
+ CX700_153_920M, VX855_153_920M},
{CLK_156_000M, CLE266_PLL_156_000M, K800_PLL_156_000M,
- CX700_156_000M},
+ CX700_156_000M, VX855_156_000M},
{CLK_157_500M, CLE266_PLL_157_500M, K800_PLL_157_500M,
- CX700_157_500M},
+ CX700_157_500M, VX855_157_500M},
{CLK_162_000M, CLE266_PLL_162_000M, K800_PLL_162_000M,
- CX700_162_000M},
+ CX700_162_000M, VX855_162_000M},
{CLK_187_000M, CLE266_PLL_187_000M, K800_PLL_187_000M,
- CX700_187_000M},
+ CX700_187_000M, VX855_187_000M},
{CLK_193_295M, CLE266_PLL_193_295M, K800_PLL_193_295M,
- CX700_193_295M},
+ CX700_193_295M, VX855_193_295M},
{CLK_202_500M, CLE266_PLL_202_500M, K800_PLL_202_500M,
- CX700_202_500M},
+ CX700_202_500M, VX855_202_500M},
{CLK_204_000M, CLE266_PLL_204_000M, K800_PLL_204_000M,
- CX700_204_000M},
+ CX700_204_000M, VX855_204_000M},
{CLK_218_500M, CLE266_PLL_218_500M, K800_PLL_218_500M,
- CX700_218_500M},
+ CX700_218_500M, VX855_218_500M},
{CLK_234_000M, CLE266_PLL_234_000M, K800_PLL_234_000M,
- CX700_234_000M},
+ CX700_234_000M, VX855_234_000M},
{CLK_267_250M, CLE266_PLL_267_250M, K800_PLL_267_250M,
- CX700_267_250M},
+ CX700_267_250M, VX855_267_250M},
{CLK_297_500M, CLE266_PLL_297_500M, K800_PLL_297_500M,
- CX700_297_500M},
- {CLK_74_481M, CLE266_PLL_74_481M, K800_PLL_74_481M, CX700_74_481M},
+ CX700_297_500M, VX855_297_500M},
+ {CLK_74_481M, CLE266_PLL_74_481M, K800_PLL_74_481M,
+ CX700_74_481M, VX855_74_481M},
{CLK_172_798M, CLE266_PLL_172_798M, K800_PLL_172_798M,
- CX700_172_798M},
+ CX700_172_798M, VX855_172_798M},
{CLK_122_614M, CLE266_PLL_122_614M, K800_PLL_122_614M,
- CX700_122_614M},
- {CLK_74_270M, CLE266_PLL_74_270M, K800_PLL_74_270M, CX700_74_270M},
+ CX700_122_614M, VX855_122_614M},
+ {CLK_74_270M, CLE266_PLL_74_270M, K800_PLL_74_270M,
+ CX700_74_270M, 0},
{CLK_148_500M, CLE266_PLL_148_500M, K800_PLL_148_500M,
- CX700_148_500M}
+ CX700_148_500M, VX855_148_500M}
};
static struct fifo_depth_select display_fifo_depth_reg = {
@@ -508,7 +526,8 @@ static void set_dvi_output_path(int set_iga, int output_interface);
static void set_lcd_output_path(int set_iga, int output_interface);
static int search_mode_setting(int ModeInfoIndex);
static void load_fix_bit_crtc_reg(void);
-static void init_gfx_chip_info(void);
+static void init_gfx_chip_info(struct pci_dev *pdev,
+ const struct pci_device_id *pdi);
static void init_tmds_chip_info(void);
static void init_lvds_chip_info(void);
static void device_screen_off(void);
@@ -518,7 +537,6 @@ static void device_off(void);
static void device_on(void);
static void enable_second_display_channel(void);
static void disable_second_display_channel(void);
-static int get_fb_size_from_pci(void);
void viafb_write_reg(u8 index, u16 io_port, u8 data)
{
@@ -629,70 +647,43 @@ void viafb_set_iga_path(void)
}
}
-void viafb_set_start_addr(void)
+void viafb_set_primary_address(u32 addr)
{
- unsigned long offset = 0, tmp = 0, size = 0;
- unsigned long length;
-
- DEBUG_MSG(KERN_INFO "viafb_set_start_addr!\n");
- viafb_unlock_crt();
- /* update starting address of IGA1 */
- viafb_write_reg(CR0C, VIACR, 0x00); /*initial starting address */
- viafb_write_reg(CR0D, VIACR, 0x00);
- viafb_write_reg(CR34, VIACR, 0x00);
- viafb_write_reg_mask(CR48, VIACR, 0x00, 0x1F);
-
- if (viafb_dual_fb) {
- viaparinfo->iga_path = IGA1;
- viaparinfo1->iga_path = IGA2;
- }
-
- if (viafb_SAMM_ON == 1) {
- if (!viafb_dual_fb) {
- if (viafb_second_size)
- size = viafb_second_size * 1024 * 1024;
- else
- size = 8 * 1024 * 1024;
- } else {
+ DEBUG_MSG(KERN_DEBUG "viafb_set_primary_address(0x%08X)\n", addr);
+ viafb_write_reg(CR0D, VIACR, addr & 0xFF);
+ viafb_write_reg(CR0C, VIACR, (addr >> 8) & 0xFF);
+ viafb_write_reg(CR34, VIACR, (addr >> 16) & 0xFF);
+ viafb_write_reg_mask(CR48, VIACR, (addr >> 24) & 0x1F, 0x1F);
+}
- size = viaparinfo1->memsize;
- }
- offset = viafb_second_offset;
- DEBUG_MSG(KERN_INFO
- "viafb_second_size=%lx, second start_adddress=%lx\n",
- size, offset);
- }
- if (viafb_SAMM_ON == 1) {
- offset = offset >> 3;
-
- tmp = viafb_read_reg(VIACR, 0x62) & 0x01;
- tmp |= (offset & 0x7F) << 1;
- viafb_write_reg(CR62, VIACR, tmp);
- viafb_write_reg(CR63, VIACR, ((offset & 0x7F80) >> 7));
- viafb_write_reg(CR64, VIACR, ((offset & 0x7F8000) >> 15));
- viafb_write_reg(CRA3, VIACR, ((offset & 0x3800000) >> 23));
- } else {
- /* update starting address */
- viafb_write_reg(CR62, VIACR, 0x00);
- viafb_write_reg(CR63, VIACR, 0x00);
- viafb_write_reg(CR64, VIACR, 0x00);
- viafb_write_reg(CRA3, VIACR, 0x00);
- }
+void viafb_set_secondary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_address(0x%08X)\n", addr);
+ /* secondary display supports only quadword aligned memory */
+ viafb_write_reg_mask(CR62, VIACR, (addr >> 2) & 0xFE, 0xFE);
+ viafb_write_reg(CR63, VIACR, (addr >> 10) & 0xFF);
+ viafb_write_reg(CR64, VIACR, (addr >> 18) & 0xFF);
+ viafb_write_reg_mask(CRA3, VIACR, (addr >> 26) & 0x07, 0x07);
+}
- if (viafb_SAMM_ON == 1) {
- if (viafb_accel) {
- if (!viafb_dual_fb)
- length = size - viaparinfo->fbmem_used;
- else
- length = size - viaparinfo1->fbmem_used;
- } else
- length = size;
- offset = (unsigned long)(void *)viafb_FB_MM +
- viafb_second_offset;
- memset((void *)offset, 0, length);
- }
+void viafb_set_primary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "viafb_set_primary_pitch(0x%08X)\n", pitch);
+ /* the spec does not say that the first adapter skips 3 bits, but the
+ * old code did it and it seems reasonable by analogy with the 2nd adapter
+ */
+ pitch = pitch >> 3;
+ viafb_write_reg(0x13, VIACR, pitch & 0xFF);
+ viafb_write_reg_mask(0x35, VIACR, (pitch >> (8 - 5)) & 0xE0, 0xE0);
+}
- viafb_lock_crt();
+void viafb_set_secondary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_pitch(0x%08X)\n", pitch);
+ pitch = pitch >> 3;
+ viafb_write_reg(0x66, VIACR, pitch & 0xFF);
+ viafb_write_reg_mask(0x67, VIACR, (pitch >> 8) & 0x03, 0x03);
+ viafb_write_reg_mask(0x71, VIACR, (pitch >> (10 - 7)) & 0x80, 0x80);
}
void viafb_set_output_path(int device, int set_iga, int output_interface)
@@ -1123,30 +1114,6 @@ void viafb_write_regx(struct io_reg RegTable[], int ItemNum)
}
}
-void viafb_load_offset_reg(int h_addr, int bpp_byte, int set_iga)
-{
- int reg_value;
- int viafb_load_reg_num;
- struct io_register *reg;
-
- switch (set_iga) {
- case IGA1_IGA2:
- case IGA1:
- reg_value = IGA1_OFFSET_FORMULA(h_addr, bpp_byte);
- viafb_load_reg_num = offset_reg.iga1_offset_reg.reg_num;
- reg = offset_reg.iga1_offset_reg.reg;
- viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR);
- if (set_iga == IGA1)
- break;
- case IGA2:
- reg_value = IGA2_OFFSET_FORMULA(h_addr, bpp_byte);
- viafb_load_reg_num = offset_reg.iga2_offset_reg.reg_num;
- reg = offset_reg.iga2_offset_reg.reg;
- viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR);
- break;
- }
-}
-
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga)
{
int reg_value;
@@ -1277,6 +1244,15 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
VX800_IGA1_DISPLAY_QUEUE_EXPIRE_NUM;
}
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX855) {
+ iga1_fifo_max_depth = VX855_IGA1_FIFO_MAX_DEPTH;
+ iga1_fifo_threshold = VX855_IGA1_FIFO_THRESHOLD;
+ iga1_fifo_high_threshold =
+ VX855_IGA1_FIFO_HIGH_THRESHOLD;
+ iga1_display_queue_expire_num =
+ VX855_IGA1_DISPLAY_QUEUE_EXPIRE_NUM;
+ }
+
/* Set Display FIFO Depath Select */
reg_value = IGA1_FIFO_DEPTH_SELECT_FORMULA(iga1_fifo_max_depth);
viafb_load_reg_num =
@@ -1408,6 +1384,15 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
VX800_IGA2_DISPLAY_QUEUE_EXPIRE_NUM;
}
+ if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_VX855) {
+ iga2_fifo_max_depth = VX855_IGA2_FIFO_MAX_DEPTH;
+ iga2_fifo_threshold = VX855_IGA2_FIFO_THRESHOLD;
+ iga2_fifo_high_threshold =
+ VX855_IGA2_FIFO_HIGH_THRESHOLD;
+ iga2_display_queue_expire_num =
+ VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM;
+ }
+
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) {
/* Set Display FIFO Depath Select */
reg_value =
@@ -1496,6 +1481,8 @@ u32 viafb_get_clk_value(int clk)
case UNICHROME_P4M900:
case UNICHROME_VX800:
return pll_value[i].cx700_pll;
+ case UNICHROME_VX855:
+ return pll_value[i].vx855_pll;
}
}
}
@@ -1529,6 +1516,7 @@ void viafb_set_vclock(u32 CLK, int set_iga)
case UNICHROME_P4M890:
case UNICHROME_P4M900:
case UNICHROME_VX800:
+ case UNICHROME_VX855:
viafb_write_reg(SR44, VIASR, CLK / 0x10000);
DEBUG_MSG(KERN_INFO "\nSR44=%x", CLK / 0x10000);
viafb_write_reg(SR45, VIASR, (CLK & 0xFFFF) / 0x100);
@@ -1557,6 +1545,7 @@ void viafb_set_vclock(u32 CLK, int set_iga)
case UNICHROME_P4M890:
case UNICHROME_P4M900:
case UNICHROME_VX800:
+ case UNICHROME_VX855:
viafb_write_reg(SR4A, VIASR, CLK / 0x10000);
viafb_write_reg(SR4B, VIASR, (CLK & 0xFFFF) / 0x100);
viafb_write_reg(SR4C, VIASR, CLK % 0x100);
@@ -1916,7 +1905,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
load_fix_bit_crtc_reg();
viafb_lock_crt();
viafb_write_reg_mask(CR17, VIACR, 0x80, BIT7);
- viafb_load_offset_reg(h_addr, bpp_byte, set_iga);
viafb_load_fetch_count_reg(h_addr, bpp_byte, set_iga);
/* load FIFO */
@@ -1933,9 +1921,10 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
}
-void viafb_init_chip_info(void)
+void viafb_init_chip_info(struct pci_dev *pdev,
+ const struct pci_device_id *pdi)
{
- init_gfx_chip_info();
+ init_gfx_chip_info(pdev, pdi);
init_tmds_chip_info();
init_lvds_chip_info();
@@ -2008,24 +1997,12 @@ void viafb_update_device_setting(int hres, int vres,
}
}
-static void init_gfx_chip_info(void)
+static void init_gfx_chip_info(struct pci_dev *pdev,
+ const struct pci_device_id *pdi)
{
- struct pci_dev *pdev = NULL;
- u32 i;
u8 tmp;
- /* Indentify GFX Chip Name */
- for (i = 0; pciidlist[i].vendor != 0; i++) {
- pdev = pci_get_device(pciidlist[i].vendor,
- pciidlist[i].device, 0);
- if (pdev)
- break;
- }
-
- if (!pciidlist[i].vendor)
- return ;
-
- viaparinfo->chip_info->gfx_chip_name = pciidlist[i].chip_index;
+ viaparinfo->chip_info->gfx_chip_name = pdi->driver_data;
/* Check revision of CLE266 Chip */
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
@@ -2056,8 +2033,6 @@ static void init_gfx_chip_info(void)
CX700_REVISION_700;
}
}
-
- pci_dev_put(pdev);
}
static void init_tmds_chip_info(void)
@@ -2271,11 +2246,12 @@ int viafb_setmode(int vmode_index, int hor_res, int ver_res, int video_bpp,
break;
case UNICHROME_CX700:
- viafb_write_regx(CX700_ModeXregs, NUM_TOTAL_CX700_ModeXregs);
-
case UNICHROME_VX800:
- viafb_write_regx(VX800_ModeXregs, NUM_TOTAL_VX800_ModeXregs);
+ viafb_write_regx(CX700_ModeXregs, NUM_TOTAL_CX700_ModeXregs);
+ break;
+ case UNICHROME_VX855:
+ viafb_write_regx(VX855_ModeXregs, NUM_TOTAL_VX855_ModeXregs);
break;
}
@@ -2291,7 +2267,8 @@ int viafb_setmode(int vmode_index, int hor_res, int ver_res, int video_bpp,
outb(VPIT.SR[i - 1], VIASR + 1);
}
- viafb_set_start_addr();
+ viafb_set_primary_address(0);
+ viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
viafb_set_iga_path();
/* Write CRTC */
@@ -2371,6 +2348,9 @@ int viafb_setmode(int vmode_index, int hor_res, int ver_res, int video_bpp,
}
}
+ viafb_set_primary_pitch(viafbinfo->fix.line_length);
+ viafb_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
+ : viafbinfo->fix.line_length);
/* Update Refresh Rate Setting */
/* Clear On Screen */
@@ -2545,38 +2525,6 @@ void viafb_crt_enable(void)
viafb_write_reg_mask(CR36, VIACR, 0x0, BIT5 + BIT4);
}
-void viafb_get_mmio_info(unsigned long *mmio_base,
- unsigned long *mmio_len)
-{
- struct pci_dev *pdev = NULL;
- u32 vendor, device;
- u32 i;
-
- for (i = 0; pciidlist[i].vendor != 0; i++)
- if (viaparinfo->chip_info->gfx_chip_name ==
- pciidlist[i].chip_index)
- break;
-
- if (!pciidlist[i].vendor)
- return ;
-
- vendor = pciidlist[i].vendor;
- device = pciidlist[i].device;
-
- pdev = pci_get_device(vendor, device, NULL);
-
- if (!pdev) {
- *mmio_base = 0;
- *mmio_len = 0;
- return ;
- }
-
- *mmio_base = pci_resource_start(pdev, 1);
- *mmio_len = pci_resource_len(pdev, 1);
-
- pci_dev_put(pdev);
-}
-
static void enable_second_display_channel(void)
{
/* to enable second display channel. */
@@ -2593,44 +2541,7 @@ static void disable_second_display_channel(void)
viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6);
}
-void viafb_get_fb_info(unsigned int *fb_base, unsigned int *fb_len)
-{
- struct pci_dev *pdev = NULL;
- u32 vendor, device;
- u32 i;
-
- for (i = 0; pciidlist[i].vendor != 0; i++)
- if (viaparinfo->chip_info->gfx_chip_name ==
- pciidlist[i].chip_index)
- break;
-
- if (!pciidlist[i].vendor)
- return ;
-
- vendor = pciidlist[i].vendor;
- device = pciidlist[i].device;
-
- pdev = pci_get_device(vendor, device, NULL);
-
- if (!pdev) {
- *fb_base = viafb_read_reg(VIASR, SR30) << 24;
- *fb_len = viafb_get_memsize();
- DEBUG_MSG(KERN_INFO "Get FB info from SR30!\n");
- DEBUG_MSG(KERN_INFO "fb_base = %08x\n", *fb_base);
- DEBUG_MSG(KERN_INFO "fb_len = %08x\n", *fb_len);
- return ;
- }
-
- *fb_base = (unsigned int)pci_resource_start(pdev, 0);
- *fb_len = get_fb_size_from_pci();
- DEBUG_MSG(KERN_INFO "Get FB info from PCI system!\n");
- DEBUG_MSG(KERN_INFO "fb_base = %08x\n", *fb_base);
- DEBUG_MSG(KERN_INFO "fb_len = %08x\n", *fb_len);
-
- pci_dev_put(pdev);
-}
-
-static int get_fb_size_from_pci(void)
+int viafb_get_fb_size_from_pci(void)
{
unsigned long configid, deviceid, FBSize = 0;
int VideoMemSize;
@@ -2656,6 +2567,7 @@ static int get_fb_size_from_pci(void)
case P4M890_FUNCTION3:
case P4M900_FUNCTION3:
case VX800_FUNCTION3:
+ case VX855_FUNCTION3:
/*case CN750_FUNCTION3: */
outl(configid + 0xA0, (unsigned long)0xCF8);
FBSize = inl((unsigned long)0xCFC);
@@ -2719,6 +2631,10 @@ static int get_fb_size_from_pci(void)
VideoMemSize = (256 << 20); /*256M */
break;
+ case 0x00007000: /* Only on VX855/875 */
+ VideoMemSize = (512 << 20); /*512M */
+ break;
+
default:
VideoMemSize = (32 << 20); /*32M */
break;
@@ -2788,24 +2704,6 @@ void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
}
}
-void viafb_memory_pitch_patch(struct fb_info *info)
-{
- if (info->var.xres != info->var.xres_virtual) {
- viafb_load_offset_reg(info->var.xres_virtual,
- info->var.bits_per_pixel >> 3, IGA1);
-
- if (viafb_SAMM_ON) {
- viafb_load_offset_reg(viafb_second_virtual_xres,
- viafb_bpp1 >> 3,
- IGA2);
- } else {
- viafb_load_offset_reg(info->var.xres_virtual,
- info->var.bits_per_pixel >> 3, IGA2);
- }
-
- }
-}
-
/*According var's xres, yres fill var's other timing information*/
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
int mode_index)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 6ff38fa8569..b874d952b44 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -147,14 +147,8 @@ is reserved, so it may have problem to set 1600x1200 on IGA2. */
/* location: {CR5F,0,4} */
#define IGA2_VER_SYNC_END_REG_NUM 1
-/* Define Offset and Fetch Count Register*/
+/* Define Fetch Count Register*/
-/* location: {CR13,0,7},{CR35,5,7} */
-#define IGA1_OFFSET_REG_NUM 2
-/* 8 bytes alignment. */
-#define IGA1_OFFSER_ALIGN_BYTE 8
-/* x: H resolution, y: color depth */
-#define IGA1_OFFSET_FORMULA(x, y) ((x*y)/IGA1_OFFSER_ALIGN_BYTE)
/* location: {SR1C,0,7},{SR1D,0,1} */
#define IGA1_FETCH_COUNT_REG_NUM 2
/* 16 bytes alignment. */
@@ -164,11 +158,6 @@ is reserved, so it may have problem to set 1600x1200 on IGA2. */
#define IGA1_FETCH_COUNT_FORMULA(x, y) \
(((x*y)/IGA1_FETCH_COUNT_ALIGN_BYTE) + IGA1_FETCH_COUNT_PATCH_VALUE)
-/* location: {CR66,0,7},{CR67,0,1} */
-#define IGA2_OFFSET_REG_NUM 2
-#define IGA2_OFFSET_ALIGN_BYTE 8
-/* x: H resolution, y: color depth */
-#define IGA2_OFFSET_FORMULA(x, y) ((x*y)/IGA2_OFFSET_ALIGN_BYTE)
/* location: {CR65,0,7},{CR67,2,3} */
#define IGA2_FETCH_COUNT_REG_NUM 2
#define IGA2_FETCH_COUNT_ALIGN_BYTE 16
@@ -335,6 +324,17 @@ is reserved, so it may have problem to set 1600x1200 on IGA2. */
/* location: {CR94,0,6} */
#define VX800_IGA2_DISPLAY_QUEUE_EXPIRE_NUM 128
+/* For VT3409 */
+#define VX855_IGA1_FIFO_MAX_DEPTH 400
+#define VX855_IGA1_FIFO_THRESHOLD 320
+#define VX855_IGA1_FIFO_HIGH_THRESHOLD 320
+#define VX855_IGA1_DISPLAY_QUEUE_EXPIRE_NUM 160
+
+#define VX855_IGA2_FIFO_MAX_DEPTH 200
+#define VX855_IGA2_FIFO_THRESHOLD 160
+#define VX855_IGA2_FIFO_HIGH_THRESHOLD 160
+#define VX855_IGA2_DISPLAY_QUEUE_EXPIRE_NUM 320
+
#define IGA1_FIFO_DEPTH_SELECT_REG_NUM 1
#define IGA1_FIFO_THRESHOLD_REG_NUM 2
#define IGA1_FIFO_HIGH_THRESHOLD_REG_NUM 2
@@ -617,23 +617,6 @@ struct iga2_ver_sync_end {
struct io_register reg[IGA2_VER_SYNC_END_REG_NUM];
};
-/* IGA1 Offset Register */
-struct iga1_offset {
- int reg_num;
- struct io_register reg[IGA1_OFFSET_REG_NUM];
-};
-
-/* IGA2 Offset Register */
-struct iga2_offset {
- int reg_num;
- struct io_register reg[IGA2_OFFSET_REG_NUM];
-};
-
-struct offset {
- struct iga1_offset iga1_offset_reg;
- struct iga2_offset iga2_offset_reg;
-};
-
/* IGA1 Fetch Count Register */
struct iga1_fetch_count {
int reg_num;
@@ -716,6 +699,7 @@ struct pll_map {
u32 cle266_pll;
u32 k800_pll;
u32 cx700_pll;
+ u32 vx855_pll;
};
struct rgbLUT {
@@ -860,6 +844,8 @@ struct iga2_crtc_timing {
#define P4M900_FUNCTION3 0x3364
/* VT3353 chipset*/
#define VX800_FUNCTION3 0x3353
+/* VT3409 chipset*/
+#define VX855_FUNCTION3 0x3409
#define NUM_TOTAL_PLL_TABLE ARRAY_SIZE(pll_value)
@@ -883,7 +869,6 @@ extern int viafb_dual_fb;
extern int viafb_LCD2_ON;
extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
-extern int viafb_accel;
extern int viafb_hotplug;
void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask);
@@ -904,7 +889,6 @@ void viafb_write_reg(u8 index, u16 io_port, u8 data);
u8 viafb_read_reg(int io_port, u8 index);
void viafb_lock_crt(void);
void viafb_unlock_crt(void);
-void viafb_load_offset_reg(int h_addr, int bpp_byte, int set_iga);
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga);
void viafb_write_regx(struct io_reg RegTable[], int ItemNum);
struct VideoModeTable *viafb_get_modetbl_pointer(int Index);
@@ -917,17 +901,20 @@ void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
int viafb_setmode(int vmode_index, int hor_res, int ver_res,
int video_bpp, int vmode_index1, int hor_res1,
int ver_res1, int video_bpp1);
-void viafb_init_chip_info(void);
+void viafb_init_chip_info(struct pci_dev *pdev,
+ const struct pci_device_id *pdi);
void viafb_init_dac(int set_iga);
int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp,
int vmode_refresh, int flag);
-void viafb_get_mmio_info(unsigned long *mmio_base,
- unsigned long *mmio_len);
+int viafb_get_fb_size_from_pci(void);
void viafb_set_iga_path(void);
-void viafb_set_start_addr(void);
+void viafb_set_primary_address(u32 addr);
+void viafb_set_secondary_address(u32 addr);
+void viafb_set_primary_pitch(u32 pitch);
+void viafb_set_secondary_pitch(u32 pitch);
void viafb_get_fb_info(unsigned int *fb_base, unsigned int *fb_len);
#endif /* __HW_H__ */
diff --git a/drivers/video/via/ioctl.h b/drivers/video/via/ioctl.h
index 842fe30b986..de899807ead 100644
--- a/drivers/video/via/ioctl.h
+++ b/drivers/video/via/ioctl.h
@@ -50,8 +50,6 @@
#define VIAFB_GET_GAMMA_LUT 0x56494124
#define VIAFB_SET_GAMMA_LUT 0x56494125
#define VIAFB_GET_GAMMA_SUPPORT_STATE 0x56494126
-#define VIAFB_SET_VIDEO_DEVICE 0x56494127
-#define VIAFB_GET_VIDEO_DEVICE 0x56494128
#define VIAFB_SET_SECOND_MODE 0x56494129
#define VIAFB_SYNC_SURFACE 0x56494130
#define VIAFB_GET_DRIVER_CAPS 0x56494131
@@ -179,9 +177,7 @@ struct viafb_ioctl_setting {
unsigned short second_dev_bpp;
/* Indicate which device are primary display device. */
unsigned int primary_device;
- /* Indicate which device will show video. only valid in duoview mode */
- unsigned int video_device_status;
- unsigned int struct_reserved[34];
+ unsigned int struct_reserved[35];
struct viafb_ioctl_lcd_attribute lcd_attributes;
};
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 78c6b338794..e3e597f937a 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -207,13 +207,13 @@ static bool lvds_identify_integratedlvds(void)
int viafb_lvds_trasmitter_identify(void)
{
- viaparinfo->i2c_stuff.i2c_port = I2CPORTINDEX;
+ viaparinfo->shared->i2c_stuff.i2c_port = I2CPORTINDEX;
if (viafb_lvds_identify_vt1636()) {
viaparinfo->chip_info->lvds_chip_info.i2c_port = I2CPORTINDEX;
DEBUG_MSG(KERN_INFO
"Found VIA VT1636 LVDS on port i2c 0x31 \n");
} else {
- viaparinfo->i2c_stuff.i2c_port = GPIOPORTINDEX;
+ viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
if (viafb_lvds_identify_vt1636()) {
viaparinfo->chip_info->lvds_chip_info.i2c_port =
GPIOPORTINDEX;
@@ -470,7 +470,7 @@ static int lvds_register_read(int index)
{
u8 data;
- viaparinfo->i2c_stuff.i2c_port = GPIOPORTINDEX;
+ viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
viafb_i2c_readbyte((u8) viaparinfo->chip_info->
lvds_chip_info.lvds_chip_slave_addr,
(u8) index, &data);
@@ -952,13 +952,10 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
int video_index = plvds_setting_info->lcd_panel_size;
int set_iga = plvds_setting_info->iga_path;
int mode_bpp = plvds_setting_info->bpp;
- int viafb_load_reg_num = 0;
- int reg_value = 0;
int set_hres, set_vres;
int panel_hres, panel_vres;
u32 pll_D_N;
int offset;
- struct io_register *reg = NULL;
struct display_timing mode_crt_reg, panel_crt_reg;
struct crt_mode_table *panel_crt_table = NULL;
struct VideoModeTable *vmode_tbl = NULL;
@@ -1038,16 +1035,11 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
}
/* Offset for simultaneous */
- reg_value = offset;
- viafb_load_reg_num = offset_reg.iga2_offset_reg.reg_num;
- reg = offset_reg.iga2_offset_reg.reg;
- viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR);
+ viafb_set_secondary_pitch(offset << 3);
DEBUG_MSG(KERN_INFO "viafb_load_reg!!\n");
viafb_load_fetch_count_reg(set_hres, 4, IGA2);
/* Fetch count for simultaneous */
} else { /* SAMM */
- /* Offset for IGA2 only */
- viafb_load_offset_reg(set_hres, mode_bpp / 8, set_iga);
/* Fetch count for IGA2 only */
viafb_load_fetch_count_reg(set_hres, mode_bpp / 8, set_iga);
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 2e1254da9c8..7cd03e2a127 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -167,6 +167,10 @@
#define SR4B 0x4B
#define SR4C 0x4C
#define SR52 0x52
+#define SR57 0x57
+#define SR58 0x58
+#define SR59 0x59
+#define SR5D 0x5D
#define SR5E 0x5E
#define SR65 0x65
@@ -966,6 +970,100 @@
#define CX700_297_500M 0x00CE0403
#define CX700_122_614M 0x00870802
+/* PLL for VX855 */
+#define VX855_22_000M 0x007B1005
+#define VX855_25_175M 0x008D1005
+#define VX855_26_719M 0x00961005
+#define VX855_26_880M 0x00961005
+#define VX855_27_000M 0x00971005
+#define VX855_29_581M 0x00A51005
+#define VX855_29_829M 0x00641003
+#define VX855_31_490M 0x00B01005
+#define VX855_31_500M 0x00B01005
+#define VX855_31_728M 0x008E1004
+#define VX855_32_668M 0x00921004
+#define VX855_36_000M 0x00A11004
+#define VX855_40_000M 0x00700C05
+#define VX855_41_291M 0x00730C05
+#define VX855_43_163M 0x00790C05
+#define VX855_45_250M 0x007F0C05 /* 45.46MHz */
+#define VX855_46_000M 0x00670C04
+#define VX855_46_996M 0x00690C04
+#define VX855_48_000M 0x00860C05
+#define VX855_48_875M 0x00890C05
+#define VX855_49_500M 0x00530C03
+#define VX855_52_406M 0x00580C03
+#define VX855_52_977M 0x00940C05
+#define VX855_56_250M 0x009D0C05
+#define VX855_60_466M 0x00A90C05
+#define VX855_61_500M 0x00AC0C05
+#define VX855_65_000M 0x006D0C03
+#define VX855_65_178M 0x00B60C05
+#define VX855_66_750M 0x00700C03 /*67.116MHz */
+#define VX855_67_295M 0x00BC0C05
+#define VX855_68_179M 0x00BF0C05
+#define VX855_68_369M 0x00BF0C05
+#define VX855_69_924M 0x00C30C05
+#define VX855_70_159M 0x00C30C05
+#define VX855_72_000M 0x00A10C04
+#define VX855_73_023M 0x00CC0C05
+#define VX855_74_481M 0x00D10C05
+#define VX855_78_750M 0x006E0805
+#define VX855_79_466M 0x006F0805
+#define VX855_80_136M 0x00700805
+#define VX855_81_627M 0x00720805
+#define VX855_83_375M 0x00750805
+#define VX855_83_527M 0x00750805
+#define VX855_83_950M 0x00750805
+#define VX855_84_537M 0x00760805
+#define VX855_84_750M 0x00760805 /* 84.537Mhz */
+#define VX855_85_500M 0x00760805 /* 85.909080 MHz*/
+#define VX855_85_860M 0x00760805
+#define VX855_85_909M 0x00760805
+#define VX855_88_750M 0x007C0805
+#define VX855_89_489M 0x007D0805
+#define VX855_94_500M 0x00840805
+#define VX855_96_648M 0x00870805
+#define VX855_97_750M 0x00890805
+#define VX855_101_000M 0x008D0805
+#define VX855_106_500M 0x00950805
+#define VX855_108_000M 0x00970805
+#define VX855_110_125M 0x00990805
+#define VX855_112_000M 0x009D0805
+#define VX855_113_309M 0x009F0805
+#define VX855_115_000M 0x00A10805
+#define VX855_118_840M 0x00A60805
+#define VX855_119_000M 0x00A70805
+#define VX855_121_750M 0x00AA0805 /* 121.704MHz */
+#define VX855_122_614M 0x00AC0805
+#define VX855_126_266M 0x00B10805
+#define VX855_130_250M 0x00B60805 /* 130.250 */
+#define VX855_135_000M 0x00BD0805
+#define VX855_136_700M 0x00BF0805
+#define VX855_137_750M 0x00C10805
+#define VX855_138_400M 0x00C20805
+#define VX855_144_300M 0x00CA0805
+#define VX855_146_760M 0x00CE0805
+#define VX855_148_500M 0x00D00805
+#define VX855_153_920M 0x00540402
+#define VX855_156_000M 0x006C0405
+#define VX855_156_867M 0x006E0405
+#define VX855_157_500M 0x006E0405
+#define VX855_162_000M 0x00710405
+#define VX855_172_798M 0x00790405
+#define VX855_187_000M 0x00830405
+#define VX855_193_295M 0x00870405
+#define VX855_202_500M 0x008E0405
+#define VX855_204_000M 0x008F0405
+#define VX855_218_500M 0x00990405
+#define VX855_229_500M 0x00A10405
+#define VX855_234_000M 0x00A40405
+#define VX855_267_250M 0x00BB0405
+#define VX855_297_500M 0x00D00405
+#define VX855_339_500M 0x00770005
+#define VX855_340_772M 0x00770005
+
+
/* Definition CRTC Timing Index */
#define H_TOTAL_INDEX 0
#define H_ADDR_INDEX 1
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index 0f3ed4eb236..15543e96824 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -97,7 +97,7 @@ int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = 1;
msgs[0].buf = mm1; msgs[1].buf = pdata;
- i2c_transfer(&viaparinfo->i2c_stuff.adapter, msgs, 2);
+ i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
return 0;
}
@@ -111,7 +111,7 @@ int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data)
msgs.addr = slave_addr / 2;
msgs.len = 2;
msgs.buf = msg;
- return i2c_transfer(&viaparinfo->i2c_stuff.adapter, &msgs, 1);
+ return i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, &msgs, 1);
}
int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len)
@@ -125,53 +125,53 @@ int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len)
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = buff_len;
msgs[0].buf = mm1; msgs[1].buf = buff;
- i2c_transfer(&viaparinfo->i2c_stuff.adapter, msgs, 2);
+ i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
return 0;
}
int viafb_create_i2c_bus(void *viapar)
{
int ret;
- struct viafb_par *par = (struct viafb_par *)viapar;
-
- strcpy(par->i2c_stuff.adapter.name, "via_i2c");
- par->i2c_stuff.i2c_port = 0x0;
- par->i2c_stuff.adapter.owner = THIS_MODULE;
- par->i2c_stuff.adapter.id = 0x01FFFF;
- par->i2c_stuff.adapter.class = 0;
- par->i2c_stuff.adapter.algo_data = &par->i2c_stuff.algo;
- par->i2c_stuff.adapter.dev.parent = NULL;
- par->i2c_stuff.algo.setsda = via_i2c_setsda;
- par->i2c_stuff.algo.setscl = via_i2c_setscl;
- par->i2c_stuff.algo.getsda = via_i2c_getsda;
- par->i2c_stuff.algo.getscl = via_i2c_getscl;
- par->i2c_stuff.algo.udelay = 40;
- par->i2c_stuff.algo.timeout = 20;
- par->i2c_stuff.algo.data = &par->i2c_stuff;
-
- i2c_set_adapdata(&par->i2c_stuff.adapter, &par->i2c_stuff);
+ struct via_i2c_stuff *i2c_stuff =
+ &((struct viafb_par *)viapar)->shared->i2c_stuff;
+
+ strcpy(i2c_stuff->adapter.name, "via_i2c");
+ i2c_stuff->i2c_port = 0x0;
+ i2c_stuff->adapter.owner = THIS_MODULE;
+ i2c_stuff->adapter.id = 0x01FFFF;
+ i2c_stuff->adapter.class = 0;
+ i2c_stuff->adapter.algo_data = &i2c_stuff->algo;
+ i2c_stuff->adapter.dev.parent = NULL;
+ i2c_stuff->algo.setsda = via_i2c_setsda;
+ i2c_stuff->algo.setscl = via_i2c_setscl;
+ i2c_stuff->algo.getsda = via_i2c_getsda;
+ i2c_stuff->algo.getscl = via_i2c_getscl;
+ i2c_stuff->algo.udelay = 40;
+ i2c_stuff->algo.timeout = 20;
+ i2c_stuff->algo.data = i2c_stuff;
+
+ i2c_set_adapdata(&i2c_stuff->adapter, i2c_stuff);
/* Raise SCL and SDA */
- par->i2c_stuff.i2c_port = I2CPORTINDEX;
- via_i2c_setsda(&par->i2c_stuff, 1);
- via_i2c_setscl(&par->i2c_stuff, 1);
+ i2c_stuff->i2c_port = I2CPORTINDEX;
+ via_i2c_setsda(i2c_stuff, 1);
+ via_i2c_setscl(i2c_stuff, 1);
- par->i2c_stuff.i2c_port = GPIOPORTINDEX;
- via_i2c_setsda(&par->i2c_stuff, 1);
- via_i2c_setscl(&par->i2c_stuff, 1);
+ i2c_stuff->i2c_port = GPIOPORTINDEX;
+ via_i2c_setsda(i2c_stuff, 1);
+ via_i2c_setscl(i2c_stuff, 1);
udelay(20);
- ret = i2c_bit_add_bus(&par->i2c_stuff.adapter);
+ ret = i2c_bit_add_bus(&i2c_stuff->adapter);
if (ret == 0)
- DEBUG_MSG("I2C bus %s registered.\n",
- par->i2c_stuff.adapter.name);
+ DEBUG_MSG("I2C bus %s registered.\n", i2c_stuff->adapter.name);
else
DEBUG_MSG("Failed to register I2C bus %s.\n",
- par->i2c_stuff.adapter.name);
+ i2c_stuff->adapter.name);
return ret;
}
void viafb_delete_i2c_buss(void *par)
{
- i2c_del_adapter(&((struct viafb_par *)par)->i2c_stuff.adapter);
+ i2c_del_adapter(&((struct viafb_par *)par)->shared->i2c_stuff.adapter);
}
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 72833f3334b..56ec696e8af 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -20,11 +20,12 @@
*/
#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
#define _MASTER_FILE
#include "global.h"
-static int MAX_CURS = 32;
static struct fb_var_screeninfo default_var;
static char *viafb_name = "Via";
static u32 pseudo_pal[17];
@@ -33,12 +34,11 @@ static u32 pseudo_pal[17];
static char *viafb_mode = "640x480";
static char *viafb_mode1 = "640x480";
+static int viafb_accel = 1;
+
/* Added for specifying active devices.*/
char *viafb_active_dev = "";
-/* Added for specifying video on devices.*/
-char *viafb_video_dev = "";
-
/*Added for specify lcd output port*/
char *viafb_lcd_port = "";
char *viafb_dvi_port = "";
@@ -50,71 +50,20 @@ static void apply_second_mode_setting(struct fb_var_screeninfo
*sec_var);
static void retrieve_device_setting(struct viafb_ioctl_setting
*setting_info);
-static void viafb_set_video_device(u32 video_dev_info);
-static void viafb_get_video_device(u32 *video_dev_info);
-
-/* Mode information */
-static const struct viafb_modeinfo viafb_modentry[] = {
- {480, 640, VIA_RES_480X640},
- {640, 480, VIA_RES_640X480},
- {800, 480, VIA_RES_800X480},
- {800, 600, VIA_RES_800X600},
- {1024, 768, VIA_RES_1024X768},
- {1152, 864, VIA_RES_1152X864},
- {1280, 1024, VIA_RES_1280X1024},
- {1600, 1200, VIA_RES_1600X1200},
- {1440, 1050, VIA_RES_1440X1050},
- {1280, 768, VIA_RES_1280X768,},
- {1280, 800, VIA_RES_1280X800},
- {1280, 960, VIA_RES_1280X960},
- {1920, 1440, VIA_RES_1920X1440},
- {848, 480, VIA_RES_848X480},
- {1400, 1050, VIA_RES_1400X1050},
- {720, 480, VIA_RES_720X480},
- {720, 576, VIA_RES_720X576},
- {1024, 512, VIA_RES_1024X512},
- {1024, 576, VIA_RES_1024X576},
- {1024, 600, VIA_RES_1024X600},
- {1280, 720, VIA_RES_1280X720},
- {1920, 1080, VIA_RES_1920X1080},
- {1366, 768, VIA_RES_1368X768},
- {1680, 1050, VIA_RES_1680X1050},
- {960, 600, VIA_RES_960X600},
- {1000, 600, VIA_RES_1000X600},
- {1024, 576, VIA_RES_1024X576},
- {1024, 600, VIA_RES_1024X600},
- {1088, 612, VIA_RES_1088X612},
- {1152, 720, VIA_RES_1152X720},
- {1200, 720, VIA_RES_1200X720},
- {1280, 600, VIA_RES_1280X600},
- {1360, 768, VIA_RES_1360X768},
- {1440, 900, VIA_RES_1440X900},
- {1600, 900, VIA_RES_1600X900},
- {1600, 1024, VIA_RES_1600X1024},
- {1792, 1344, VIA_RES_1792X1344},
- {1856, 1392, VIA_RES_1856X1392},
- {1920, 1200, VIA_RES_1920X1200},
- {2048, 1536, VIA_RES_2048X1536},
- {0, 0, VIA_RES_INVALID}
-};
static struct fb_ops viafb_ops;
-static int viafb_update_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
-{
- struct viafb_par *ppar;
- ppar = info->par;
-
- DEBUG_MSG(KERN_INFO "viafb_update_fix!\n");
- fix->visual =
- ppar->bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
- fix->line_length = ppar->linelength;
+static void viafb_update_fix(struct fb_info *info)
+{
+ u32 bpp = info->var.bits_per_pixel;
- return 0;
+ info->fix.visual =
+ bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ info->fix.line_length =
+ ((info->var.xres_virtual + 7) & ~7) * bpp / 8;
}
-
static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix,
struct viafb_par *viaparinfo)
{
@@ -123,8 +72,6 @@ static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix,
fix->smem_start = viaparinfo->fbmem;
fix->smem_len = viaparinfo->fbmem_free;
- fix->mmio_start = viaparinfo->mmio_base;
- fix->mmio_len = viaparinfo->mmio_len;
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
@@ -147,28 +94,12 @@ static int viafb_release(struct fb_info *info, int user)
return 0;
}
-static void viafb_update_viafb_par(struct fb_info *info)
-{
- struct viafb_par *ppar;
-
- ppar = info->par;
- ppar->bpp = info->var.bits_per_pixel;
- ppar->linelength = ((info->var.xres_virtual + 7) & ~7) * ppar->bpp / 8;
- ppar->hres = info->var.xres;
- ppar->vres = info->var.yres;
- ppar->xoffset = info->var.xoffset;
- ppar->yoffset = info->var.yoffset;
-}
-
static int viafb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int vmode_index, htotal, vtotal;
- struct viafb_par *ppar;
+ struct viafb_par *ppar = info->par;
u32 long_refresh;
- struct viafb_par *p_viafb_par;
- ppar = info->par;
-
DEBUG_MSG(KERN_INFO "viafb_check_var!\n");
/* Sanity check */
@@ -212,23 +143,21 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
/* Adjust var according to our driver's own table */
viafb_fill_var_timing_info(var, viafb_refresh, vmode_index);
-
- /* This is indeed a patch for VT3353 */
- if (!info->par)
- return -1;
- p_viafb_par = (struct viafb_par *)info->par;
- if (p_viafb_par->chip_info->gfx_chip_name == UNICHROME_VX800)
- var->accel_flags = 0;
+ if (info->var.accel_flags & FB_ACCELF_TEXT &&
+ !ppar->shared->engine_mmio)
+ info->var.accel_flags = 0;
return 0;
}
static int viafb_set_par(struct fb_info *info)
{
+ struct viafb_par *viapar = info->par;
int vmode_index;
int vmode_index1 = 0;
DEBUG_MSG(KERN_INFO "viafb_set_par!\n");
+ viapar->depth = fb_get_color_depth(&info->var, &info->fix);
viafb_update_device_setting(info->var.xres, info->var.yres,
info->var.bits_per_pixel, viafb_refresh, 0);
@@ -252,21 +181,12 @@ static int viafb_set_par(struct fb_info *info)
info->var.bits_per_pixel, vmode_index1,
viafb_second_xres, viafb_second_yres, viafb_bpp1);
- /*We should set memory offset according virtual_x */
- /*Fix me:put this function into viafb_setmode */
- viafb_memory_pitch_patch(info);
-
- /* Update ***fb_par information */
- viafb_update_viafb_par(info);
-
- /* Update other fixed information */
- viafb_update_fix(&info->fix, info);
+ viafb_update_fix(info);
viafb_bpp = info->var.bits_per_pixel;
- /* Update viafb_accel, it is necessary to our 2D accelerate */
- viafb_accel = info->var.accel_flags;
-
- if (viafb_accel)
- viafb_set_2d_color_depth(info->var.bits_per_pixel);
+ if (info->var.accel_flags & FB_ACCELF_TEXT)
+ info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ else
+ info->flags |= FBINFO_HWACCEL_DISABLED;
}
return 0;
@@ -503,12 +423,7 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
var->bits_per_pixel / 16;
DEBUG_MSG(KERN_INFO "\nviafb_pan_display,offset =%d ", offset);
-
- viafb_write_reg_mask(0x48, 0x3d4, ((offset >> 24) & 0x3), 0x3);
- viafb_write_reg_mask(0x34, 0x3d4, ((offset >> 16) & 0xff), 0xff);
- viafb_write_reg_mask(0x0c, 0x3d4, ((offset >> 8) & 0xff), 0xff);
- viafb_write_reg_mask(0x0d, 0x3d4, (offset & 0xff), 0xff);
-
+ viafb_set_primary_address(offset);
return 0;
}
@@ -560,7 +475,6 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
u32 __user *argp = (u32 __user *) arg;
u32 gpu32;
- u32 video_dev_info = 0;
DEBUG_MSG(KERN_INFO "viafb_ioctl: 0x%X !!\n", cmd);
memset(&u, 0, sizeof(u));
@@ -792,15 +706,6 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
if (put_user(state_info, argp))
return -EFAULT;
break;
- case VIAFB_SET_VIDEO_DEVICE:
- get_user(video_dev_info, argp);
- viafb_set_video_device(video_dev_info);
- break;
- case VIAFB_GET_VIDEO_DEVICE:
- viafb_get_video_device(&video_dev_info);
- if (put_user(video_dev_info, argp))
- return -EFAULT;
- break;
case VIAFB_SYNC_SURFACE:
DEBUG_MSG(KERN_INFO "lobo VIAFB_SYNC_SURFACE\n");
break;
@@ -866,10 +771,12 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
static void viafb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
- u32 col = 0, rop = 0;
- int pitch;
+ struct viafb_par *viapar = info->par;
+ struct viafb_shared *shared = viapar->shared;
+ u32 fg_color;
+ u8 rop;
- if (!viafb_accel) {
+ if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) {
cfb_fillrect(info, rect);
return;
}
@@ -877,68 +784,31 @@ static void viafb_fillrect(struct fb_info *info,
if (!rect->width || !rect->height)
return;
- switch (rect->rop) {
- case ROP_XOR:
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR)
+ fg_color = ((u32 *)info->pseudo_palette)[rect->color];
+ else
+ fg_color = rect->color;
+
+ if (rect->rop == ROP_XOR)
rop = 0x5A;
- break;
- case ROP_COPY:
- default:
+ else
rop = 0xF0;
- break;
- }
-
- switch (info->var.bits_per_pixel) {
- case 8:
- col = rect->color;
- break;
- case 16:
- col = ((u32 *) (info->pseudo_palette))[rect->color];
- break;
- case 32:
- col = ((u32 *) (info->pseudo_palette))[rect->color];
- break;
- }
-
- /* BitBlt Source Address */
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCPOS);
- /* Source Base Address */
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCBASE);
- /* Destination Base Address */
- writel(((unsigned long) (info->screen_base) -
- (unsigned long) viafb_FB_MM) >> 3,
- viaparinfo->io_virt + VIA_REG_DSTBASE);
- /* Pitch */
- pitch = (info->var.xres_virtual + 7) & ~7;
- writel(VIA_PITCH_ENABLE |
- (((pitch *
- info->var.bits_per_pixel >> 3) >> 3) |
- (((pitch * info->
- var.bits_per_pixel >> 3) >> 3) << 16)),
- viaparinfo->io_virt + VIA_REG_PITCH);
- /* BitBlt Destination Address */
- writel(((rect->dy << 16) | rect->dx),
- viaparinfo->io_virt + VIA_REG_DSTPOS);
- /* Dimension: width & height */
- writel((((rect->height - 1) << 16) | (rect->width - 1)),
- viaparinfo->io_virt + VIA_REG_DIMENSION);
- /* Forground color or Destination color */
- writel(col, viaparinfo->io_virt + VIA_REG_FGCOLOR);
- /* GE Command */
- writel((0x01 | 0x2000 | (rop << 24)),
- viaparinfo->io_virt + VIA_REG_GECMD);
+ DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n");
+ if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_FILL,
+ rect->width, rect->height, info->var.bits_per_pixel,
+ viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy,
+ NULL, 0, 0, 0, 0, fg_color, 0, rop))
+ cfb_fillrect(info, rect);
}
static void viafb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
- u32 dy = area->dy, sy = area->sy, direction = 0x0;
- u32 sx = area->sx, dx = area->dx, width = area->width;
- int pitch;
-
- DEBUG_MSG(KERN_INFO "viafb_copyarea!!\n");
+ struct viafb_par *viapar = info->par;
+ struct viafb_shared *shared = viapar->shared;
- if (!viafb_accel) {
+ if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) {
cfb_copyarea(info, area);
return;
}
@@ -946,263 +816,148 @@ static void viafb_copyarea(struct fb_info *info,
if (!area->width || !area->height)
return;
- if (sy < dy) {
- dy += area->height - 1;
- sy += area->height - 1;
- direction |= 0x4000;
- }
-
- if (sx < dx) {
- dx += width - 1;
- sx += width - 1;
- direction |= 0x8000;
- }
-
- /* Source Base Address */
- writel(((unsigned long) (info->screen_base) -
- (unsigned long) viafb_FB_MM) >> 3,
- viaparinfo->io_virt + VIA_REG_SRCBASE);
- /* Destination Base Address */
- writel(((unsigned long) (info->screen_base) -
- (unsigned long) viafb_FB_MM) >> 3,
- viaparinfo->io_virt + VIA_REG_DSTBASE);
- /* Pitch */
- pitch = (info->var.xres_virtual + 7) & ~7;
- /* VIA_PITCH_ENABLE can be omitted now. */
- writel(VIA_PITCH_ENABLE |
- (((pitch *
- info->var.bits_per_pixel >> 3) >> 3) | (((pitch *
- info->var.
- bits_per_pixel
- >> 3) >> 3)
- << 16)),
- viaparinfo->io_virt + VIA_REG_PITCH);
- /* BitBlt Source Address */
- writel(((sy << 16) | sx), viaparinfo->io_virt + VIA_REG_SRCPOS);
- /* BitBlt Destination Address */
- writel(((dy << 16) | dx), viaparinfo->io_virt + VIA_REG_DSTPOS);
- /* Dimension: width & height */
- writel((((area->height - 1) << 16) | (area->width - 1)),
- viaparinfo->io_virt + VIA_REG_DIMENSION);
- /* GE Command */
- writel((0x01 | direction | (0xCC << 24)),
- viaparinfo->io_virt + VIA_REG_GECMD);
-
+ DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n");
+ if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_COLOR,
+ area->width, area->height, info->var.bits_per_pixel,
+ viapar->vram_addr, info->fix.line_length, area->dx, area->dy,
+ NULL, viapar->vram_addr, info->fix.line_length,
+ area->sx, area->sy, 0, 0, 0))
+ cfb_copyarea(info, area);
}
static void viafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
- u32 size, bg_col = 0, fg_col = 0, *udata;
- int i;
- int pitch;
+ struct viafb_par *viapar = info->par;
+ struct viafb_shared *shared = viapar->shared;
+ u32 fg_color = 0, bg_color = 0;
+ u8 op;
- if (!viafb_accel) {
+ if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt ||
+ (image->depth != 1 && image->depth != viapar->depth)) {
cfb_imageblit(info, image);
return;
}
- udata = (u32 *) image->data;
-
- switch (info->var.bits_per_pixel) {
- case 8:
- bg_col = image->bg_color;
- fg_col = image->fg_color;
- break;
- case 16:
- bg_col = ((u32 *) (info->pseudo_palette))[image->bg_color];
- fg_col = ((u32 *) (info->pseudo_palette))[image->fg_color];
- break;
- case 32:
- bg_col = ((u32 *) (info->pseudo_palette))[image->bg_color];
- fg_col = ((u32 *) (info->pseudo_palette))[image->fg_color];
- break;
- }
- size = image->width * image->height;
-
- /* Source Base Address */
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCBASE);
- /* Destination Base Address */
- writel(((unsigned long) (info->screen_base) -
- (unsigned long) viafb_FB_MM) >> 3,
- viaparinfo->io_virt + VIA_REG_DSTBASE);
- /* Pitch */
- pitch = (info->var.xres_virtual + 7) & ~7;
- writel(VIA_PITCH_ENABLE |
- (((pitch *
- info->var.bits_per_pixel >> 3) >> 3) | (((pitch *
- info->var.
- bits_per_pixel
- >> 3) >> 3)
- << 16)),
- viaparinfo->io_virt + VIA_REG_PITCH);
- /* BitBlt Source Address */
- writel(0x0, viaparinfo->io_virt + VIA_REG_SRCPOS);
- /* BitBlt Destination Address */
- writel(((image->dy << 16) | image->dx),
- viaparinfo->io_virt + VIA_REG_DSTPOS);
- /* Dimension: width & height */
- writel((((image->height - 1) << 16) | (image->width - 1)),
- viaparinfo->io_virt + VIA_REG_DIMENSION);
- /* fb color */
- writel(fg_col, viaparinfo->io_virt + VIA_REG_FGCOLOR);
- /* bg color */
- writel(bg_col, viaparinfo->io_virt + VIA_REG_BGCOLOR);
- /* GE Command */
- writel(0xCC020142, viaparinfo->io_virt + VIA_REG_GECMD);
-
- for (i = 0; i < size / 4; i++) {
- writel(*udata, viaparinfo->io_virt + VIA_MMIO_BLTBASE);
- udata++;
- }
+ if (image->depth == 1) {
+ op = VIA_BITBLT_MONO;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ fg_color =
+ ((u32 *)info->pseudo_palette)[image->fg_color];
+ bg_color =
+ ((u32 *)info->pseudo_palette)[image->bg_color];
+ } else {
+ fg_color = image->fg_color;
+ bg_color = image->bg_color;
+ }
+ } else
+ op = VIA_BITBLT_COLOR;
+ DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n");
+ if (shared->hw_bitblt(shared->engine_mmio, op,
+ image->width, image->height, info->var.bits_per_pixel,
+ viapar->vram_addr, info->fix.line_length, image->dx, image->dy,
+ (u32 *)image->data, 0, 0, 0, 0, fg_color, bg_color, 0))
+ cfb_imageblit(info, image);
}
static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- u32 temp, xx, yy, bg_col = 0, fg_col = 0;
- int i, j = 0;
- static int hw_cursor;
- struct viafb_par *p_viafb_par;
-
- if (viafb_accel)
- hw_cursor = 1;
-
- if (!viafb_accel) {
- if (hw_cursor) {
- viafb_show_hw_cursor(info, HW_Cursor_OFF);
- hw_cursor = 0;
- }
- return -ENODEV;
- }
+ struct viafb_par *viapar = info->par;
+ void __iomem *engine = viapar->shared->engine_mmio;
+ u32 temp, xx, yy, bg_color = 0, fg_color = 0,
+ chip_name = viapar->shared->chip_info.gfx_chip_name;
+ int i, j = 0, cur_size = 64;
- if ((((struct viafb_par *)(info->par))->iga_path == IGA2)
- && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266))
+ if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo)
return -ENODEV;
- /* When duoview and using lcd , use soft cursor */
- if (viafb_LCD_ON || ((struct viafb_par *)(info->par))->duoview)
+ if (chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2)
return -ENODEV;
viafb_show_hw_cursor(info, HW_Cursor_OFF);
- viacursor = *cursor;
if (cursor->set & FB_CUR_SETHOT) {
- viacursor.hot = cursor->hot;
- temp = ((viacursor.hot.x) << 16) + viacursor.hot.y;
- writel(temp, viaparinfo->io_virt + VIA_REG_CURSOR_ORG);
+ temp = (cursor->hot.x << 16) + cursor->hot.y;
+ writel(temp, engine + VIA_REG_CURSOR_ORG);
}
if (cursor->set & FB_CUR_SETPOS) {
- viacursor.image.dx = cursor->image.dx;
- viacursor.image.dy = cursor->image.dy;
yy = cursor->image.dy - info->var.yoffset;
xx = cursor->image.dx - info->var.xoffset;
temp = yy & 0xFFFF;
temp |= (xx << 16);
- writel(temp, viaparinfo->io_virt + VIA_REG_CURSOR_POS);
+ writel(temp, engine + VIA_REG_CURSOR_POS);
}
- if (cursor->set & FB_CUR_SETSIZE) {
- temp = readl(viaparinfo->io_virt + VIA_REG_CURSOR_MODE);
+ if (cursor->image.width <= 32 && cursor->image.height <= 32)
+ cur_size = 32;
+ else if (cursor->image.width <= 64 && cursor->image.height <= 64)
+ cur_size = 64;
+ else {
+ printk(KERN_WARNING "viafb_cursor: The cursor is too large "
+ "%dx%d\n", cursor->image.width, cursor->image.height);
+ return -ENXIO;
+ }
- if ((cursor->image.width <= 32)
- && (cursor->image.height <= 32)) {
- MAX_CURS = 32;
+ if (cursor->set & FB_CUR_SETSIZE) {
+ temp = readl(engine + VIA_REG_CURSOR_MODE);
+ if (cur_size == 32)
temp |= 0x2;
- } else if ((cursor->image.width <= 64)
- && (cursor->image.height <= 64)) {
- MAX_CURS = 64;
- temp &= 0xFFFFFFFD;
- } else {
- DEBUG_MSG(KERN_INFO
- "The cursor image is biger than 64x64 bits...\n");
- return -ENXIO;
- }
- writel(temp, viaparinfo->io_virt + VIA_REG_CURSOR_MODE);
+ else
+ temp &= ~0x2;
- viacursor.image.height = cursor->image.height;
- viacursor.image.width = cursor->image.width;
+ writel(temp, engine + VIA_REG_CURSOR_MODE);
}
if (cursor->set & FB_CUR_SETCMAP) {
- viacursor.image.fg_color = cursor->image.fg_color;
- viacursor.image.bg_color = cursor->image.bg_color;
-
- switch (info->var.bits_per_pixel) {
- case 8:
- case 16:
- case 32:
- bg_col =
- (0xFF << 24) |
- (((info->cmap.red)[viacursor.image.bg_color] &
- 0xFF00) << 8) |
- ((info->cmap.green)[viacursor.image.bg_color] &
- 0xFF00) |
- (((info->cmap.blue)[viacursor.image.bg_color] &
- 0xFF00) >> 8);
- fg_col =
- (0xFF << 24) |
- (((info->cmap.red)[viacursor.image.fg_color] &
- 0xFF00) << 8) |
- ((info->cmap.green)[viacursor.image.fg_color] &
- 0xFF00) |
- (((info->cmap.blue)[viacursor.image.fg_color] &
- 0xFF00) >> 8);
- break;
- default:
- return 0;
- }
-
- /* This is indeed a patch for VT3324/VT3353 */
- if (!info->par)
- return 0;
- p_viafb_par = (struct viafb_par *)info->par;
-
- if ((p_viafb_par->chip_info->gfx_chip_name ==
- UNICHROME_CX700) ||
- ((p_viafb_par->chip_info->gfx_chip_name ==
- UNICHROME_VX800))) {
- bg_col =
- (((info->cmap.red)[viacursor.image.bg_color] &
- 0xFFC0) << 14) |
- (((info->cmap.green)[viacursor.image.bg_color] &
- 0xFFC0) << 4) |
- (((info->cmap.blue)[viacursor.image.bg_color] &
- 0xFFC0) >> 6);
- fg_col =
- (((info->cmap.red)[viacursor.image.fg_color] &
- 0xFFC0) << 14) |
- (((info->cmap.green)[viacursor.image.fg_color] &
- 0xFFC0) << 4) |
- (((info->cmap.blue)[viacursor.image.fg_color] &
- 0xFFC0) >> 6);
+ fg_color = cursor->image.fg_color;
+ bg_color = cursor->image.bg_color;
+ if (chip_name == UNICHROME_CX700 ||
+ chip_name == UNICHROME_VX800 ||
+ chip_name == UNICHROME_VX855) {
+ fg_color =
+ ((info->cmap.red[fg_color] & 0xFFC0) << 14) |
+ ((info->cmap.green[fg_color] & 0xFFC0) << 4) |
+ ((info->cmap.blue[fg_color] & 0xFFC0) >> 6);
+ bg_color =
+ ((info->cmap.red[bg_color] & 0xFFC0) << 14) |
+ ((info->cmap.green[bg_color] & 0xFFC0) << 4) |
+ ((info->cmap.blue[bg_color] & 0xFFC0) >> 6);
+ } else {
+ fg_color =
+ ((info->cmap.red[fg_color] & 0xFF00) << 8) |
+ (info->cmap.green[fg_color] & 0xFF00) |
+ ((info->cmap.blue[fg_color] & 0xFF00) >> 8);
+ bg_color =
+ ((info->cmap.red[bg_color] & 0xFF00) << 8) |
+ (info->cmap.green[bg_color] & 0xFF00) |
+ ((info->cmap.blue[bg_color] & 0xFF00) >> 8);
}
- writel(bg_col, viaparinfo->io_virt + VIA_REG_CURSOR_BG);
- writel(fg_col, viaparinfo->io_virt + VIA_REG_CURSOR_FG);
+ writel(bg_color, engine + VIA_REG_CURSOR_BG);
+ writel(fg_color, engine + VIA_REG_CURSOR_FG);
}
if (cursor->set & FB_CUR_SETSHAPE) {
struct {
- u8 data[CURSOR_SIZE / 8];
- u32 bak[CURSOR_SIZE / 32];
+ u8 data[CURSOR_SIZE];
+ u32 bak[CURSOR_SIZE / 4];
} *cr_data = kzalloc(sizeof(*cr_data), GFP_ATOMIC);
- int size =
- ((viacursor.image.width + 7) >> 3) *
- viacursor.image.height;
+ int size = ((cursor->image.width + 7) >> 3) *
+ cursor->image.height;
- if (cr_data == NULL)
- goto out;
+ if (!cr_data)
+ return -ENOMEM;
- if (MAX_CURS == 32) {
- for (i = 0; i < (CURSOR_SIZE / 32); i++) {
+ if (cur_size == 32) {
+ for (i = 0; i < (CURSOR_SIZE / 4); i++) {
cr_data->bak[i] = 0x0;
cr_data->bak[i + 1] = 0xFFFFFFFF;
i += 1;
}
- } else if (MAX_CURS == 64) {
- for (i = 0; i < (CURSOR_SIZE / 32); i++) {
+ } else {
+ for (i = 0; i < (CURSOR_SIZE / 4); i++) {
cr_data->bak[i] = 0x0;
cr_data->bak[i + 1] = 0x0;
cr_data->bak[i + 2] = 0xFFFFFFFF;
@@ -1211,27 +966,27 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
}
}
- switch (viacursor.rop) {
+ switch (cursor->rop) {
case ROP_XOR:
for (i = 0; i < size; i++)
- cr_data->data[i] = viacursor.mask[i];
+ cr_data->data[i] = cursor->mask[i];
break;
case ROP_COPY:
for (i = 0; i < size; i++)
- cr_data->data[i] = viacursor.mask[i];
+ cr_data->data[i] = cursor->mask[i];
break;
default:
break;
}
- if (MAX_CURS == 32) {
+ if (cur_size == 32) {
for (i = 0; i < size; i++) {
cr_data->bak[j] = (u32) cr_data->data[i];
cr_data->bak[j + 1] = ~cr_data->bak[j];
j += 2;
}
- } else if (MAX_CURS == 64) {
+ } else {
for (i = 0; i < size; i++) {
cr_data->bak[j] = (u32) cr_data->data[i];
cr_data->bak[j + 1] = 0x0;
@@ -1241,14 +996,12 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
}
}
- memcpy(((struct viafb_par *)(info->par))->fbmem_virt +
- ((struct viafb_par *)(info->par))->cursor_start,
- cr_data->bak, CURSOR_SIZE);
-out:
+ memcpy_toio(viafbinfo->screen_base + viapar->shared->
+ cursor_vram_addr, cr_data->bak, CURSOR_SIZE);
kfree(cr_data);
}
- if (viacursor.enable)
+ if (cursor->enable)
viafb_show_hw_cursor(info, HW_Cursor_ON);
return 0;
@@ -1256,8 +1009,8 @@ out:
static int viafb_sync(struct fb_info *info)
{
- if (viafb_accel)
- viafb_wait_engine_idle();
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED))
+ viafb_wait_engine_idle(info);
return 0;
}
@@ -1266,12 +1019,16 @@ int viafb_get_mode_index(int hres, int vres)
u32 i;
DEBUG_MSG(KERN_INFO "viafb_get_mode_index!\n");
- for (i = 0; viafb_modentry[i].mode_index != VIA_RES_INVALID; i++)
- if (viafb_modentry[i].xres == hres &&
- viafb_modentry[i].yres == vres)
+ for (i = 0; i < NUM_TOTAL_MODETABLE; i++)
+ if (CLE266Modes[i].mode_array &&
+ CLE266Modes[i].crtc[0].crtc.hor_addr == hres &&
+ CLE266Modes[i].crtc[0].crtc.ver_addr == vres)
break;
- return viafb_modentry[i].mode_index;
+ if (i == NUM_TOTAL_MODETABLE)
+ return VIA_RES_INVALID;
+
+ return CLE266Modes[i].ModeIndex;
}
static void check_available_device_to_enable(int device_id)
@@ -1375,36 +1132,11 @@ static void viafb_set_device(struct device_t active_dev)
viafb_SAMM_ON = active_dev.samm;
viafb_primary_dev = active_dev.primary_dev;
- viafb_set_start_addr();
+ viafb_set_primary_address(0);
+ viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
viafb_set_iga_path();
}
-static void viafb_set_video_device(u32 video_dev_info)
-{
- viaparinfo->video_on_crt = STATE_OFF;
- viaparinfo->video_on_dvi = STATE_OFF;
- viaparinfo->video_on_lcd = STATE_OFF;
-
- /* Check available device to enable: */
- if ((video_dev_info & CRT_Device) == CRT_Device)
- viaparinfo->video_on_crt = STATE_ON;
- else if ((video_dev_info & DVI_Device) == DVI_Device)
- viaparinfo->video_on_dvi = STATE_ON;
- else if ((video_dev_info & LCD_Device) == LCD_Device)
- viaparinfo->video_on_lcd = STATE_ON;
-}
-
-static void viafb_get_video_device(u32 *video_dev_info)
-{
- *video_dev_info = None_Device;
- if (viaparinfo->video_on_crt == STATE_ON)
- *video_dev_info |= CRT_Device;
- else if (viaparinfo->video_on_dvi == STATE_ON)
- *video_dev_info |= DVI_Device;
- else if (viaparinfo->video_on_lcd == STATE_ON)
- *video_dev_info |= LCD_Device;
-}
-
static int get_primary_device(void)
{
int primary_device = 0;
@@ -1446,18 +1178,6 @@ static int get_primary_device(void)
return primary_device;
}
-static u8 is_duoview(void)
-{
- if (0 == viafb_SAMM_ON) {
- if (viafb_LCD_ON + viafb_LCD2_ON +
- viafb_DVI_ON + viafb_CRT_ON == 2)
- return true;
- return false;
- } else {
- return false;
- }
-}
-
static void apply_second_mode_setting(struct fb_var_screeninfo
*sec_var)
{
@@ -1559,14 +1279,13 @@ static int apply_device_setting(struct viafb_ioctl_setting setting_info,
if (viafb_SAMM_ON)
viafb_primary_dev = setting_info.primary_device;
- viafb_set_start_addr();
+ viafb_set_primary_address(0);
+ viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
viafb_set_iga_path();
}
need_set_mode = 1;
}
- viaparinfo->duoview = is_duoview();
-
if (!need_set_mode) {
;
} else {
@@ -1589,18 +1308,6 @@ static void retrieve_device_setting(struct viafb_ioctl_setting
setting_info->device_status |= LCD_Device;
if (viafb_LCD2_ON == 1)
setting_info->device_status |= LCD2_Device;
- if ((viaparinfo->video_on_crt == 1) && (viafb_CRT_ON == 1)) {
- setting_info->video_device_status =
- viaparinfo->crt_setting_info->iga_path;
- } else if ((viaparinfo->video_on_dvi == 1) && (viafb_DVI_ON == 1)) {
- setting_info->video_device_status =
- viaparinfo->tmds_setting_info->iga_path;
- } else if ((viaparinfo->video_on_lcd == 1) && (viafb_LCD_ON == 1)) {
- setting_info->video_device_status =
- viaparinfo->lvds_setting_info->iga_path;
- } else {
- setting_info->video_device_status = 0;
- }
setting_info->samm_status = viafb_SAMM_ON;
setting_info->primary_device = get_primary_device();
@@ -1687,25 +1394,6 @@ static void parse_active_dev(void)
viafb_CRT_ON = STATE_ON;
viafb_SAMM_ON = STATE_OFF;
}
- viaparinfo->duoview = is_duoview();
-}
-
-static void parse_video_dev(void)
-{
- viaparinfo->video_on_crt = STATE_OFF;
- viaparinfo->video_on_dvi = STATE_OFF;
- viaparinfo->video_on_lcd = STATE_OFF;
-
- if (!strncmp(viafb_video_dev, "CRT", 3)) {
- /* Video on CRT */
- viaparinfo->video_on_crt = STATE_ON;
- } else if (!strncmp(viafb_video_dev, "DVI", 3)) {
- /* Video on DVI */
- viaparinfo->video_on_dvi = STATE_ON;
- } else if (!strncmp(viafb_video_dev, "LCD", 3)) {
- /* Video on LCD */
- viaparinfo->video_on_lcd = STATE_ON;
- }
}
static int parse_port(char *opt_str, int *output_interface)
@@ -1754,10 +1442,8 @@ static void parse_dvi_port(void)
* DVP1Driving, DFPHigh, DFPLow CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2],
* CR9B, SR65, CR97, CR99
*/
-static int viafb_dvp0_proc_read(char *buf, char **start, off_t offset,
-int count, int *eof, void *data)
+static int viafb_dvp0_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
u8 dvp0_data_dri = 0, dvp0_clk_dri = 0, dvp0 = 0;
dvp0_data_dri =
(viafb_read_reg(VIASR, SR2A) & BIT5) >> 4 |
@@ -1766,13 +1452,17 @@ int count, int *eof, void *data)
(viafb_read_reg(VIASR, SR2A) & BIT4) >> 3 |
(viafb_read_reg(VIASR, SR1E) & BIT2) >> 2;
dvp0 = viafb_read_reg(VIACR, CR96) & 0x0f;
- len +=
- sprintf(buf + len, "%x %x %x\n", dvp0, dvp0_data_dri, dvp0_clk_dri);
- *eof = 1; /*Inform kernel end of data */
- return len;
+ seq_printf(m, "%x %x %x\n", dvp0, dvp0_data_dri, dvp0_clk_dri);
+ return 0;
}
-static int viafb_dvp0_proc_write(struct file *file,
- const char __user *buffer, unsigned long count, void *data)
+
+static int viafb_dvp0_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_dvp0_proc_show, NULL);
+}
+
+static ssize_t viafb_dvp0_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char buf[20], *value, *pbuf;
u8 reg_val = 0;
@@ -1816,21 +1506,33 @@ static int viafb_dvp0_proc_write(struct file *file,
}
return count;
}
-static int viafb_dvp1_proc_read(char *buf, char **start, off_t offset,
- int count, int *eof, void *data)
+
+static const struct file_operations viafb_dvp0_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_dvp0_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_dvp0_proc_write,
+};
+
+static int viafb_dvp1_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
u8 dvp1 = 0, dvp1_data_dri = 0, dvp1_clk_dri = 0;
dvp1 = viafb_read_reg(VIACR, CR9B) & 0x0f;
dvp1_data_dri = (viafb_read_reg(VIASR, SR65) & 0x0c) >> 2;
dvp1_clk_dri = viafb_read_reg(VIASR, SR65) & 0x03;
- len +=
- sprintf(buf + len, "%x %x %x\n", dvp1, dvp1_data_dri, dvp1_clk_dri);
- *eof = 1; /*Inform kernel end of data */
- return len;
+ seq_printf(m, "%x %x %x\n", dvp1, dvp1_data_dri, dvp1_clk_dri);
+ return 0;
}
-static int viafb_dvp1_proc_write(struct file *file,
- const char __user *buffer, unsigned long count, void *data)
+
+static int viafb_dvp1_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_dvp1_proc_show, NULL);
+}
+
+static ssize_t viafb_dvp1_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char buf[20], *value, *pbuf;
u8 reg_val = 0;
@@ -1869,18 +1571,30 @@ static int viafb_dvp1_proc_write(struct file *file,
return count;
}
-static int viafb_dfph_proc_read(char *buf, char **start, off_t offset,
- int count, int *eof, void *data)
+static const struct file_operations viafb_dvp1_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_dvp1_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_dvp1_proc_write,
+};
+
+static int viafb_dfph_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
u8 dfp_high = 0;
dfp_high = viafb_read_reg(VIACR, CR97) & 0x0f;
- len += sprintf(buf + len, "%x\n", dfp_high);
- *eof = 1; /*Inform kernel end of data */
- return len;
+ seq_printf(m, "%x\n", dfp_high);
+ return 0;
}
-static int viafb_dfph_proc_write(struct file *file,
- const char __user *buffer, unsigned long count, void *data)
+
+static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_dfph_proc_show, NULL);
+}
+
+static ssize_t viafb_dfph_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char buf[20];
u8 reg_val = 0;
@@ -1895,18 +1609,31 @@ static int viafb_dfph_proc_write(struct file *file,
viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
return count;
}
-static int viafb_dfpl_proc_read(char *buf, char **start, off_t offset,
- int count, int *eof, void *data)
+
+static const struct file_operations viafb_dfph_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_dfph_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_dfph_proc_write,
+};
+
+static int viafb_dfpl_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
u8 dfp_low = 0;
dfp_low = viafb_read_reg(VIACR, CR99) & 0x0f;
- len += sprintf(buf + len, "%x\n", dfp_low);
- *eof = 1; /*Inform kernel end of data */
- return len;
+ seq_printf(m, "%x\n", dfp_low);
+ return 0;
}
-static int viafb_dfpl_proc_write(struct file *file,
- const char __user *buffer, unsigned long count, void *data)
+
+static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_dfpl_proc_show, NULL);
+}
+
+static ssize_t viafb_dfpl_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char buf[20];
u8 reg_val = 0;
@@ -1921,10 +1648,18 @@ static int viafb_dfpl_proc_write(struct file *file,
viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
return count;
}
-static int viafb_vt1636_proc_read(char *buf, char **start,
- off_t offset, int count, int *eof, void *data)
+
+static const struct file_operations viafb_dfpl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_dfpl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_dfpl_proc_write,
+};
+
+static int viafb_vt1636_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
u8 vt1636_08 = 0, vt1636_09 = 0;
switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
case VT1636_LVDS:
@@ -1934,7 +1669,7 @@ static int viafb_vt1636_proc_read(char *buf, char **start,
vt1636_09 =
viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info,
&viaparinfo->chip_info->lvds_chip_info, 0x09) & 0x1f;
- len += sprintf(buf + len, "%x %x\n", vt1636_08, vt1636_09);
+ seq_printf(m, "%x %x\n", vt1636_08, vt1636_09);
break;
default:
break;
@@ -1947,16 +1682,21 @@ static int viafb_vt1636_proc_read(char *buf, char **start,
vt1636_09 =
viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2,
&viaparinfo->chip_info->lvds_chip_info2, 0x09) & 0x1f;
- len += sprintf(buf + len, " %x %x\n", vt1636_08, vt1636_09);
+ seq_printf(m, " %x %x\n", vt1636_08, vt1636_09);
break;
default:
break;
}
- *eof = 1; /*Inform kernel end of data */
- return len;
+ return 0;
}
-static int viafb_vt1636_proc_write(struct file *file,
- const char __user *buffer, unsigned long count, void *data)
+
+static int viafb_vt1636_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, viafb_vt1636_proc_show, NULL);
+}
+
+static ssize_t viafb_vt1636_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char buf[30], *value, *pbuf;
struct IODATA reg_val;
@@ -2045,39 +1785,27 @@ static int viafb_vt1636_proc_write(struct file *file,
return count;
}
+static const struct file_operations viafb_vt1636_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = viafb_vt1636_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = viafb_vt1636_proc_write,
+};
+
static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
{
- struct proc_dir_entry *entry;
*viafb_entry = proc_mkdir("viafb", NULL);
if (viafb_entry) {
- entry = create_proc_entry("dvp0", 0, *viafb_entry);
- if (entry) {
- entry->read_proc = viafb_dvp0_proc_read;
- entry->write_proc = viafb_dvp0_proc_write;
- }
- entry = create_proc_entry("dvp1", 0, *viafb_entry);
- if (entry) {
- entry->read_proc = viafb_dvp1_proc_read;
- entry->write_proc = viafb_dvp1_proc_write;
- }
- entry = create_proc_entry("dfph", 0, *viafb_entry);
- if (entry) {
- entry->read_proc = viafb_dfph_proc_read;
- entry->write_proc = viafb_dfph_proc_write;
- }
- entry = create_proc_entry("dfpl", 0, *viafb_entry);
- if (entry) {
- entry->read_proc = viafb_dfpl_proc_read;
- entry->write_proc = viafb_dfpl_proc_write;
- }
+ proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops);
+ proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops);
+ proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops);
+ proc_create("dfpl", 0, *viafb_entry, &viafb_dfpl_proc_fops);
if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.
lvds_chip_name || VT1636_LVDS ==
viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
- entry = create_proc_entry("vt1636", 0, *viafb_entry);
- if (entry) {
- entry->read_proc = viafb_vt1636_proc_read;
- entry->write_proc = viafb_vt1636_proc_write;
- }
+ proc_create("vt1636", 0, *viafb_entry, &viafb_vt1636_proc_fops);
}
}
@@ -2094,51 +1822,61 @@ static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
remove_proc_entry("viafb", NULL);
}
-static int __devinit via_pci_probe(void)
+static void parse_mode(const char *str, u32 *xres, u32 *yres)
{
- unsigned long default_xres, default_yres;
- char *tmpc, *tmpm;
- char *tmpc_sec, *tmpm_sec;
+ char *ptr;
+
+ *xres = simple_strtoul(str, &ptr, 10);
+ if (ptr[0] != 'x')
+ goto out_default;
+
+ *yres = simple_strtoul(&ptr[1], &ptr, 10);
+ if (ptr[0])
+ goto out_default;
+
+ return;
+
+out_default:
+ printk(KERN_WARNING "viafb received invalid mode string: %s\n", str);
+ *xres = 640;
+ *yres = 480;
+}
+
+static int __devinit via_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u32 default_xres, default_yres;
int vmode_index;
- u32 tmds_length, lvds_length, crt_length, chip_length, viafb_par_length;
+ u32 viafb_par_length;
DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n");
viafb_par_length = ALIGN(sizeof(struct viafb_par), BITS_PER_LONG/8);
- tmds_length = ALIGN(sizeof(struct tmds_setting_information),
- BITS_PER_LONG/8);
- lvds_length = ALIGN(sizeof(struct lvds_setting_information),
- BITS_PER_LONG/8);
- crt_length = ALIGN(sizeof(struct lvds_setting_information),
- BITS_PER_LONG/8);
- chip_length = ALIGN(sizeof(struct chip_information), BITS_PER_LONG/8);
/* Allocate fb_info and ***_par here, also including some other needed
* variables
*/
- viafbinfo = framebuffer_alloc(viafb_par_length + 2 * lvds_length +
- tmds_length + crt_length + chip_length, NULL);
+ viafbinfo = framebuffer_alloc(viafb_par_length +
+ ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8),
+ &pdev->dev);
if (!viafbinfo) {
printk(KERN_ERR"Could not allocate memory for viafb_info.\n");
return -ENODEV;
}
viaparinfo = (struct viafb_par *)viafbinfo->par;
- viaparinfo->tmds_setting_info = (struct tmds_setting_information *)
- ((unsigned long)viaparinfo + viafb_par_length);
- viaparinfo->lvds_setting_info = (struct lvds_setting_information *)
- ((unsigned long)viaparinfo->tmds_setting_info + tmds_length);
- viaparinfo->lvds_setting_info2 = (struct lvds_setting_information *)
- ((unsigned long)viaparinfo->lvds_setting_info + lvds_length);
- viaparinfo->crt_setting_info = (struct crt_setting_information *)
- ((unsigned long)viaparinfo->lvds_setting_info2 + lvds_length);
- viaparinfo->chip_info = (struct chip_information *)
- ((unsigned long)viaparinfo->crt_setting_info + crt_length);
+ viaparinfo->shared = viafbinfo->par + viafb_par_length;
+ viaparinfo->vram_addr = 0;
+ viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info;
+ viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info;
+ viaparinfo->lvds_setting_info2 =
+ &viaparinfo->shared->lvds_setting_info2;
+ viaparinfo->crt_setting_info = &viaparinfo->shared->crt_setting_info;
+ viaparinfo->chip_info = &viaparinfo->shared->chip_info;
if (viafb_dual_fb)
viafb_SAMM_ON = 1;
parse_active_dev();
- parse_video_dev();
parse_lcd_port();
parse_dvi_port();
@@ -2149,32 +1887,32 @@ static int __devinit via_pci_probe(void)
/* Set up I2C bus stuff */
viafb_create_i2c_bus(viaparinfo);
- viafb_init_chip_info();
- viafb_get_fb_info(&viaparinfo->fbmem, &viaparinfo->memsize);
+ viafb_init_chip_info(pdev, ent);
+ viaparinfo->fbmem = pci_resource_start(pdev, 0);
+ viaparinfo->memsize = viafb_get_fb_size_from_pci();
viaparinfo->fbmem_free = viaparinfo->memsize;
viaparinfo->fbmem_used = 0;
- viaparinfo->fbmem_virt = ioremap_nocache(viaparinfo->fbmem,
+ viafbinfo->screen_base = ioremap_nocache(viaparinfo->fbmem,
viaparinfo->memsize);
- viafbinfo->screen_base = (char *)viaparinfo->fbmem_virt;
-
- if (!viaparinfo->fbmem_virt) {
+ if (!viafbinfo->screen_base) {
printk(KERN_INFO "ioremap failed\n");
- return -1;
+ return -ENOMEM;
}
- viafb_get_mmio_info(&viaparinfo->mmio_base, &viaparinfo->mmio_len);
- viaparinfo->io_virt = ioremap_nocache(viaparinfo->mmio_base,
- viaparinfo->mmio_len);
-
+ viafbinfo->fix.mmio_start = pci_resource_start(pdev, 1);
+ viafbinfo->fix.mmio_len = pci_resource_len(pdev, 1);
viafbinfo->node = 0;
viafbinfo->fbops = &viafb_ops;
viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
viafbinfo->pseudo_palette = pseudo_pal;
- if (viafb_accel) {
- viafb_init_accel();
- viafb_init_2d_engine();
- viafb_hw_cursor_init();
+ if (viafb_accel && !viafb_init_engine(viafbinfo)) {
+ viafbinfo->flags |= FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ default_var.accel_flags = FB_ACCELF_TEXT;
+ } else {
+ viafbinfo->flags |= FBINFO_HWACCEL_DISABLED;
+ default_var.accel_flags = 0;
}
if (viafb_second_size && (viafb_second_size < 8)) {
@@ -2186,27 +1924,14 @@ static int __devinit via_pci_probe(void)
viafb_second_size * 1024 * 1024;
}
- viafb_FB_MM = viaparinfo->fbmem_virt;
- tmpm = viafb_mode;
- tmpc = strsep(&tmpm, "x");
- strict_strtoul(tmpc, 0, &default_xres);
- strict_strtoul(tmpm, 0, &default_yres);
-
+ parse_mode(viafb_mode, &default_xres, &default_yres);
vmode_index = viafb_get_mode_index(default_xres, default_yres);
DEBUG_MSG(KERN_INFO "0->index=%d\n", vmode_index);
if (viafb_SAMM_ON == 1) {
- if (strcmp(viafb_mode, viafb_mode1)) {
- tmpm_sec = viafb_mode1;
- tmpc_sec = strsep(&tmpm_sec, "x");
- strict_strtoul(tmpc_sec, 0,
- (unsigned long *)&viafb_second_xres);
- strict_strtoul(tmpm_sec, 0,
- (unsigned long *)&viafb_second_yres);
- } else {
- viafb_second_xres = default_xres;
- viafb_second_yres = default_yres;
- }
+ parse_mode(viafb_mode1, &viafb_second_xres,
+ &viafb_second_yres);
+
if (0 == viafb_second_virtual_xres) {
switch (viafb_second_xres) {
case 1400:
@@ -2256,18 +1981,9 @@ static int __devinit via_pci_probe(void)
default_var.lower_margin = 4;
default_var.hsync_len = default_var.left_margin;
default_var.vsync_len = 4;
- default_var.accel_flags = 0;
-
- if (viafb_accel) {
- viafbinfo->flags |=
- (FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_IMAGEBLIT);
- default_var.accel_flags |= FB_ACCELF_TEXT;
- } else
- viafbinfo->flags |= FBINFO_HWACCEL_DISABLED;
if (viafb_dual_fb) {
- viafbinfo1 = framebuffer_alloc(viafb_par_length, NULL);
+ viafbinfo1 = framebuffer_alloc(viafb_par_length, &pdev->dev);
if (!viafbinfo1) {
printk(KERN_ERR
"allocate the second framebuffer struct error\n");
@@ -2276,11 +1992,10 @@ static int __devinit via_pci_probe(void)
}
viaparinfo1 = viafbinfo1->par;
memcpy(viaparinfo1, viaparinfo, viafb_par_length);
+ viaparinfo1->vram_addr = viafb_second_offset;
viaparinfo1->memsize = viaparinfo->memsize -
viafb_second_offset;
viaparinfo->memsize = viafb_second_offset;
- viaparinfo1->fbmem_virt = viaparinfo->fbmem_virt +
- viafb_second_offset;
viaparinfo1->fbmem = viaparinfo->fbmem + viafb_second_offset;
viaparinfo1->fbmem_used = viaparinfo->fbmem_used;
@@ -2288,20 +2003,13 @@ static int __devinit via_pci_probe(void)
viaparinfo1->fbmem_used;
viaparinfo->fbmem_free = viaparinfo->memsize;
viaparinfo->fbmem_used = 0;
- if (viafb_accel) {
- viaparinfo1->cursor_start =
- viaparinfo->cursor_start - viafb_second_offset;
- viaparinfo1->VQ_start = viaparinfo->VQ_start -
- viafb_second_offset;
- viaparinfo1->VQ_end = viaparinfo->VQ_end -
- viafb_second_offset;
- }
+ viaparinfo->iga_path = IGA1;
+ viaparinfo1->iga_path = IGA2;
memcpy(viafbinfo1, viafbinfo, sizeof(struct fb_info));
+ viafbinfo1->par = viaparinfo1;
viafbinfo1->screen_base = viafbinfo->screen_base +
viafb_second_offset;
- viafbinfo1->fix.smem_start = viaparinfo1->fbmem;
- viafbinfo1->fix.smem_len = viaparinfo1->fbmem_free;
default_var.xres = viafb_second_xres;
default_var.yres = viafb_second_yres;
@@ -2323,15 +2031,17 @@ static int __devinit via_pci_probe(void)
viafb_setup_fixinfo(&viafbinfo1->fix, viaparinfo1);
viafb_check_var(&default_var, viafbinfo1);
viafbinfo1->var = default_var;
- viafb_update_viafb_par(viafbinfo);
- viafb_update_fix(&viafbinfo1->fix, viafbinfo1);
+ viafb_update_fix(viafbinfo1);
+ viaparinfo1->depth = fb_get_color_depth(&viafbinfo1->var,
+ &viafbinfo1->fix);
}
viafb_setup_fixinfo(&viafbinfo->fix, viaparinfo);
viafb_check_var(&default_var, viafbinfo);
viafbinfo->var = default_var;
- viafb_update_viafb_par(viafbinfo);
- viafb_update_fix(&viafbinfo->fix, viafbinfo);
+ viafb_update_fix(viafbinfo);
+ viaparinfo->depth = fb_get_color_depth(&viafbinfo->var,
+ &viafbinfo->fix);
default_var.activate = FB_ACTIVATE_NOW;
fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
@@ -2353,20 +2063,20 @@ static int __devinit via_pci_probe(void)
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
default_var.yres, default_var.bits_per_pixel);
- viafb_init_proc(&viaparinfo->proc_entry);
+ viafb_init_proc(&viaparinfo->shared->proc_entry);
viafb_init_dac(IGA2);
return 0;
}
-static void __devexit via_pci_remove(void)
+static void __devexit via_pci_remove(struct pci_dev *pdev)
{
DEBUG_MSG(KERN_INFO "via_pci_remove!\n");
fb_dealloc_cmap(&viafbinfo->cmap);
unregister_framebuffer(viafbinfo);
if (viafb_dual_fb)
unregister_framebuffer(viafbinfo1);
- iounmap((void *)viaparinfo->fbmem_virt);
- iounmap(viaparinfo->io_virt);
+ iounmap((void *)viafbinfo->screen_base);
+ iounmap(viaparinfo->shared->engine_mmio);
viafb_delete_i2c_buss(viaparinfo);
@@ -2374,7 +2084,7 @@ static void __devexit via_pci_remove(void)
if (viafb_dual_fb)
framebuffer_release(viafbinfo1);
- viafb_remove_proc(viaparinfo->proc_entry);
+ viafb_remove_proc(viaparinfo->shared->proc_entry);
}
#ifndef MODULE
@@ -2441,8 +2151,6 @@ static int __init viafb_setup(char *options)
else if (!strncmp(this_opt, "viafb_lcd_mode=", 15))
strict_strtoul(this_opt + 15, 0,
(unsigned long *)&viafb_lcd_mode);
- else if (!strncmp(this_opt, "viafb_video_dev=", 16))
- viafb_video_dev = kstrdup(this_opt + 16, GFP_KERNEL);
else if (!strncmp(this_opt, "viafb_lcd_port=", 15))
viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL);
else if (!strncmp(this_opt, "viafb_dvi_port=", 15))
@@ -2452,6 +2160,40 @@ static int __init viafb_setup(char *options)
}
#endif
+static struct pci_device_id viafb_pci_table[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
+ .driver_data = UNICHROME_CLE266 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
+ .driver_data = UNICHROME_PM800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
+ .driver_data = UNICHROME_K400 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
+ .driver_data = UNICHROME_K800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
+ .driver_data = UNICHROME_CN700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
+ .driver_data = UNICHROME_K8M890 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
+ .driver_data = UNICHROME_CX700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
+ .driver_data = UNICHROME_P4M900 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
+ .driver_data = UNICHROME_CN750 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
+ .driver_data = UNICHROME_VX800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
+ .driver_data = UNICHROME_VX855 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, viafb_pci_table);
+
+static struct pci_driver viafb_driver = {
+ .name = "viafb",
+ .id_table = viafb_pci_table,
+ .probe = via_pci_probe,
+ .remove = __devexit_p(via_pci_remove),
+};
+
static int __init viafb_init(void)
{
#ifndef MODULE
@@ -2463,13 +2205,13 @@ static int __init viafb_init(void)
printk(KERN_INFO
"VIA Graphics Intergration Chipset framebuffer %d.%d initializing\n",
VERSION_MAJOR, VERSION_MINOR);
- return via_pci_probe();
+ return pci_register_driver(&viafb_driver);
}
static void __exit viafb_exit(void)
{
DEBUG_MSG(KERN_INFO "viafb_exit!\n");
- via_pci_remove();
+ pci_unregister_driver(&viafb_driver);
}
static struct fb_ops viafb_ops = {
@@ -2494,82 +2236,79 @@ module_init(viafb_init);
module_exit(viafb_exit);
#ifdef MODULE
-module_param(viafb_memsize, int, 0);
+module_param(viafb_memsize, int, S_IRUSR);
-module_param(viafb_mode, charp, 0);
+module_param(viafb_mode, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_mode, "Set resolution (default=640x480)");
-module_param(viafb_mode1, charp, 0);
+module_param(viafb_mode1, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_mode1, "Set resolution (default=640x480)");
-module_param(viafb_bpp, int, 0);
+module_param(viafb_bpp, int, S_IRUSR);
MODULE_PARM_DESC(viafb_bpp, "Set color depth (default=32bpp)");
-module_param(viafb_bpp1, int, 0);
+module_param(viafb_bpp1, int, S_IRUSR);
MODULE_PARM_DESC(viafb_bpp1, "Set color depth (default=32bpp)");
-module_param(viafb_refresh, int, 0);
+module_param(viafb_refresh, int, S_IRUSR);
MODULE_PARM_DESC(viafb_refresh,
"Set CRT viafb_refresh rate (default = 60)");
-module_param(viafb_refresh1, int, 0);
+module_param(viafb_refresh1, int, S_IRUSR);
MODULE_PARM_DESC(viafb_refresh1,
"Set CRT refresh rate (default = 60)");
-module_param(viafb_lcd_panel_id, int, 0);
+module_param(viafb_lcd_panel_id, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_panel_id,
"Set Flat Panel type(Default=1024x768)");
-module_param(viafb_lcd_dsp_method, int, 0);
+module_param(viafb_lcd_dsp_method, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_dsp_method,
"Set Flat Panel display scaling method.(Default=Expandsion)");
-module_param(viafb_SAMM_ON, int, 0);
+module_param(viafb_SAMM_ON, int, S_IRUSR);
MODULE_PARM_DESC(viafb_SAMM_ON,
"Turn on/off flag of SAMM(Default=OFF)");
-module_param(viafb_accel, int, 0);
+module_param(viafb_accel, int, S_IRUSR);
MODULE_PARM_DESC(viafb_accel,
- "Set 2D Hardware Acceleration.(Default = OFF)");
+ "Set 2D Hardware Acceleration: 0 = OFF, 1 = ON (default)");
-module_param(viafb_active_dev, charp, 0);
+module_param(viafb_active_dev, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_active_dev, "Specify active devices.");
-module_param(viafb_display_hardware_layout, int, 0);
+module_param(viafb_display_hardware_layout, int, S_IRUSR);
MODULE_PARM_DESC(viafb_display_hardware_layout,
"Display Hardware Layout (LCD Only, DVI Only...,etc)");
-module_param(viafb_second_size, int, 0);
+module_param(viafb_second_size, int, S_IRUSR);
MODULE_PARM_DESC(viafb_second_size,
"Set secondary device memory size");
-module_param(viafb_dual_fb, int, 0);
+module_param(viafb_dual_fb, int, S_IRUSR);
MODULE_PARM_DESC(viafb_dual_fb,
"Turn on/off flag of dual framebuffer devices.(Default = OFF)");
-module_param(viafb_platform_epia_dvi, int, 0);
+module_param(viafb_platform_epia_dvi, int, S_IRUSR);
MODULE_PARM_DESC(viafb_platform_epia_dvi,
"Turn on/off flag of DVI devices on EPIA board.(Default = OFF)");
-module_param(viafb_device_lcd_dualedge, int, 0);
+module_param(viafb_device_lcd_dualedge, int, S_IRUSR);
MODULE_PARM_DESC(viafb_device_lcd_dualedge,
"Turn on/off flag of dual edge panel.(Default = OFF)");
-module_param(viafb_bus_width, int, 0);
+module_param(viafb_bus_width, int, S_IRUSR);
MODULE_PARM_DESC(viafb_bus_width,
"Set bus width of panel.(Default = 12)");
-module_param(viafb_lcd_mode, int, 0);
+module_param(viafb_lcd_mode, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_mode,
"Set Flat Panel mode(Default=OPENLDI)");
-module_param(viafb_video_dev, charp, 0);
-MODULE_PARM_DESC(viafb_video_dev, "Specify video devices.");
-
-module_param(viafb_lcd_port, charp, 0);
+module_param(viafb_lcd_port, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_port, "Specify LCD output port.");
-module_param(viafb_dvi_port, charp, 0);
+module_param(viafb_dvi_port, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_dvi_port, "Specify DVI output port.");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index 227b000feb3..0c94d244192 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -37,51 +37,50 @@
#define VERSION_OS 0 /* 0: for 32 bits OS, 1: for 64 bits OS */
#define VERSION_MINOR 4
+struct viafb_shared {
+ struct proc_dir_entry *proc_entry; /*viafb proc entry */
+
+ /* I2C stuff */
+ struct via_i2c_stuff i2c_stuff;
+
+ /* All the information will be needed to set engine */
+ struct tmds_setting_information tmds_setting_info;
+ struct crt_setting_information crt_setting_info;
+ struct lvds_setting_information lvds_setting_info;
+ struct lvds_setting_information lvds_setting_info2;
+ struct chip_information chip_info;
+
+ /* hardware acceleration stuff */
+ void __iomem *engine_mmio;
+ u32 cursor_vram_addr;
+ u32 vq_vram_addr; /* virtual queue address in video ram */
+ int (*hw_bitblt)(void __iomem *engine, u8 op, u32 width, u32 height,
+ u8 dst_bpp, u32 dst_addr, u32 dst_pitch, u32 dst_x, u32 dst_y,
+ u32 *src_mem, u32 src_addr, u32 src_pitch, u32 src_x, u32 src_y,
+ u32 fg_color, u32 bg_color, u8 fill_rop);
+};
+
struct viafb_par {
- int bpp;
- int hres;
- int vres;
- int linelength;
- u32 xoffset;
- u32 yoffset;
-
- void __iomem *fbmem_virt; /*framebuffer virtual memory address */
- void __iomem *io_virt; /*iospace virtual memory address */
+ u8 depth;
+ u32 vram_addr;
+
unsigned int fbmem; /*framebuffer physical memory address */
unsigned int memsize; /*size of fbmem */
- unsigned int io; /*io space address */
- unsigned long mmio_base; /*mmio base address */
- unsigned long mmio_len; /*mmio base length */
u32 fbmem_free; /* Free FB memory */
u32 fbmem_used; /* Use FB memory size */
- u32 cursor_start; /* Cursor Start Address */
- u32 VQ_start; /* Virtual Queue Start Address */
- u32 VQ_end; /* Virtual Queue End Address */
u32 iga_path;
- struct proc_dir_entry *proc_entry; /*viafb proc entry */
- u8 duoview; /*Is working in duoview mode? */
- /* I2C stuff */
- struct via_i2c_stuff i2c_stuff;
+ struct viafb_shared *shared;
/* All the information will be needed to set engine */
+ /* deprecated, use the ones in shared directly */
struct tmds_setting_information *tmds_setting_info;
struct crt_setting_information *crt_setting_info;
struct lvds_setting_information *lvds_setting_info;
struct lvds_setting_information *lvds_setting_info2;
struct chip_information *chip_info;
-
- /* some information related to video playing */
- int video_on_crt;
- int video_on_dvi;
- int video_on_lcd;
-
-};
-struct viafb_modeinfo {
- u32 xres;
- u32 yres;
- int mode_index;
};
+
extern unsigned int viafb_second_virtual_yres;
extern unsigned int viafb_second_virtual_xres;
extern unsigned int viafb_second_offset;
@@ -91,14 +90,12 @@ extern int viafb_dual_fb;
extern int viafb_LCD2_ON;
extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
-extern int viafb_accel;
extern int viafb_hotplug;
extern int viafb_memsize;
extern int strict_strtoul(const char *cp, unsigned int base,
unsigned long *res);
-void viafb_memory_pitch_patch(struct fb_info *info);
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
int mode_index);
int viafb_get_mode_index(int hres, int vres);
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index 6dcf583a837..b74f8a67923 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -100,12 +100,8 @@ struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR0F, 0xFF, 0x00}, /* Cursor Location Low */
{VIACR, CR32, 0xFF, 0x00},
{VIACR, CR33, 0xFF, 0x00},
-{VIACR, CR34, 0xFF, 0x00},
{VIACR, CR35, 0xFF, 0x00},
{VIACR, CR36, 0x08, 0x00},
-{VIACR, CR62, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR63, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR64, 0xFF, 0x00}, /* Secondary Display Starting Address */
{VIACR, CR69, 0xFF, 0x00},
{VIACR, CR6A, 0xFF, 0x40},
{VIACR, CR6B, 0xFF, 0x00},
@@ -159,16 +155,12 @@ struct io_reg CN700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIASR, CR30, 0xFF, 0x04},
{VIACR, CR32, 0xFF, 0x00},
{VIACR, CR33, 0x7F, 0x00},
-{VIACR, CR34, 0xFF, 0x00},
{VIACR, CR35, 0xFF, 0x00},
{VIACR, CR36, 0xFF, 0x31},
{VIACR, CR41, 0xFF, 0x80},
{VIACR, CR42, 0xFF, 0x00},
{VIACR, CR55, 0x80, 0x00},
{VIACR, CR5D, 0x80, 0x00}, /*Horizontal Retrace Start bit[11] should be 0*/
-{VIACR, CR62, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR63, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR64, 0xFF, 0x00}, /* Secondary Display Starting Address */
{VIACR, CR68, 0xFF, 0x67}, /* Default FIFO For IGA2 */
{VIACR, CR69, 0xFF, 0x00},
{VIACR, CR6A, 0xFD, 0x40},
@@ -233,9 +225,6 @@ struct io_reg KM400_ModeXregs[] = {
{VIACR, CR55, 0x80, 0x00},
{VIACR, CR5D, 0x80, 0x00},
{VIACR, CR36, 0xFF, 0x01}, /* Power Management 3 */
- {VIACR, CR62, 0xFF, 0x00}, /* Secondary Display Starting Address */
- {VIACR, CR63, 0xFF, 0x00}, /* Secondary Display Starting Address */
- {VIACR, CR64, 0xFF, 0x00}, /* Secondary Display Starting Address */
{VIACR, CR68, 0xFF, 0x67}, /* Default FIFO For IGA2 */
{VIACR, CR6A, 0x20, 0x20}, /* Extended FIFO On */
{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
@@ -285,14 +274,9 @@ struct io_reg CX700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR0F, 0xFF, 0x00}, /* Cursor Location Low */
{VIACR, CR32, 0xFF, 0x00},
{VIACR, CR33, 0xFF, 0x00},
-{VIACR, CR34, 0xFF, 0x00},
{VIACR, CR35, 0xFF, 0x00},
{VIACR, CR36, 0x08, 0x00},
{VIACR, CR47, 0xC8, 0x00}, /* Clear VCK Plus. */
-{VIACR, CR62, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR63, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR64, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CRA3, 0xFF, 0x00}, /* Secondary Display Starting Address */
{VIACR, CR69, 0xFF, 0x00},
{VIACR, CR6A, 0xFF, 0x40},
{VIACR, CR6B, 0xFF, 0x00},
@@ -325,69 +309,61 @@ struct io_reg CX700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR96, 0xFF, 0x00},
{VIACR, CR97, 0xFF, 0x00},
{VIACR, CR99, 0xFF, 0x00},
-{VIACR, CR9B, 0xFF, 0x00},
-{VIACR, CRD2, 0xFF, 0xFF} /* TMDS/LVDS control register. */
+{VIACR, CR9B, 0xFF, 0x00}
};
-/* For VT3353: Common Setting for Video Mode */
-struct io_reg VX800_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
+struct io_reg VX855_ModeXregs[] = {
+{VIASR, SR10, 0xFF, 0x01},
{VIASR, SR15, 0x02, 0x02},
{VIASR, SR16, 0xBF, 0x08},
{VIASR, SR17, 0xFF, 0x1F},
{VIASR, SR18, 0xFF, 0x4E},
{VIASR, SR1A, 0xFB, 0x08},
{VIASR, SR1B, 0xFF, 0xF0},
-{VIASR, SR1E, 0xFF, 0x01},
-{VIASR, SR2A, 0xFF, 0x00},
+{VIASR, SR1E, 0x07, 0x01},
+{VIASR, SR2A, 0xF0, 0x00},
+{VIASR, SR58, 0xFF, 0x00},
+{VIASR, SR59, 0xFF, 0x00},
{VIASR, SR2D, 0xFF, 0xFF}, /* VCK and LCK PLL power on. */
+{VIACR, CR09, 0xFF, 0x00}, /* Initial CR09=0*/
+{VIACR, CR11, 0x8F, 0x00}, /* IGA1 initial Vertical end */
+{VIACR, CR17, 0x7F, 0x00}, /* IGA1 CRT Mode control init */
{VIACR, CR0A, 0xFF, 0x1E}, /* Cursor Start */
{VIACR, CR0B, 0xFF, 0x00}, /* Cursor End */
{VIACR, CR0E, 0xFF, 0x00}, /* Cursor Location High */
{VIACR, CR0F, 0xFF, 0x00}, /* Cursor Location Low */
{VIACR, CR32, 0xFF, 0x00},
-{VIACR, CR33, 0xFF, 0x00},
-{VIACR, CR34, 0xFF, 0x00},
+{VIACR, CR33, 0x7F, 0x00},
{VIACR, CR35, 0xFF, 0x00},
{VIACR, CR36, 0x08, 0x00},
-{VIACR, CR47, 0xC8, 0x00}, /* Clear VCK Plus. */
-{VIACR, CR62, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR63, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CR64, 0xFF, 0x00}, /* Secondary Display Starting Address */
-{VIACR, CRA3, 0xFF, 0x00}, /* Secondary Display Starting Address */
{VIACR, CR69, 0xFF, 0x00},
-{VIACR, CR6A, 0xFF, 0x40},
+{VIACR, CR6A, 0xFD, 0x60},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
-{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
-{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
-{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
-{VIACR, CRD4, 0xFF, 0x81}, /* Second power sequence control */
-{VIACR, CR8B, 0xFF, 0x5D}, /* LCD Power Sequence Control 0 */
-{VIACR, CR8C, 0xFF, 0x2B}, /* LCD Power Sequence Control 1 */
-{VIACR, CR8D, 0xFF, 0x6F}, /* LCD Power Sequence Control 2 */
-{VIACR, CR8E, 0xFF, 0x2B}, /* LCD Power Sequence Control 3 */
-{VIACR, CR8F, 0xFF, 0x01}, /* LCD Power Sequence Control 4 */
-{VIACR, CR90, 0xFF, 0x01}, /* LCD Power Sequence Control 5 */
-{VIACR, CR91, 0xFF, 0x80}, /* 24/12 bit LVDS Data off */
+{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
+{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
+{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
+{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
+{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
+{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
+{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
+{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
+{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
+{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
+{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
+{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
+{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
+{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
+{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
+{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
+{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
+{VIACR, CRD4, 0xFF, 0x81}, /* Second power sequence control */
+{VIACR, CR91, 0xFF, 0x80}, /* 24/12 bit LVDS Data off */
{VIACR, CR96, 0xFF, 0x00},
{VIACR, CR97, 0xFF, 0x00},
{VIACR, CR99, 0xFF, 0x00},
{VIACR, CR9B, 0xFF, 0x00},
-{VIACR, CRD2, 0xFF, 0xFF} /* TMDS/LVDS control register. */
+{VIACR, CRD2, 0xFF, 0xFF} /* TMDS/LVDS control register. */
};
/* Video Mode Table */
@@ -401,7 +377,6 @@ struct io_reg CLE266_ModeXregs[] = { {VIASR, SR1E, 0xF0, 0x00},
{VIASR, SR1A, 0xFB, 0x08},
{VIACR, CR32, 0xFF, 0x00},
-{VIACR, CR34, 0xFF, 0x00},
{VIACR, CR35, 0xFF, 0x00},
{VIACR, CR36, 0x08, 0x00},
{VIACR, CR6A, 0xFF, 0x80},
@@ -1084,3 +1059,14 @@ struct VideoModeTable CEA_HDMI_Modes[] = {
{VIA_RES_1280X720, CEAM1280x720, ARRAY_SIZE(CEAM1280x720)},
{VIA_RES_1920X1080, CEAM1920x1080, ARRAY_SIZE(CEAM1920x1080)}
};
+
+int NUM_TOTAL_RES_MAP_REFRESH = ARRAY_SIZE(res_map_refresh_tbl);
+int NUM_TOTAL_CEA_MODES = ARRAY_SIZE(CEA_HDMI_Modes);
+int NUM_TOTAL_CN400_ModeXregs = ARRAY_SIZE(CN400_ModeXregs);
+int NUM_TOTAL_CN700_ModeXregs = ARRAY_SIZE(CN700_ModeXregs);
+int NUM_TOTAL_KM400_ModeXregs = ARRAY_SIZE(KM400_ModeXregs);
+int NUM_TOTAL_CX700_ModeXregs = ARRAY_SIZE(CX700_ModeXregs);
+int NUM_TOTAL_VX855_ModeXregs = ARRAY_SIZE(VX855_ModeXregs);
+int NUM_TOTAL_CLE266_ModeXregs = ARRAY_SIZE(CLE266_ModeXregs);
+int NUM_TOTAL_PATCH_MODE = ARRAY_SIZE(res_patch_table);
+int NUM_TOTAL_MODETABLE = ARRAY_SIZE(CLE266Modes);
diff --git a/drivers/video/via/viamode.h b/drivers/video/via/viamode.h
index 1a5de50a23a..a9d6554fabd 100644
--- a/drivers/video/via/viamode.h
+++ b/drivers/video/via/viamode.h
@@ -50,128 +50,35 @@ struct res_map_refresh {
int vmode_refresh;
};
-#define NUM_TOTAL_RES_MAP_REFRESH ARRAY_SIZE(res_map_refresh_tbl)
-#define NUM_TOTAL_CEA_MODES ARRAY_SIZE(CEA_HDMI_Modes)
-#define NUM_TOTAL_CN400_ModeXregs ARRAY_SIZE(CN400_ModeXregs)
-#define NUM_TOTAL_CN700_ModeXregs ARRAY_SIZE(CN700_ModeXregs)
-#define NUM_TOTAL_KM400_ModeXregs ARRAY_SIZE(KM400_ModeXregs)
-#define NUM_TOTAL_CX700_ModeXregs ARRAY_SIZE(CX700_ModeXregs)
-#define NUM_TOTAL_VX800_ModeXregs ARRAY_SIZE(VX800_ModeXregs)
-#define NUM_TOTAL_CLE266_ModeXregs ARRAY_SIZE(CLE266_ModeXregs)
-#define NUM_TOTAL_PATCH_MODE ARRAY_SIZE(res_patch_table)
-#define NUM_TOTAL_MODETABLE ARRAY_SIZE(CLE266Modes)
+extern int NUM_TOTAL_RES_MAP_REFRESH;
+extern int NUM_TOTAL_CEA_MODES;
+extern int NUM_TOTAL_CN400_ModeXregs;
+extern int NUM_TOTAL_CN700_ModeXregs;
+extern int NUM_TOTAL_KM400_ModeXregs;
+extern int NUM_TOTAL_CX700_ModeXregs;
+extern int NUM_TOTAL_VX855_ModeXregs;
+extern int NUM_TOTAL_CLE266_ModeXregs;
+extern int NUM_TOTAL_PATCH_MODE;
+extern int NUM_TOTAL_MODETABLE;
/********************/
/* Mode Table */
/********************/
-/* 480x640 */
-extern struct crt_mode_table CRTM480x640[1];
-/* 640x480*/
-extern struct crt_mode_table CRTM640x480[5];
-/*720x480 (GTF)*/
-extern struct crt_mode_table CRTM720x480[1];
-/*720x576 (GTF)*/
-extern struct crt_mode_table CRTM720x576[1];
-/* 800x480 (CVT) */
-extern struct crt_mode_table CRTM800x480[1];
-/* 800x600*/
-extern struct crt_mode_table CRTM800x600[5];
-/* 848x480 (CVT) */
-extern struct crt_mode_table CRTM848x480[1];
-/*856x480 (GTF) convert to 852x480*/
-extern struct crt_mode_table CRTM852x480[1];
-/*1024x512 (GTF)*/
-extern struct crt_mode_table CRTM1024x512[1];
-/* 1024x600*/
-extern struct crt_mode_table CRTM1024x600[1];
-/* 1024x768*/
-extern struct crt_mode_table CRTM1024x768[4];
-/* 1152x864*/
-extern struct crt_mode_table CRTM1152x864[1];
-/* 1280x720 (HDMI 720P)*/
-extern struct crt_mode_table CRTM1280x720[2];
-/*1280x768 (GTF)*/
-extern struct crt_mode_table CRTM1280x768[2];
-/* 1280x800 (CVT) */
-extern struct crt_mode_table CRTM1280x800[1];
-/*1280x960*/
-extern struct crt_mode_table CRTM1280x960[1];
-/* 1280x1024*/
-extern struct crt_mode_table CRTM1280x1024[3];
-/* 1368x768 (GTF) */
-extern struct crt_mode_table CRTM1368x768[1];
-/*1440x1050 (GTF)*/
-extern struct crt_mode_table CRTM1440x1050[1];
-/* 1600x1200*/
-extern struct crt_mode_table CRTM1600x1200[2];
-/* 1680x1050 (CVT) */
-extern struct crt_mode_table CRTM1680x1050[2];
-/* 1680x1050 (CVT Reduce Blanking) */
-extern struct crt_mode_table CRTM1680x1050_RB[1];
-/* 1920x1080 (CVT)*/
-extern struct crt_mode_table CRTM1920x1080[1];
-/* 1920x1080 (CVT with Reduce Blanking) */
-extern struct crt_mode_table CRTM1920x1080_RB[1];
-/* 1920x1440*/
-extern struct crt_mode_table CRTM1920x1440[2];
-/* 1400x1050 (CVT) */
-extern struct crt_mode_table CRTM1400x1050[2];
-/* 1400x1050 (CVT Reduce Blanking) */
-extern struct crt_mode_table CRTM1400x1050_RB[1];
-/* 960x600 (CVT) */
-extern struct crt_mode_table CRTM960x600[1];
-/* 1000x600 (GTF) */
-extern struct crt_mode_table CRTM1000x600[1];
-/* 1024x576 (GTF) */
-extern struct crt_mode_table CRTM1024x576[1];
-/* 1088x612 (CVT) */
-extern struct crt_mode_table CRTM1088x612[1];
-/* 1152x720 (CVT) */
-extern struct crt_mode_table CRTM1152x720[1];
-/* 1200x720 (GTF) */
-extern struct crt_mode_table CRTM1200x720[1];
-/* 1280x600 (GTF) */
-extern struct crt_mode_table CRTM1280x600[1];
-/* 1360x768 (CVT) */
-extern struct crt_mode_table CRTM1360x768[1];
-/* 1360x768 (CVT Reduce Blanking) */
-extern struct crt_mode_table CRTM1360x768_RB[1];
-/* 1366x768 (GTF) */
-extern struct crt_mode_table CRTM1366x768[2];
-/* 1440x900 (CVT) */
-extern struct crt_mode_table CRTM1440x900[2];
-/* 1440x900 (CVT Reduce Blanking) */
-extern struct crt_mode_table CRTM1440x900_RB[1];
-/* 1600x900 (CVT) */
-extern struct crt_mode_table CRTM1600x900[1];
-/* 1600x900 (CVT Reduce Blanking) */
-extern struct crt_mode_table CRTM1600x900_RB[1];
-/* 1600x1024 (GTF) */
-extern struct crt_mode_table CRTM1600x1024[1];
-/* 1792x1344 (DMT) */
-extern struct crt_mode_table CRTM1792x1344[1];
-/* 1856x1392 (DMT) */
-extern struct crt_mode_table CRTM1856x1392[1];
-/* 1920x1200 (CVT) */
-extern struct crt_mode_table CRTM1920x1200[1];
-/* 1920x1200 (CVT with Reduce Blanking) */
-extern struct crt_mode_table CRTM1920x1200_RB[1];
-/* 2048x1536 (CVT) */
-extern struct crt_mode_table CRTM2048x1536[1];
-extern struct VideoModeTable CLE266Modes[47];
-extern struct crt_mode_table CEAM1280x720[1];
-extern struct crt_mode_table CEAM1920x1080[1];
-extern struct VideoModeTable CEA_HDMI_Modes[2];
+extern struct VideoModeTable CLE266Modes[];
+extern struct crt_mode_table CEAM1280x720[];
+extern struct crt_mode_table CEAM1920x1080[];
+extern struct VideoModeTable CEA_HDMI_Modes[];
-extern struct res_map_refresh res_map_refresh_tbl[61];
-extern struct io_reg CN400_ModeXregs[52];
-extern struct io_reg CN700_ModeXregs[66];
-extern struct io_reg KM400_ModeXregs[55];
-extern struct io_reg CX700_ModeXregs[58];
-extern struct io_reg VX800_ModeXregs[58];
-extern struct io_reg CLE266_ModeXregs[32];
-extern struct io_reg PM1024x768[2];
-extern struct patch_table res_patch_table[1];
+extern struct res_map_refresh res_map_refresh_tbl[];
+extern struct io_reg CN400_ModeXregs[];
+extern struct io_reg CN700_ModeXregs[];
+extern struct io_reg KM400_ModeXregs[];
+extern struct io_reg CX700_ModeXregs[];
+extern struct io_reg VX800_ModeXregs[];
+extern struct io_reg VX855_ModeXregs[];
+extern struct io_reg CLE266_ModeXregs[];
+extern struct io_reg PM1024x768[];
+extern struct patch_table res_patch_table[];
extern struct VPITTable VPIT;
#endif /* __VIAMODE_H__ */
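
The viamode.h hunk above pairs with the viamode.c change before it: the NUM_TOTAL_* counts stop being header macros and become plain ints defined next to the arrays with ARRAY_SIZE(), and the extern declarations drop their hard-coded lengths, so the header no longer has to be kept in sync with the table definitions. A minimal sketch of that pattern, using hypothetical names (example_regs, num_example_regs) rather than the driver's own tables:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct io_reg { int port, index, mask, value; };

/* Only "extern struct io_reg example_regs[];" and "extern int num_example_regs;"
 * would need to appear in the header; the size lives with the definition. */
static struct io_reg example_regs[] = {
	{ 0x3c4, 0x10, 0xff, 0x01 },
	{ 0x3d4, 0x32, 0xff, 0x00 },
};
static int num_example_regs = ARRAY_SIZE(example_regs);

int main(void)
{
	printf("%d register settings\n", num_example_regs);
	return 0;
}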
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index 322a9f99355..a6b37494e79 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -27,7 +27,7 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
{
u8 data;
- viaparinfo->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
+ viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
viafb_i2c_readbyte(plvds_chip_info->lvds_chip_slave_addr, index, &data);
return data;
@@ -39,7 +39,7 @@ void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
{
int index, data;
- viaparinfo->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
+ viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
index = io_data.Index;
data = viafb_gpio_i2c_read_lvds(plvds_setting_info, plvds_chip_info,
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 26b27826479..200c22f5513 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -19,6 +19,7 @@
*/
//#define DEBUG
#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/kthread.h>
@@ -84,7 +85,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) != 0)
+ if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
vq->vq_ops->kick(vq);
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 248e00ec4dc..4a1f1ebff7b 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -84,7 +84,7 @@ struct virtio_pci_vq_info
struct list_head node;
/* MSI-X vector (or none) */
- unsigned vector;
+ unsigned msix_vector;
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -280,25 +280,14 @@ static void vp_free_vectors(struct virtio_device *vdev)
vp_dev->msix_entries = NULL;
}
-static int vp_request_vectors(struct virtio_device *vdev, int nvectors,
- bool per_vq_vectors)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ bool per_vq_vectors)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
unsigned i, v;
int err = -ENOMEM;
- if (!nvectors) {
- /* Can't allocate MSI-X vectors, use regular interrupt */
- vp_dev->msix_vectors = 0;
- err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
- IRQF_SHARED, name, vp_dev);
- if (err)
- return err;
- vp_dev->intx_enabled = 1;
- return 0;
- }
-
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
if (!vp_dev->msix_entries)
@@ -311,6 +300,7 @@ static int vp_request_vectors(struct virtio_device *vdev, int nvectors,
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
+ /* pci_enable_msix returns positive if we can't get this many. */
err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors);
if (err > 0)
err = -ENOSPC;
@@ -356,10 +346,22 @@ error:
return err;
}
-static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name,
- u16 vector)
+static int vp_request_intx(struct virtio_device *vdev)
+{
+ int err;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
+ IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
+ if (!err)
+ vp_dev->intx_enabled = 1;
+ return err;
+}
+
+static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name,
+ u16 msix_vec)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info;
@@ -384,7 +386,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
info->queue_index = index;
info->num = num;
- info->vector = vector;
+ info->msix_vector = msix_vec;
size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -408,10 +410,10 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
vq->priv = info;
info->vq = vq;
- if (vector != VIRTIO_MSI_NO_VECTOR) {
- iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
- vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
- if (vector == VIRTIO_MSI_NO_VECTOR) {
+ if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
+ iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto out_assign;
}
@@ -472,7 +474,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
info = vq->priv;
if (vp_dev->per_vq_vectors)
- free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+ free_irq(vp_dev->msix_entries[info->msix_vector].vector,
+ vq);
vp_del_vq(vq);
}
vp_dev->per_vq_vectors = false;
@@ -484,38 +487,58 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[],
- int nvectors,
+ bool use_msix,
bool per_vq_vectors)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- u16 vector;
- int i, err, allocated_vectors;
+ u16 msix_vec;
+ int i, err, nvectors, allocated_vectors;
- err = vp_request_vectors(vdev, nvectors, per_vq_vectors);
- if (err)
- goto error_request;
+ if (!use_msix) {
+ /* Old style: one normal interrupt for change and all vqs. */
+ err = vp_request_intx(vdev);
+ if (err)
+ goto error_request;
+ } else {
+ if (per_vq_vectors) {
+ /* Best option: one for change interrupt, one per vq. */
+ nvectors = 1;
+ for (i = 0; i < nvqs; ++i)
+ if (callbacks[i])
+ ++nvectors;
+ } else {
+ /* Second best: one for change, shared for all vqs. */
+ nvectors = 2;
+ }
+
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+ if (err)
+ goto error_request;
+ }
vp_dev->per_vq_vectors = per_vq_vectors;
allocated_vectors = vp_dev->msix_used_vectors;
for (i = 0; i < nvqs; ++i) {
if (!callbacks[i] || !vp_dev->msix_enabled)
- vector = VIRTIO_MSI_NO_VECTOR;
+ msix_vec = VIRTIO_MSI_NO_VECTOR;
else if (vp_dev->per_vq_vectors)
- vector = allocated_vectors++;
+ msix_vec = allocated_vectors++;
else
- vector = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i], vector);
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
}
/* allocate per-vq irq if available and necessary */
- if (vp_dev->per_vq_vectors && vector != VIRTIO_MSI_NO_VECTOR) {
- snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
- "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]);
- err = request_irq(vp_dev->msix_entries[vector].vector,
- vring_interrupt, 0,
- vp_dev->msix_names[vector], vqs[i]);
+ if (vp_dev->per_vq_vectors) {
+ snprintf(vp_dev->msix_names[msix_vec],
+ sizeof *vp_dev->msix_names,
+ "%s-%s",
+ dev_name(&vp_dev->vdev.dev), names[i]);
+ err = request_irq(msix_vec, vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec],
+ vqs[i]);
if (err) {
vp_del_vq(vqs[i]);
goto error_find;
@@ -537,28 +560,20 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
vq_callback_t *callbacks[],
const char *names[])
{
- int vectors = 0;
- int i, uninitialized_var(err);
-
- /* How many vectors would we like? */
- for (i = 0; i < nvqs; ++i)
- if (callbacks[i])
- ++vectors;
+ int err;
- /* We want at most one vector per queue and one for config changes. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- vectors + 1, true);
+ /* Try MSI-X with one vector per queue. */
+ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
if (!err)
return 0;
- /* Fallback to separate vectors for config and a shared for queues. */
+ /* Fallback: MSI-X with one vector for config, one shared for queues. */
err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- 2, false);
+ true, false);
if (!err)
return 0;
/* Finally fall back to regular interrupts. */
- err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
- 0, false);
- return err;
+ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
+ false, false);
}
static struct virtio_config_ops virtio_pci_config_ops = {
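
The rewritten vp_find_vqs() above walks through three interrupt configurations in order: MSI-X with one vector per virtqueue plus one for configuration changes, then MSI-X with just two vectors (configuration plus one shared by all queues), and finally a single legacy INTx interrupt. The sketch below mirrors that control flow only; try_setup() is a hypothetical stand-in for vp_try_to_find_vqs() called with the (use_msix, per_vq_vectors) pairs used in the hunk:

#include <stdio.h>

/* Hypothetical stand-in for vp_try_to_find_vqs(vdev, ..., use_msix, per_vq_vectors). */
static int try_setup(int use_msix, int per_vq_vectors)
{
	(void)per_vq_vectors;
	/* Pretend MSI-X allocation fails so the whole fallback chain is visible. */
	return use_msix ? -1 : 0;
}

int main(void)
{
	if (try_setup(1, 1) == 0)		/* best: one MSI-X vector per vq + config */
		puts("using per-vq MSI-X");
	else if (try_setup(1, 0) == 0)		/* second best: config + one shared vq vector */
		puts("using shared MSI-X");
	else if (try_setup(0, 0) == 0)		/* last resort: one legacy INTx interrupt */
		puts("using legacy INTx");
	return 0;
}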
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index a882f260651..f5360058072 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -208,7 +208,11 @@ add_head:
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
- return 0;
+
+ /* If we're indirect, we can fit many (assuming not OOM). */
+ if (vq->indirect)
+ return vq->num_free ? vq->vring.num : 0;
+ return vq->num_free;
}
static void vring_kick(struct virtqueue *_vq)
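
The virtio_ring change above alters the add_buf() success convention: instead of returning 0, it now reports the remaining capacity (or the full ring size when indirect descriptors are in use), so callers must treat any non-negative value as success. That is why the virtio_balloon hunk earlier in this patch switches its check from "!= 0" to "< 0". A small illustration of the caller-side convention, with ring_add_buf() as a hypothetical stand-in for vq->vq_ops->add_buf():

#include <stdio.h>

/* Hypothetical stand-in for vq->vq_ops->add_buf(): negative means error,
 * zero or more means success and reports how many slots remain. */
static int ring_add_buf(int *free_slots)
{
	if (*free_slots == 0)
		return -28;			/* like -ENOSPC: ring is full */
	return --(*free_slots);
}

int main(void)
{
	int free_slots = 2, ret;

	while ((ret = ring_add_buf(&free_slots)) >= 0)	/* success test is ">= 0", not "== 0" */
		printf("queued a buffer, %d slot(s) left\n", ret);
	printf("stopped queueing: error %d\n", ret);
	return 0;
}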
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index f05d2a36836..9554ad5f9af 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -28,7 +28,6 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -703,7 +702,7 @@ static int vlynq_probe(struct platform_device *pdev)
dev->mem_start = mem_res->start;
dev->mem_end = mem_res->end;
- len = regs_res->end - regs_res->start;
+ len = resource_size(regs_res);
if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
printk(KERN_ERR "%s: Can't request vlynq registers\n",
dev_name(&dev->dev));
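
resource_size() evaluates to res->end - res->start + 1, matching the inclusive [start, end] convention of struct resource, so the open-coded end - start it replaces was one byte short of the region being requested. (The adx_wdt probe added below still open-codes the equivalent "+ 1" arithmetic.) A tiny illustration with a hypothetical resource_size_like() helper:

#include <stdio.h>

/* Hypothetical resource with the usual inclusive [start, end] convention. */
struct resource_like { unsigned long start, end; };

static unsigned long resource_size_like(const struct resource_like *res)
{
	return res->end - res->start + 1;	/* what resource_size() computes */
}

int main(void)
{
	struct resource_like regs = { .start = 0x1000, .end = 0x10ff };

	printf("buggy length: %lu\n", regs.end - regs.start);		/* 255 */
	printf("actual size : %lu\n", resource_size_like(&regs));	/* 256 */
	return 0;
}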
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ff3eb8ff6bd..3711b888d48 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -282,6 +282,13 @@ config NUC900_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called nuc900_wdt.
+config ADX_WATCHDOG
+ tristate "Avionic Design Xanthos watchdog"
+ depends on ARCH_PXA_ADX
+ help
+ Say Y here if you want support for the watchdog timer on Avionic
+ Design Xanthos boards.
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 348b3b862c9..699199b1baa 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
+obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
new file mode 100644
index 00000000000..77afb0acc50
--- /dev/null
+++ b/drivers/watchdog/adx_wdt.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define WATCHDOG_NAME "adx-wdt"
+
+/* register offsets */
+#define ADX_WDT_CONTROL 0x00
+#define ADX_WDT_CONTROL_ENABLE (1 << 0)
+#define ADX_WDT_CONTROL_nRESET (1 << 1)
+#define ADX_WDT_TIMEOUT 0x08
+
+static struct platform_device *adx_wdt_dev;
+static unsigned long driver_open;
+
+#define WDT_STATE_STOP 0
+#define WDT_STATE_START 1
+
+struct adx_wdt {
+ void __iomem *base;
+ unsigned long timeout;
+ unsigned int state;
+ unsigned int wake;
+ spinlock_t lock;
+};
+
+static struct watchdog_info adx_wdt_info = {
+ .identity = "Avionic Design Xanthos Watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+};
+
+static void adx_wdt_start_locked(struct adx_wdt *wdt)
+{
+ u32 ctrl;
+
+ ctrl = readl(wdt->base + ADX_WDT_CONTROL);
+ ctrl |= ADX_WDT_CONTROL_ENABLE;
+ writel(ctrl, wdt->base + ADX_WDT_CONTROL);
+ wdt->state = WDT_STATE_START;
+}
+
+static void adx_wdt_start(struct adx_wdt *wdt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ adx_wdt_start_locked(wdt);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+}
+
+static void adx_wdt_stop_locked(struct adx_wdt *wdt)
+{
+ u32 ctrl;
+
+ ctrl = readl(wdt->base + ADX_WDT_CONTROL);
+ ctrl &= ~ADX_WDT_CONTROL_ENABLE;
+ writel(ctrl, wdt->base + ADX_WDT_CONTROL);
+ wdt->state = WDT_STATE_STOP;
+}
+
+static void adx_wdt_stop(struct adx_wdt *wdt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ adx_wdt_stop_locked(wdt);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+}
+
+static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
+{
+ unsigned long timeout = seconds * 1000;
+ unsigned long flags;
+ unsigned int state;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ state = wdt->state;
+ adx_wdt_stop_locked(wdt);
+ writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
+
+ if (state == WDT_STATE_START)
+ adx_wdt_start_locked(wdt);
+
+ wdt->timeout = timeout;
+ spin_unlock_irqrestore(&wdt->lock, flags);
+}
+
+static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
+{
+ *seconds = wdt->timeout / 1000;
+}
+
+static void adx_wdt_keepalive(struct adx_wdt *wdt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdt->lock, flags);
+ writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+}
+
+static int adx_wdt_open(struct inode *inode, struct file *file)
+{
+ struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
+
+ if (test_and_set_bit(0, &driver_open))
+ return -EBUSY;
+
+ file->private_data = wdt;
+ adx_wdt_set_timeout(wdt, 30);
+ adx_wdt_start(wdt);
+
+ return nonseekable_open(inode, file);
+}
+
+static int adx_wdt_release(struct inode *inode, struct file *file)
+{
+ struct adx_wdt *wdt = file->private_data;
+
+ adx_wdt_stop(wdt);
+ clear_bit(0, &driver_open);
+
+ return 0;
+}
+
+static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct adx_wdt *wdt = file->private_data;
+ void __user *argp = (void __user *)arg;
+ unsigned long __user *p = argp;
+ unsigned long seconds = 0;
+ unsigned int options;
+ long ret = -EINVAL;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
+ return -EFAULT;
+ else
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_KEEPALIVE:
+ adx_wdt_keepalive(wdt);
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(seconds, p))
+ return -EFAULT;
+
+ adx_wdt_set_timeout(wdt, seconds);
+
+ /* fallthrough */
+ case WDIOC_GETTIMEOUT:
+ adx_wdt_get_timeout(wdt, &seconds);
+ return put_user(seconds, p);
+
+ case WDIOC_SETOPTIONS:
+ if (copy_from_user(&options, argp, sizeof(options)))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD) {
+ adx_wdt_stop(wdt);
+ ret = 0;
+ }
+
+ if (options & WDIOS_ENABLECARD) {
+ adx_wdt_start(wdt);
+ ret = 0;
+ }
+
+ return ret;
+
+ default:
+ break;
+ }
+
+ return -ENOTTY;
+}
+
+static ssize_t adx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ struct adx_wdt *wdt = file->private_data;
+
+ if (len)
+ adx_wdt_keepalive(wdt);
+
+ return len;
+}
+
+static const struct file_operations adx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = adx_wdt_open,
+ .release = adx_wdt_release,
+ .unlocked_ioctl = adx_wdt_ioctl,
+ .write = adx_wdt_write,
+};
+
+static struct miscdevice adx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &adx_wdt_fops,
+};
+
+static int __devinit adx_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct adx_wdt *wdt;
+ int ret = 0;
+ u32 ctrl;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt) {
+ dev_err(&pdev->dev, "cannot allocate WDT structure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&wdt->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
+ return -ENXIO;
+ }
+
+ res = devm_request_mem_region(&pdev->dev, res->start,
+ res->end - res->start + 1, res->name);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot request I/O memory region\n");
+ return -ENXIO;
+ }
+
+ wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!wdt->base) {
+ dev_err(&pdev->dev, "cannot remap I/O memory region\n");
+ return -ENXIO;
+ }
+
+ /* disable watchdog and reboot on timeout */
+ ctrl = readl(wdt->base + ADX_WDT_CONTROL);
+ ctrl &= ~ADX_WDT_CONTROL_ENABLE;
+ ctrl &= ~ADX_WDT_CONTROL_nRESET;
+ writel(ctrl, wdt->base + ADX_WDT_CONTROL);
+
+ platform_set_drvdata(pdev, wdt);
+ adx_wdt_dev = pdev;
+
+ ret = misc_register(&adx_wdt_miscdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register miscdev on minor %d "
+ "(err=%d)\n", WATCHDOG_MINOR, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit adx_wdt_remove(struct platform_device *pdev)
+{
+ struct adx_wdt *wdt = platform_get_drvdata(pdev);
+
+ misc_deregister(&adx_wdt_miscdev);
+ adx_wdt_stop(wdt);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void adx_wdt_shutdown(struct platform_device *pdev)
+{
+ struct adx_wdt *wdt = platform_get_drvdata(pdev);
+ adx_wdt_stop(wdt);
+}
+
+#ifdef CONFIG_PM
+static int adx_wdt_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct adx_wdt *wdt = platform_get_drvdata(pdev);
+
+ wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
+ adx_wdt_stop(wdt);
+
+ return 0;
+}
+
+static int adx_wdt_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct adx_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt->wake)
+ adx_wdt_start(wdt);
+
+ return 0;
+}
+
+static struct dev_pm_ops adx_wdt_pm_ops = {
+ .suspend = adx_wdt_suspend,
+ .resume = adx_wdt_resume,
+};
+
+# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
+#else
+# define ADX_WDT_PM_OPS NULL
+#endif
+
+static struct platform_driver adx_wdt_driver = {
+ .probe = adx_wdt_probe,
+ .remove = __devexit_p(adx_wdt_remove),
+ .shutdown = adx_wdt_shutdown,
+ .driver = {
+ .name = WATCHDOG_NAME,
+ .owner = THIS_MODULE,
+ .pm = ADX_WDT_PM_OPS,
+ },
+};
+
+static int __init adx_wdt_init(void)
+{
+ return platform_driver_register(&adx_wdt_driver);
+}
+
+static void __exit adx_wdt_exit(void)
+{
+ platform_driver_unregister(&adx_wdt_driver);
+}
+
+module_init(adx_wdt_init);
+module_exit(adx_wdt_exit);
+
+MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
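
The new driver exposes the usual /dev/watchdog character device: opening it arms the timer with a 30-second timeout, WDIOC_SETTIMEOUT changes the period, any write or WDIOC_KEEPALIVE re-arms it, and closing the file stops the hardware (there is no magic-close or nowayout handling here). A minimal userspace feeder, assuming the standard /dev/watchdog node and the <linux/watchdog.h> ioctls:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 10;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* this driver also writes back the value set */
	for (int i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE);	/* pet the dog */
		sleep(timeout / 2);
	}
	close(fd);				/* release stops the timer in this driver */
	return 0;
}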
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f5bbd9e8341..d31505b6f7a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -96,11 +96,7 @@ static struct balloon_stats balloon_stats;
/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
-/* VM /proc information for memory */
-extern unsigned long totalram_pages;
-
#ifdef CONFIG_HIGHMEM
-extern unsigned long totalhigh_pages;
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
@@ -214,7 +210,7 @@ static int increase_reservation(unsigned long nr_pages)
page = balloon_first_page();
for (i = 0; i < nr_pages; i++) {
BUG_ON(page == NULL);
- frame_list[i] = page_to_pfn(page);;
+ frame_list[i] = page_to_pfn(page);
page = balloon_next_page(page);
}
diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
index 8f7fdaa9e01..5a03ba8c836 100644
--- a/firmware/ihex2fw.c
+++ b/firmware/ihex2fw.c
@@ -56,7 +56,7 @@ static int output_records(int outfd);
static int sort_records = 0;
static int wide_records = 0;
-int usage(void)
+static int usage(void)
{
fprintf(stderr, "ihex2fw: Convert ihex files into binary "
"representation for use by Linux kernel\n");
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 74e0723e90b..795233702a4 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -8,3 +8,12 @@ config 9P_FS
See <http://v9fs.sf.net> for more information.
If unsure, say N.
+
+config 9P_FSCACHE
+ bool "Enable 9P client caching support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on 9P_FS=m && FSCACHE || 9P_FS=y && FSCACHE=y
+ help
+ Choose Y here to enable persistent, read-only local
+ caching support for 9p clients using FS-Cache.
+
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index bc7f0d1551e..1a940ec7af6 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_9P_FS) := 9p.o
vfs_dir.o \
vfs_dentry.o \
v9fs.o \
- fid.o \
+ fid.o
+9p-$(CONFIG_9P_FSCACHE) += cache.o
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
new file mode 100644
index 00000000000..51c94e26a34
--- /dev/null
+++ b/fs/9p/cache.c
@@ -0,0 +1,474 @@
+/*
+ * V9FS cache definitions.
+ *
+ * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ *
+ */
+
+#include <linux/jiffies.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <net/9p/9p.h>
+
+#include "v9fs.h"
+#include "cache.h"
+
+#define CACHETAG_LEN 11
+
+struct kmem_cache *vcookie_cache;
+
+struct fscache_netfs v9fs_cache_netfs = {
+ .name = "9p",
+ .version = 0,
+};
+
+static void init_once(void *foo)
+{
+ struct v9fs_cookie *vcookie = (struct v9fs_cookie *) foo;
+ vcookie->fscache = NULL;
+ vcookie->qid = NULL;
+ inode_init_once(&vcookie->inode);
+}
+
+/**
+ * v9fs_init_vcookiecache - initialize a cache for vcookies to maintain
+ * vcookie to inode mapping
+ *
+ * Returns 0 on success.
+ */
+
+static int v9fs_init_vcookiecache(void)
+{
+ vcookie_cache = kmem_cache_create("vcookie_cache",
+ sizeof(struct v9fs_cookie),
+ 0, (SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (!vcookie_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * v9fs_destroy_vcookiecache - destroy the cache of vcookies
+ *
+ */
+
+static void v9fs_destroy_vcookiecache(void)
+{
+ kmem_cache_destroy(vcookie_cache);
+}
+
+int __v9fs_cache_register(void)
+{
+ int ret;
+ ret = v9fs_init_vcookiecache();
+ if (ret < 0)
+ return ret;
+
+ return fscache_register_netfs(&v9fs_cache_netfs);
+}
+
+void __v9fs_cache_unregister(void)
+{
+ v9fs_destroy_vcookiecache();
+ fscache_unregister_netfs(&v9fs_cache_netfs);
+}
+
+/**
+ * v9fs_random_cachetag - Generate a random tag to be associated
+ * with a new cache session.
+ *
+ * The value of jiffies is used as a fairly random cache tag.
+ */
+
+static
+int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
+{
+ v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
+ if (!v9ses->cachetag)
+ return -ENOMEM;
+
+ return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
+}
+
+static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ struct v9fs_session_info *v9ses;
+ uint16_t klen = 0;
+
+ v9ses = (struct v9fs_session_info *)cookie_netfs_data;
+ P9_DPRINTK(P9_DEBUG_FSC, "session %p buf %p size %u", v9ses,
+ buffer, bufmax);
+
+ if (v9ses->cachetag)
+ klen = strlen(v9ses->cachetag);
+
+ if (klen > bufmax)
+ return 0;
+
+ memcpy(buffer, v9ses->cachetag, klen);
+ P9_DPRINTK(P9_DEBUG_FSC, "cache session tag %s", v9ses->cachetag);
+ return klen;
+}
+
+const struct fscache_cookie_def v9fs_cache_session_index_def = {
+ .name = "9P.session",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = v9fs_cache_session_get_key,
+};
+
+void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
+{
+ /* If no cache session tag was specified, we generate a random one. */
+ if (!v9ses->cachetag)
+ v9fs_random_cachetag(v9ses);
+
+ v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
+ &v9fs_cache_session_index_def,
+ v9ses);
+ P9_DPRINTK(P9_DEBUG_FSC, "session %p get cookie %p", v9ses,
+ v9ses->fscache);
+}
+
+void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
+{
+ P9_DPRINTK(P9_DEBUG_FSC, "session %p put cookie %p", v9ses,
+ v9ses->fscache);
+ fscache_relinquish_cookie(v9ses->fscache, 0);
+ v9ses->fscache = NULL;
+}
+
+
+static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct v9fs_cookie *vcookie = cookie_netfs_data;
+ memcpy(buffer, &vcookie->qid->path, sizeof(vcookie->qid->path));
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &vcookie->inode,
+ vcookie->qid->path);
+ return sizeof(vcookie->qid->path);
+}
+
+static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
+ uint64_t *size)
+{
+ const struct v9fs_cookie *vcookie = cookie_netfs_data;
+ *size = i_size_read(&vcookie->inode);
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &vcookie->inode,
+ *size);
+}
+
+static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen)
+{
+ const struct v9fs_cookie *vcookie = cookie_netfs_data;
+ memcpy(buffer, &vcookie->qid->version, sizeof(vcookie->qid->version));
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &vcookie->inode,
+ vcookie->qid->version);
+ return sizeof(vcookie->qid->version);
+}
+
+static enum
+fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
+ const void *buffer,
+ uint16_t buflen)
+{
+ const struct v9fs_cookie *vcookie = cookie_netfs_data;
+
+ if (buflen != sizeof(vcookie->qid->version))
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ if (memcmp(buffer, &vcookie->qid->version,
+ sizeof(vcookie->qid->version)))
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ return FSCACHE_CHECKAUX_OKAY;
+}
+
+static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
+{
+ struct v9fs_cookie *vcookie = cookie_netfs_data;
+ struct pagevec pvec;
+ pgoff_t first;
+ int loop, nr_pages;
+
+ pagevec_init(&pvec, 0);
+ first = 0;
+
+ for (;;) {
+ nr_pages = pagevec_lookup(&pvec, vcookie->inode.i_mapping,
+ first,
+ PAGEVEC_SIZE - pagevec_count(&pvec));
+ if (!nr_pages)
+ break;
+
+ for (loop = 0; loop < nr_pages; loop++)
+ ClearPageFsCache(pvec.pages[loop]);
+
+ first = pvec.pages[nr_pages - 1]->index + 1;
+
+ pvec.nr = nr_pages;
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+}
+
+const struct fscache_cookie_def v9fs_cache_inode_index_def = {
+ .name = "9p.inode",
+ .type = FSCACHE_COOKIE_TYPE_DATAFILE,
+ .get_key = v9fs_cache_inode_get_key,
+ .get_attr = v9fs_cache_inode_get_attr,
+ .get_aux = v9fs_cache_inode_get_aux,
+ .check_aux = v9fs_cache_inode_check_aux,
+ .now_uncached = v9fs_cache_inode_now_uncached,
+};
+
+void v9fs_cache_inode_get_cookie(struct inode *inode)
+{
+ struct v9fs_cookie *vcookie;
+ struct v9fs_session_info *v9ses;
+
+ if (!S_ISREG(inode->i_mode))
+ return;
+
+ vcookie = v9fs_inode2cookie(inode);
+ if (vcookie->fscache)
+ return;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ vcookie->fscache = fscache_acquire_cookie(v9ses->fscache,
+ &v9fs_cache_inode_index_def,
+ vcookie);
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode,
+ vcookie->fscache);
+}
+
+void v9fs_cache_inode_put_cookie(struct inode *inode)
+{
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ if (!vcookie->fscache)
+ return;
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode,
+ vcookie->fscache);
+
+ fscache_relinquish_cookie(vcookie->fscache, 0);
+ vcookie->fscache = NULL;
+}
+
+void v9fs_cache_inode_flush_cookie(struct inode *inode)
+{
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ if (!vcookie->fscache)
+ return;
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode,
+ vcookie->fscache);
+
+ fscache_relinquish_cookie(vcookie->fscache, 1);
+ vcookie->fscache = NULL;
+}
+
+void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
+{
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct p9_fid *fid;
+
+ if (!vcookie->fscache)
+ return;
+
+ spin_lock(&vcookie->lock);
+ fid = filp->private_data;
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ v9fs_cache_inode_flush_cookie(inode);
+ else
+ v9fs_cache_inode_get_cookie(inode);
+
+ spin_unlock(&vcookie->lock);
+}
+
+void v9fs_cache_inode_reset_cookie(struct inode *inode)
+{
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ struct v9fs_session_info *v9ses;
+ struct fscache_cookie *old;
+
+ if (!vcookie->fscache)
+ return;
+
+ old = vcookie->fscache;
+
+ spin_lock(&vcookie->lock);
+ fscache_relinquish_cookie(vcookie->fscache, 1);
+
+ v9ses = v9fs_inode2v9ses(inode);
+ vcookie->fscache = fscache_acquire_cookie(v9ses->fscache,
+ &v9fs_cache_inode_index_def,
+ vcookie);
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p",
+ inode, old, vcookie->fscache);
+
+ spin_unlock(&vcookie->lock);
+}
+
+int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ struct inode *inode = page->mapping->host;
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ BUG_ON(!vcookie->fscache);
+
+ if (PageFsCache(page)) {
+ if (fscache_check_page_write(vcookie->fscache, page)) {
+ if (!(gfp & __GFP_WAIT))
+ return 0;
+ fscache_wait_on_page_write(vcookie->fscache, page);
+ }
+
+ fscache_uncache_page(vcookie->fscache, page);
+ ClearPageFsCache(page);
+ }
+
+ return 1;
+}
+
+void __v9fs_fscache_invalidate_page(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ BUG_ON(!vcookie->fscache);
+
+ if (PageFsCache(page)) {
+ fscache_wait_on_page_write(vcookie->fscache, page);
+ BUG_ON(!PageLocked(page));
+ fscache_uncache_page(vcookie->fscache, page);
+ ClearPageFsCache(page);
+ }
+}
+
+static void v9fs_vfs_readpage_complete(struct page *page, void *data,
+ int error)
+{
+ if (!error)
+ SetPageUptodate(page);
+
+ unlock_page(page);
+}
+
+/**
+ * __v9fs_readpage_from_fscache - read a page from cache
+ *
+ * Returns 0 if the page is in cache and a BIO is submitted,
+ * 1 if the page is not in cache, and a negative error code otherwise.
+ */
+
+int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
+{
+ int ret;
+ const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+ if (!vcookie->fscache)
+ return -ENOBUFS;
+
+ ret = fscache_read_or_alloc_page(vcookie->fscache,
+ page,
+ v9fs_vfs_readpage_complete,
+ NULL,
+ GFP_KERNEL);
+ switch (ret) {
+ case -ENOBUFS:
+ case -ENODATA:
+ P9_DPRINTK(P9_DEBUG_FSC, "page/inode not in cache %d", ret);
+ return 1;
+ case 0:
+ P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
+ return ret;
+ default:
+ P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
+ return ret;
+ }
+}
+
+/**
+ * __v9fs_readpages_from_fscache - read multiple pages from cache
+ *
+ * Returns 0 if the pages are in cache and a BIO is submitted,
+ * 1 if the pages are not in cache, and a negative error code otherwise.
+ */
+
+int __v9fs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ int ret;
+ const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages);
+ if (!vcookie->fscache)
+ return -ENOBUFS;
+
+ ret = fscache_read_or_alloc_pages(vcookie->fscache,
+ mapping, pages, nr_pages,
+ v9fs_vfs_readpage_complete,
+ NULL,
+ mapping_gfp_mask(mapping));
+ switch (ret) {
+ case -ENOBUFS:
+ case -ENODATA:
+ P9_DPRINTK(P9_DEBUG_FSC, "pages/inodes not in cache %d", ret);
+ return 1;
+ case 0:
+ BUG_ON(!list_empty(pages));
+ BUG_ON(*nr_pages != 0);
+ P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
+ return ret;
+ default:
+ P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
+ return ret;
+ }
+}
+
+/**
+ * __v9fs_readpage_to_fscache - write a page to the cache
+ *
+ */
+
+void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
+{
+ int ret;
+ const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+
+ P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+ ret = fscache_write_page(vcookie->fscache, page, GFP_KERNEL);
+ P9_DPRINTK(P9_DEBUG_FSC, "ret = %d", ret);
+ if (ret != 0)
+ v9fs_uncache_page(inode, page);
+}
diff --git a/fs/9p/cache.h b/fs/9p/cache.h
new file mode 100644
index 00000000000..a94192bfaee
--- /dev/null
+++ b/fs/9p/cache.h
@@ -0,0 +1,176 @@
+/*
+ * V9FS cache definitions.
+ *
+ * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ *
+ */
+
+#ifndef _9P_CACHE_H
+#ifdef CONFIG_9P_FSCACHE
+#include <linux/fscache.h>
+#include <linux/spinlock.h>
+
+extern struct kmem_cache *vcookie_cache;
+
+struct v9fs_cookie {
+ spinlock_t lock;
+ struct inode inode;
+ struct fscache_cookie *fscache;
+ struct p9_qid *qid;
+};
+
+static inline struct v9fs_cookie *v9fs_inode2cookie(const struct inode *inode)
+{
+ return container_of(inode, struct v9fs_cookie, inode);
+}
+
+extern struct fscache_netfs v9fs_cache_netfs;
+extern const struct fscache_cookie_def v9fs_cache_session_index_def;
+extern const struct fscache_cookie_def v9fs_cache_inode_index_def;
+
+extern void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses);
+extern void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses);
+
+extern void v9fs_cache_inode_get_cookie(struct inode *inode);
+extern void v9fs_cache_inode_put_cookie(struct inode *inode);
+extern void v9fs_cache_inode_flush_cookie(struct inode *inode);
+extern void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp);
+extern void v9fs_cache_inode_reset_cookie(struct inode *inode);
+
+extern int __v9fs_cache_register(void);
+extern void __v9fs_cache_unregister(void);
+
+extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
+extern void __v9fs_fscache_invalidate_page(struct page *page);
+extern int __v9fs_readpage_from_fscache(struct inode *inode,
+ struct page *page);
+extern int __v9fs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages);
+extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
+
+
+/**
+ * v9fs_cache_register - Register v9fs file system with the cache
+ */
+static inline int v9fs_cache_register(void)
+{
+ return __v9fs_cache_register();
+}
+
+/**
+ * v9fs_cache_unregister - Unregister v9fs from the cache
+ */
+static inline void v9fs_cache_unregister(void)
+{
+ __v9fs_cache_unregister();
+}
+
+static inline int v9fs_fscache_release_page(struct page *page,
+ gfp_t gfp)
+{
+ return __v9fs_fscache_release_page(page, gfp);
+}
+
+static inline void v9fs_fscache_invalidate_page(struct page *page)
+{
+ __v9fs_fscache_invalidate_page(page);
+}
+
+static inline int v9fs_readpage_from_fscache(struct inode *inode,
+ struct page *page)
+{
+ return __v9fs_readpage_from_fscache(inode, page);
+}
+
+static inline int v9fs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ return __v9fs_readpages_from_fscache(inode, mapping, pages,
+ nr_pages);
+}
+
+static inline void v9fs_readpage_to_fscache(struct inode *inode,
+ struct page *page)
+{
+ if (PageFsCache(page))
+ __v9fs_readpage_to_fscache(inode, page);
+}
+
+static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
+{
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ fscache_uncache_page(vcookie->fscache, page);
+ BUG_ON(PageFsCache(page));
+}
+
+static inline void v9fs_vcookie_set_qid(struct inode *inode,
+ struct p9_qid *qid)
+{
+ struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
+ spin_lock(&vcookie->lock);
+ vcookie->qid = qid;
+ spin_unlock(&vcookie->lock);
+}
+
+#else /* CONFIG_9P_FSCACHE */
+
+static inline int v9fs_cache_register(void)
+{
+ return 1;
+}
+
+static inline void v9fs_cache_unregister(void) {}
+
+static inline int v9fs_fscache_release_page(struct page *page,
+ gfp_t gfp) {
+ return 1;
+}
+
+static inline void v9fs_fscache_invalidate_page(struct page *page) {}
+
+static inline int v9fs_readpage_from_fscache(struct inode *inode,
+ struct page *page)
+{
+ return -ENOBUFS;
+}
+
+static inline int v9fs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ return -ENOBUFS;
+}
+
+static inline void v9fs_readpage_to_fscache(struct inode *inode,
+ struct page *page)
+{}
+
+static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
+{}
+
+static inline void v9fs_vcookie_set_qid(struct inode *inode,
+ struct p9_qid *qid)
+{}
+
+#endif /* CONFIG_9P_FSCACHE */
+#endif /* _9P_CACHE_H */
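
The header relies on struct inode being embedded inside struct v9fs_cookie, so v9fs_inode2cookie() can use container_of() to step from the inode the VFS hands back to the surrounding cookie; the v9fs_alloc_inode()/v9fs_destroy_inode() hooks declared in v9fs_vfs.h later in this patch allocate that container from vcookie_cache. A toy, userspace-sized illustration of the container_of() pattern, with hypothetical structures in place of the real ones:

#include <stddef.h>
#include <stdio.h>

struct inode_like { unsigned long i_ino; };

struct cookie_like {
	int qid_version;
	struct inode_like inode;	/* embedded, as in struct v9fs_cookie */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct cookie_like c = { .qid_version = 7, .inode = { .i_ino = 42 } };
	struct inode_like *inode = &c.inode;	/* what the VFS sees */
	struct cookie_like *back = container_of(inode, struct cookie_like, inode);

	printf("qid version reached via the inode: %d\n", back->qid_version);
	return 0;
}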
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index f7003cfac63..cf62b05e296 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -34,21 +34,25 @@
#include <net/9p/transport.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
+#include "cache.h"
+
+static DEFINE_SPINLOCK(v9fs_sessionlist_lock);
+static LIST_HEAD(v9fs_sessionlist);
/*
- * Option Parsing (code inspired by NFS code)
- * NOTE: each transport will parse its own options
- */
+ * Option Parsing (code inspired by NFS code)
+ * NOTE: each transport will parse its own options
+ */
enum {
/* Options that take integer arguments */
Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
/* String options */
- Opt_uname, Opt_remotename, Opt_trans,
+ Opt_uname, Opt_remotename, Opt_trans, Opt_cache, Opt_cachetag,
/* Options that take no arguments */
Opt_nodevmap,
/* Cache options */
- Opt_cache_loose,
+ Opt_cache_loose, Opt_fscache,
/* Access options */
Opt_access,
/* Error token */
@@ -63,8 +67,10 @@ static const match_table_t tokens = {
{Opt_uname, "uname=%s"},
{Opt_remotename, "aname=%s"},
{Opt_nodevmap, "nodevmap"},
- {Opt_cache_loose, "cache=loose"},
+ {Opt_cache, "cache=%s"},
{Opt_cache_loose, "loose"},
+ {Opt_fscache, "fscache"},
+ {Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_err, NULL}
};
@@ -89,16 +95,16 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
v9ses->afid = ~0;
v9ses->debug = 0;
v9ses->cache = 0;
+#ifdef CONFIG_9P_FSCACHE
+ v9ses->cachetag = NULL;
+#endif
if (!opts)
return 0;
options = kstrdup(opts, GFP_KERNEL);
- if (!options) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option string\n");
- return -ENOMEM;
- }
+ if (!options)
+ goto fail_option_alloc;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -143,16 +149,33 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_cache_loose:
v9ses->cache = CACHE_LOOSE;
break;
+ case Opt_fscache:
+ v9ses->cache = CACHE_FSCACHE;
+ break;
+ case Opt_cachetag:
+#ifdef CONFIG_9P_FSCACHE
+ v9ses->cachetag = match_strdup(&args[0]);
+#endif
+ break;
+ case Opt_cache:
+ s = match_strdup(&args[0]);
+ if (!s)
+ goto fail_option_alloc;
+
+ if (strcmp(s, "loose") == 0)
+ v9ses->cache = CACHE_LOOSE;
+ else if (strcmp(s, "fscache") == 0)
+ v9ses->cache = CACHE_FSCACHE;
+ else
+ v9ses->cache = CACHE_NONE;
+ kfree(s);
+ break;
case Opt_access:
s = match_strdup(&args[0]);
- if (!s) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy"
- " of option argument\n");
- ret = -ENOMEM;
- break;
- }
+ if (!s)
+ goto fail_option_alloc;
+
v9ses->flags &= ~V9FS_ACCESS_MASK;
if (strcmp(s, "user") == 0)
v9ses->flags |= V9FS_ACCESS_USER;
@@ -173,6 +196,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
}
kfree(options);
return ret;
+
+fail_option_alloc:
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "failed to allocate copy of option argument\n");
+ return -ENOMEM;
}
/**
@@ -200,6 +228,10 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
return ERR_PTR(-ENOMEM);
}
+ spin_lock(&v9fs_sessionlist_lock);
+ list_add(&v9ses->slist, &v9fs_sessionlist);
+ spin_unlock(&v9fs_sessionlist_lock);
+
v9ses->flags = V9FS_EXTENDED | V9FS_ACCESS_USER;
strcpy(v9ses->uname, V9FS_DEFUSER);
strcpy(v9ses->aname, V9FS_DEFANAME);
@@ -249,6 +281,11 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
else
fid->uid = ~0;
+#ifdef CONFIG_9P_FSCACHE
+ /* register the session for caching */
+ v9fs_cache_session_get_cookie(v9ses);
+#endif
+
return fid;
error:
@@ -268,8 +305,18 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
v9ses->clnt = NULL;
}
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->fscache) {
+ v9fs_cache_session_put_cookie(v9ses);
+ kfree(v9ses->cachetag);
+ }
+#endif
__putname(v9ses->uname);
__putname(v9ses->aname);
+
+ spin_lock(&v9fs_sessionlist_lock);
+ list_del(&v9ses->slist);
+ spin_unlock(&v9fs_sessionlist_lock);
}
/**
@@ -286,25 +333,132 @@ void v9fs_session_cancel(struct v9fs_session_info *v9ses) {
extern int v9fs_error_init(void);
+static struct kobject *v9fs_kobj;
+
+#ifdef CONFIG_9P_FSCACHE
/**
- * v9fs_init - Initialize module
+ * caches_show - list caches associated with a session
+ *
+ * Returns the size of buffer written.
+ */
+
+static ssize_t caches_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ ssize_t n = 0, count = 0, limit = PAGE_SIZE;
+ struct v9fs_session_info *v9ses;
+
+ spin_lock(&v9fs_sessionlist_lock);
+ list_for_each_entry(v9ses, &v9fs_sessionlist, slist) {
+ if (v9ses->cachetag) {
+ n = snprintf(buf, limit, "%s\n", v9ses->cachetag);
+ if (n < 0) {
+ count = n;
+ break;
+ }
+
+ count += n;
+ limit -= n;
+ }
+ }
+
+ spin_unlock(&v9fs_sessionlist_lock);
+ return count;
+}
+
+static struct kobj_attribute v9fs_attr_cache = __ATTR_RO(caches);
+#endif /* CONFIG_9P_FSCACHE */
+
+static struct attribute *v9fs_attrs[] = {
+#ifdef CONFIG_9P_FSCACHE
+ &v9fs_attr_cache.attr,
+#endif
+ NULL,
+};
+
+static struct attribute_group v9fs_attr_group = {
+ .attrs = v9fs_attrs,
+};
+
+/**
+ * v9fs_sysfs_init - Initialize the v9fs sysfs interface
+ *
+ */
+
+static int v9fs_sysfs_init(void)
+{
+ v9fs_kobj = kobject_create_and_add("9p", fs_kobj);
+ if (!v9fs_kobj)
+ return -ENOMEM;
+
+ if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) {
+ kobject_put(v9fs_kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * v9fs_sysfs_cleanup - Unregister the v9fs sysfs interface
+ *
+ */
+
+static void v9fs_sysfs_cleanup(void)
+{
+ sysfs_remove_group(v9fs_kobj, &v9fs_attr_group);
+ kobject_put(v9fs_kobj);
+}
+
+/**
+ * init_v9fs - Initialize module
*
*/
static int __init init_v9fs(void)
{
+ int err;
printk(KERN_INFO "Installing v9fs 9p2000 file system support\n");
/* TODO: Set up list of registered transport modules */
- return register_filesystem(&v9fs_fs_type);
+ err = register_filesystem(&v9fs_fs_type);
+ if (err < 0) {
+ printk(KERN_ERR "Failed to register filesystem\n");
+ return err;
+ }
+
+ err = v9fs_cache_register();
+ if (err < 0) {
+ printk(KERN_ERR "Failed to register v9fs for caching\n");
+ goto out_fs_unreg;
+ }
+
+ err = v9fs_sysfs_init();
+ if (err < 0) {
+ printk(KERN_ERR "Failed to register with sysfs\n");
+ goto out_sysfs_cleanup;
+ }
+
+ return 0;
+
+out_sysfs_cleanup:
+ v9fs_sysfs_cleanup();
+
+out_fs_unreg:
+ unregister_filesystem(&v9fs_fs_type);
+
+ return err;
}
/**
- * v9fs_init - shutdown module
+ * exit_v9fs - shutdown module
*
*/
static void __exit exit_v9fs(void)
{
+ v9fs_sysfs_cleanup();
+ v9fs_cache_unregister();
unregister_filesystem(&v9fs_fs_type);
}
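
With these options wired into v9fs_parse_options(), a client can mount with cache=fscache and an optional cachetag=<name>; the tag (or a generated one) is then listed by the new /sys/fs/9p/caches attribute. A hedged sketch of passing the options through mount(2); the server address, mount point and transport options below are placeholders, and CONFIG_9P_FSCACHE plus mount privileges are assumed:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Placeholder server and options; cache=fscache and cachetag= are the new bits. */
	const char *opts = "trans=tcp,port=564,cache=fscache,cachetag=demo";

	if (mount("192.168.1.10", "/mnt/9p", "9p", 0, opts) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}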
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 38762bf102a..019f4ccb70c 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -51,6 +51,7 @@ enum p9_session_flags {
enum p9_cache_modes {
CACHE_NONE,
CACHE_LOOSE,
+ CACHE_FSCACHE,
};
/**
@@ -60,6 +61,8 @@ enum p9_cache_modes {
* @debug: debug level
* @afid: authentication handle
* @cache: cache mode of type &p9_cache_modes
+ * @cachetag: the tag of the cache associated with this session
+ * @fscache: session cookie associated with FS-Cache
* @options: copy of options string given by user
* @uname: string user name to mount hierarchy as
* @aname: mount specifier for remote hierarchy
@@ -68,7 +71,7 @@ enum p9_cache_modes {
* @dfltgid: default numeric groupid to mount hierarchy as
* @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy
* @clnt: reference to 9P network client instantiated for this session
- * @debugfs_dir: reference to debugfs_dir which can be used for add'l debug
+ * @slist: reference to list of registered 9p sessions
*
* This structure holds state for each session instance established during
* a sys_mount() .
@@ -84,6 +87,10 @@ struct v9fs_session_info {
unsigned short debug;
unsigned int afid;
unsigned int cache;
+#ifdef CONFIG_9P_FSCACHE
+ char *cachetag;
+ struct fscache_cookie *fscache;
+#endif
char *uname; /* user name to mount as */
char *aname; /* name of remote hierarchy being mounted */
@@ -92,11 +99,9 @@ struct v9fs_session_info {
unsigned int dfltgid; /* default gid for legacy support */
u32 uid; /* if ACCESS_SINGLE, the uid that has access */
struct p9_client *clnt; /* 9p client */
- struct dentry *debugfs_dir;
+ struct list_head slist; /* list of sessions registered with v9fs */
};
-extern struct dentry *v9fs_debugfs_root;
-
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
char *);
void v9fs_session_close(struct v9fs_session_info *v9ses);
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index f0c7de78e20..3a7560e3586 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -44,7 +44,13 @@ extern const struct file_operations v9fs_dir_operations;
extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
+#ifdef CONFIG_9P_FSCACHE
+struct inode *v9fs_alloc_inode(struct super_block *sb);
+void v9fs_destroy_inode(struct inode *inode);
+#endif
+
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
+void v9fs_clear_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
int v9fs_dir_release(struct inode *inode, struct file *filp);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 92828281a30..90e38449f4b 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -38,6 +38,7 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
+#include "cache.h"
/**
* v9fs_vfs_readpage - read an entire page in from 9P
@@ -52,18 +53,31 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
int retval;
loff_t offset;
char *buffer;
+ struct inode *inode;
+ inode = page->mapping->host;
P9_DPRINTK(P9_DEBUG_VFS, "\n");
+
+ BUG_ON(!PageLocked(page));
+
+ retval = v9fs_readpage_from_fscache(inode, page);
+ if (retval == 0)
+ return retval;
+
buffer = kmap(page);
offset = page_offset(page);
retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
- if (retval < 0)
+ if (retval < 0) {
+ v9fs_uncache_page(inode, page);
goto done;
+ }
memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval);
flush_dcache_page(page);
SetPageUptodate(page);
+
+ v9fs_readpage_to_fscache(inode, page);
retval = 0;
done:
@@ -72,6 +86,78 @@ done:
return retval;
}
+/**
+ * v9fs_vfs_readpages - read a set of pages from 9P
+ *
+ * @filp: file being read
+ * @mapping: the address space
+ * @pages: list of pages to read
+ * @nr_pages: count of pages to read
+ *
+ */
+
+static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ int ret = 0;
+ struct inode *inode;
+
+ inode = mapping->host;
+ P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
+
+ ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
+ if (ret == 0)
+ return ret;
+
+ ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
+ P9_DPRINTK(P9_DEBUG_VFS, " = %d\n", ret);
+ return ret;
+}
+
+/**
+ * v9fs_release_page - release the private state associated with a page
+ *
+ * Returns 1 if the page can be released, 0 otherwise.
+ */
+
+static int v9fs_release_page(struct page *page, gfp_t gfp)
+{
+ if (PagePrivate(page))
+ return 0;
+
+ return v9fs_fscache_release_page(page, gfp);
+}
+
+/**
+ * v9fs_invalidate_page - Invalidate a page completely or partially
+ *
+ * @page: the page to invalidate
+ * @offset: offset in the page
+ */
+
+static void v9fs_invalidate_page(struct page *page, unsigned long offset)
+{
+ if (offset == 0)
+ v9fs_fscache_invalidate_page(page);
+}
+
+/**
+ * v9fs_launder_page - Writeback a dirty page
+ * Since writes go directly to the server, there is nothing to write
+ * back here; we simply return 0 to indicate success.
+ *
+ * Returns 0 on success.
+ */
+
+static int v9fs_launder_page(struct page *page)
+{
+ return 0;
+}
+
const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage,
+ .readpages = v9fs_vfs_readpages,
+ .releasepage = v9fs_release_page,
+ .invalidatepage = v9fs_invalidate_page,
+ .launder_page = v9fs_launder_page,
};
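
The readpages fallback above hands each page on the list to the single-page path through read_cache_pages(). As a general illustration of that shape, using a hypothetical filler instead of the function-pointer cast used above (my_readpage here is only a dummy that zero-fills):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* dummy single-page reader: zero-fill, mark up to date, unlock */
static int my_readpage(struct file *filp, struct page *page)
{
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

/* read_cache_pages() wants an int (*)(void *, struct page *) filler */
static int my_filler(void *data, struct page *page)
{
        return my_readpage(data, page);
}

static int my_readpages(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
{
        return read_cache_pages(mapping, pages, my_filler, filp);
}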
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 68bf2af6c38..3902bf43a08 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
+#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
#include <net/9p/9p.h>
@@ -40,6 +41,7 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
+#include "cache.h"
static const struct file_operations v9fs_cached_file_operations;
@@ -72,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
return err;
}
if (omode & P9_OTRUNC) {
- inode->i_size = 0;
+ i_size_write(inode, 0);
inode->i_blocks = 0;
}
if ((file->f_flags & O_APPEND) && (!v9fs_extended(v9ses)))
@@ -85,6 +87,10 @@ int v9fs_file_open(struct inode *inode, struct file *file)
/* enable cached file options */
if(file->f_op == &v9fs_file_operations)
file->f_op = &v9fs_cached_file_operations;
+
+#ifdef CONFIG_9P_FSCACHE
+ v9fs_cache_inode_set_cookie(inode, file);
+#endif
}
return 0;
@@ -210,6 +216,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
struct p9_client *clnt;
struct inode *inode = filp->f_path.dentry->d_inode;
int origin = *offset;
+ unsigned long pg_start, pg_end;
P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
(int)count, (int)*offset);
@@ -225,7 +232,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
if (count < rsize)
rsize = count;
- n = p9_client_write(fid, NULL, data+total, *offset+total,
+ n = p9_client_write(fid, NULL, data+total, origin+total,
rsize);
if (n <= 0)
break;
@@ -234,14 +241,14 @@ v9fs_file_write(struct file *filp, const char __user * data,
} while (count > 0);
if (total > 0) {
- invalidate_inode_pages2_range(inode->i_mapping, origin,
- origin+total);
+ pg_start = origin >> PAGE_CACHE_SHIFT;
+ pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
+ if (inode->i_mapping && inode->i_mapping->nrpages)
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pg_start, pg_end);
*offset += total;
- }
-
- if (*offset > inode->i_size) {
- inode->i_size = *offset;
- inode->i_blocks = (inode->i_size + 512 - 1) >> 9;
+ i_size_write(inode, i_size_read(inode) + total);
+ inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
}
if (n < 0)
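
The i_size changes above follow the usual accessor pattern: never assign inode->i_size directly, so that 32-bit SMP readers see a consistent 64-bit value. A small sketch of the same pattern (extend_isize is hypothetical; the write side is assumed to run under the normal inode serialization, e.g. i_mutex):

#include <linux/fs.h>

static void extend_isize(struct inode *inode, loff_t new_size)
{
        if (new_size > i_size_read(inode)) {
                i_size_write(inode, new_size);
                /* 512-byte block count, as in the hunk above */
                inode->i_blocks = (new_size + 512 - 1) >> 9;
        }
}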
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 06a223d50a8..5947628aefe 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -40,6 +40,7 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
+#include "cache.h"
static const struct inode_operations v9fs_dir_inode_operations;
static const struct inode_operations v9fs_dir_inode_operations_ext;
@@ -197,6 +198,39 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
wstat->extension = NULL;
}
+#ifdef CONFIG_9P_FSCACHE
+/**
+ * v9fs_alloc_inode - helper function to allocate an inode
+ * This callback is executed before setting up the inode so that we
+ * can associate a vcookie with each inode.
+ *
+ */
+
+struct inode *v9fs_alloc_inode(struct super_block *sb)
+{
+ struct v9fs_cookie *vcookie;
+ vcookie = (struct v9fs_cookie *)kmem_cache_alloc(vcookie_cache,
+ GFP_KERNEL);
+ if (!vcookie)
+ return NULL;
+
+ vcookie->fscache = NULL;
+ vcookie->qid = NULL;
+ spin_lock_init(&vcookie->lock);
+ return &vcookie->inode;
+}
+
+/**
+ * v9fs_destroy_inode - destroy an inode
+ *
+ */
+
+void v9fs_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode));
+}
+#endif
+
/**
* v9fs_get_inode - helper function to setup an inode
* @sb: superblock
@@ -326,6 +360,21 @@ error:
}
*/
+
+/**
+ * v9fs_clear_inode - release an inode
+ * @inode: inode to release
+ *
+ */
+void v9fs_clear_inode(struct inode *inode)
+{
+ filemap_fdatawrite(inode->i_mapping);
+
+#ifdef CONFIG_9P_FSCACHE
+ v9fs_cache_inode_put_cookie(inode);
+#endif
+}
+
/**
* v9fs_inode_from_fid - populate an inode by issuing an attribute request
* @v9ses: session information
@@ -356,8 +405,14 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
v9fs_stat2inode(st, ret, sb);
ret->i_ino = v9fs_qid2ino(&st->qid);
+
+#ifdef CONFIG_9P_FSCACHE
+ v9fs_vcookie_set_qid(ret, &st->qid);
+ v9fs_cache_inode_get_cookie(ret);
+#endif
p9stat_free(st);
kfree(st);
+
return ret;
error:
@@ -751,7 +806,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
- if (v9ses->cache == CACHE_LOOSE)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
return simple_getattr(mnt, dentry, stat);
fid = v9fs_fid_lookup(dentry);
@@ -872,10 +927,10 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
} else
inode->i_rdev = 0;
- inode->i_size = stat->length;
+ i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
- inode->i_blocks = (inode->i_size + 512 - 1) >> 9;
+ inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
}
/**
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 8961f1a8f66..14a86448572 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -44,21 +44,9 @@
#include "v9fs_vfs.h"
#include "fid.h"
-static void v9fs_clear_inode(struct inode *);
static const struct super_operations v9fs_super_ops;
/**
- * v9fs_clear_inode - release an inode
- * @inode: inode to release
- *
- */
-
-static void v9fs_clear_inode(struct inode *inode)
-{
- filemap_fdatawrite(inode->i_mapping);
-}
-
-/**
* v9fs_set_super - set the superblock
* @s: super block
* @data: file system specific data
@@ -220,6 +208,10 @@ v9fs_umount_begin(struct super_block *sb)
}
static const struct super_operations v9fs_super_ops = {
+#ifdef CONFIG_9P_FSCACHE
+ .alloc_inode = v9fs_alloc_inode,
+ .destroy_inode = v9fs_destroy_inode,
+#endif
.statfs = simple_statfs,
.clear_inode = v9fs_clear_inode,
.show_options = generic_show_options,
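
The new alloc_inode/destroy_inode hooks wired in above return an inode embedded in a larger per-inode structure; the reverse lookup (v9fs_inode2cookie, defined in cache.h outside these hunks) is presumably the usual container_of() helper. Generic shape, with hypothetical names:

#include <linux/fs.h>
#include <linux/kernel.h>       /* container_of() */

struct my_inode_info {
        void *cookie;           /* per-inode private state, e.g. a cache cookie */
        struct inode vfs_inode; /* embedded inode handed back to the VFS */
};

static inline struct my_inode_info *MY_I(struct inode *inode)
{
        return container_of(inode, struct my_inode_info, vfs_inode);
}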
diff --git a/fs/Kconfig b/fs/Kconfig
index 455aa207e67..d4bf8caad8d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -109,6 +109,7 @@ source "fs/sysfs/Kconfig"
config TMPFS
bool "Virtual memory file system support (former shm fs)"
+ depends on SHMEM
help
Tmpfs is a file system which keeps all files in virtual memory.
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 798cb071d13..3f57ce4bee5 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -19,9 +19,6 @@ static int
adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
int create)
{
- if (block < 0)
- goto abort_negative;
-
if (!create) {
if (block >= inode->i_blocks)
goto abort_toobig;
@@ -34,10 +31,6 @@ adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
/* don't support allocation of blocks yet */
return -EIO;
-abort_negative:
- adfs_error(inode->i_sb, "block %d < 0", block);
- return -EIO;
-
abort_toobig:
return 0;
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 3ff8bdd18fb..0931bc1325e 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -21,7 +21,7 @@ static void afs_fl_release_private(struct file_lock *fl);
static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);
-static struct file_lock_operations afs_lock_ops = {
+static const struct file_lock_operations afs_lock_ops = {
.fl_copy_lock = afs_fl_copy_lock,
.fl_release_private = afs_fl_release_private,
};
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 8630615e57f..852739d262a 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -28,7 +28,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v);
static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
size_t size, loff_t *_pos);
-static struct seq_operations afs_proc_cells_ops = {
+static const struct seq_operations afs_proc_cells_ops = {
.start = afs_proc_cells_start,
.next = afs_proc_cells_next,
.stop = afs_proc_cells_stop,
@@ -70,7 +70,7 @@ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v,
static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v);
static int afs_proc_cell_volumes_show(struct seq_file *m, void *v);
-static struct seq_operations afs_proc_cell_volumes_ops = {
+static const struct seq_operations afs_proc_cell_volumes_ops = {
.start = afs_proc_cell_volumes_start,
.next = afs_proc_cell_volumes_next,
.stop = afs_proc_cell_volumes_stop,
@@ -95,7 +95,7 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v,
static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v);
static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v);
-static struct seq_operations afs_proc_cell_vlservers_ops = {
+static const struct seq_operations afs_proc_cell_vlservers_ops = {
.start = afs_proc_cell_vlservers_start,
.next = afs_proc_cell_vlservers_next,
.stop = afs_proc_cell_vlservers_stop,
@@ -119,7 +119,7 @@ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v,
static void afs_proc_cell_servers_stop(struct seq_file *p, void *v);
static int afs_proc_cell_servers_show(struct seq_file *m, void *v);
-static struct seq_operations afs_proc_cell_servers_ops = {
+static const struct seq_operations afs_proc_cell_servers_ops = {
.start = afs_proc_cell_servers_start,
.next = afs_proc_cell_servers_next,
.stop = afs_proc_cell_servers_stop,
diff --git a/fs/aio.c b/fs/aio.c
index d065b2c3273..02a2c934057 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -24,6 +24,7 @@
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
@@ -34,7 +35,6 @@
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
-#include <asm/mmu_context.h>
#if DEBUG > 1
#define dprintk printk
@@ -78,6 +78,7 @@ static int __init aio_setup(void)
return 0;
}
+__initcall(aio_setup);
static void aio_free_ring(struct kioctx *ctx)
{
@@ -380,6 +381,7 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
__set_current_state(TASK_RUNNING);
return iocb->ki_user_data;
}
+EXPORT_SYMBOL(wait_on_sync_kiocb);
/* exit_aio: called when the last user of mm goes away. At this point,
* there is no way for any new requests to be submitted or any of the
@@ -573,6 +575,7 @@ int aio_put_req(struct kiocb *req)
spin_unlock_irq(&ctx->ctx_lock);
return ret;
}
+EXPORT_SYMBOL(aio_put_req);
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
@@ -595,51 +598,6 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
}
/*
- * use_mm
- * Makes the calling kernel thread take on the specified
- * mm context.
- * Called by the retry thread execute retries within the
- * iocb issuer's mm context, so that copy_from/to_user
- * operations work seamlessly for aio.
- * (Note: this routine is intended to be called only
- * from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- atomic_inc(&mm->mm_count);
- tsk->mm = mm;
- tsk->active_mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-
- mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thread context)
- */
-static void unuse_mm(struct mm_struct *mm)
-{
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- tsk->mm = NULL;
- /* active_mm is still 'mm' */
- enter_lazy_tlb(mm, tsk);
- task_unlock(tsk);
-}
-
-/*
* Queue up a kiocb to be retried. Assumes that the kiocb
* has already been marked as kicked, and places it on
* the retry run list for the corresponding ioctx, if it
@@ -1037,6 +995,7 @@ put_rq:
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
return ret;
}
+EXPORT_SYMBOL(aio_complete);
/* aio_read_evt
* Pull an event off of the ioctx's event ring. Returns the number of
@@ -1825,9 +1784,3 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
return ret;
}
-
-__initcall(aio_setup);
-
-EXPORT_SYMBOL(aio_complete);
-EXPORT_SYMBOL(aio_put_req);
-EXPORT_SYMBOL(wait_on_sync_kiocb);
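
With use_mm()/unuse_mm() moved out of aio.c and picked up through <linux/mmu_context.h> as shown above, any kernel thread that needs copy_to_user()/copy_from_user() against a user mm can borrow it the same way the aio retry path does. Sketch only (run_with_user_mm is hypothetical):

#include <linux/mmu_context.h>
#include <linux/sched.h>

static void run_with_user_mm(struct mm_struct *mm)
{
        use_mm(mm);
        /* ... copy_to_user()/copy_from_user() against 'mm' is valid here ... */
        unuse_mm(mm);
}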
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 47d4a01c539..d11c51fc2a3 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -77,28 +77,24 @@ static const struct address_space_operations anon_aops = {
*
* Creates a new file by hooking it on a single inode. This is useful for files
* that do not need to have a full-fledged inode in order to operate correctly.
- * All the files created with anon_inode_getfd() will share a single inode,
+ * All the files created with anon_inode_getfile() will share a single inode,
* hence saving memory and avoiding code duplication for the file/inode/dentry
- * setup. Returns new descriptor or -error.
+ * setup. Returns the newly created file* or an error pointer.
*/
-int anon_inode_getfd(const char *name, const struct file_operations *fops,
- void *priv, int flags)
+struct file *anon_inode_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
{
struct qstr this;
struct dentry *dentry;
struct file *file;
- int error, fd;
+ int error;
if (IS_ERR(anon_inode_inode))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
if (fops->owner && !try_module_get(fops->owner))
- return -ENOENT;
-
- error = get_unused_fd_flags(flags);
- if (error < 0)
- goto err_module;
- fd = error;
+ return ERR_PTR(-ENOENT);
/*
* Link the inode to a directory entry by creating a unique name
@@ -110,7 +106,7 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
this.hash = 0;
dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this);
if (!dentry)
- goto err_put_unused_fd;
+ goto err_module;
/*
* We know the anon_inode inode count is always greater than zero,
@@ -136,16 +132,54 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
file->f_version = 0;
file->private_data = priv;
+ return file;
+
+err_dput:
+ dput(dentry);
+err_module:
+ module_put(fops->owner);
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL_GPL(anon_inode_getfile);
+
+/**
+ * anon_inode_getfd - creates a new file instance by hooking it up to an
+ * anonymous inode, and a dentry that describes the "class"
+ * of the file
+ *
+ * @name: [in] name of the "class" of the new file
+ * @fops: [in] file operations for the new file
+ * @priv: [in] private data for the new file (will be file's private_data)
+ * @flags: [in] flags
+ *
+ * Creates a new file by hooking it on a single inode. This is useful for files
+ * that do not need to have a full-fledged inode in order to operate correctly.
+ * All the files created with anon_inode_getfd() will share a single inode,
+ * hence saving memory and avoiding code duplication for the file/inode/dentry
+ * setup. Returns new descriptor or an error code.
+ */
+int anon_inode_getfd(const char *name, const struct file_operations *fops,
+ void *priv, int flags)
+{
+ int error, fd;
+ struct file *file;
+
+ error = get_unused_fd_flags(flags);
+ if (error < 0)
+ return error;
+ fd = error;
+
+ file = anon_inode_getfile(name, fops, priv, flags);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto err_put_unused_fd;
+ }
fd_install(fd, file);
return fd;
-err_dput:
- dput(dentry);
err_put_unused_fd:
put_unused_fd(fd);
-err_module:
- module_put(fops->owner);
return error;
}
EXPORT_SYMBOL_GPL(anon_inode_getfd);
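
With the split above, anon_inode_getfile() returns the struct file and anon_inode_getfd() keeps the old fd-returning contract. A typical caller of the fd variant looks roughly like this (my_fops and my_ctx are illustrative):

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>
#include <linux/fs.h>

static const struct file_operations my_fops;    /* illustrative fops */

/* hand userspace a new fd backed only by private state */
static int my_create_fd(void *my_ctx)
{
        return anon_inode_getfd("[my-ctx]", &my_fops, my_ctx, O_CLOEXEC);
}

Callers that need the struct file itself, for example to install it later or under a lock, can now use anon_inode_getfile() and call fd_install() themselves.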
diff --git a/fs/attr.c b/fs/attr.c
index 9fe1b1bd30a..96d394bdadd 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -18,7 +18,7 @@
/* Taken over from the old code... */
/* POSIX UID/GID verification for setting inode attributes. */
-int inode_change_ok(struct inode *inode, struct iattr *attr)
+int inode_change_ok(const struct inode *inode, struct iattr *attr)
{
int retval = -EPERM;
unsigned int ia_valid = attr->ia_valid;
@@ -60,9 +60,51 @@ fine:
error:
return retval;
}
-
EXPORT_SYMBOL(inode_change_ok);
+/**
+ * inode_newsize_ok - may this inode be truncated to a given size
+ * @inode: the inode to be truncated
+ * @offset: the new size to assign to the inode
+ * @Returns: 0 on success, -ve errno on failure
+ *
+ * inode_newsize_ok checks filesystem limits and ulimits to ensure that the
+ * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
+ * when necessary. Caller must not proceed with inode size change if failure is
+ * returned. @inode must be a file (not a directory), with appropriate
+ * permissions to allow truncate (inode_newsize_ok does NOT check these
+ * conditions).
+ *
+ * inode_newsize_ok must be called with i_mutex held.
+ */
+int inode_newsize_ok(const struct inode *inode, loff_t offset)
+{
+ if (inode->i_size < offset) {
+ unsigned long limit;
+
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+ goto out_big;
+ } else {
+ /*
+ * truncation of in-use swapfiles is disallowed - it would
+ * cause subsequent swapout to scribble on the now-freed
+ * blocks.
+ */
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+ }
+
+ return 0;
+out_sig:
+ send_sig(SIGXFSZ, current, 0);
+out_big:
+ return -EFBIG;
+}
+EXPORT_SYMBOL(inode_newsize_ok);
+
int inode_setattr(struct inode * inode, struct iattr * attr)
{
unsigned int ia_valid = attr->ia_valid;
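
A likely caller of the new inode_newsize_ok() helper is a filesystem ->setattr() path that wants the rlimit, s_maxbytes and swapfile checks done before it commits to a truncate. Hedged sketch (my_setattr_size is hypothetical; the VFS holds i_mutex around ->setattr()):

#include <linux/fs.h>

static int my_setattr_size(struct inode *inode, struct iattr *attr)
{
        int error;

        if (!(attr->ia_valid & ATTR_SIZE))
                return 0;

        error = inode_newsize_ok(inode, attr->ia_size);
        if (error)
                return error;

        /* ... safe to perform the actual size change here ... */
        return 0;
}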
diff --git a/fs/autofs/dirhash.c b/fs/autofs/dirhash.c
index 2316e944a10..e947915109e 100644
--- a/fs/autofs/dirhash.c
+++ b/fs/autofs/dirhash.c
@@ -90,7 +90,7 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
continue;
}
- while (d_mountpoint(path.dentry) && follow_down(&path));
+ while (d_mountpoint(path.dentry) && follow_down(&path))
;
umount_ok = may_umount(path.mnt);
path_put(&path);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 615d5496fe0..33baf27fac7 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -737,12 +737,7 @@ befs_put_super(struct super_block *sb)
{
kfree(BEFS_SB(sb)->mount_opts.iocharset);
BEFS_SB(sb)->mount_opts.iocharset = NULL;
-
- if (BEFS_SB(sb)->nls) {
- unload_nls(BEFS_SB(sb)->nls);
- BEFS_SB(sb)->nls = NULL;
- }
-
+ unload_nls(BEFS_SB(sb)->nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
@@ -842,7 +837,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = BEFS_SUPER_MAGIC;
/* Set real blocksize of fs */
sb_set_blocksize(sb, (ulong) befs_sb->block_size);
- sb->s_op = (struct super_operations *) &befs_sops;
+ sb->s_op = &befs_sops;
root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
if (IS_ERR(root)) {
ret = PTR_ERR(root);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7c1e65d5487..b9b3bb51b1e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1280,9 +1280,6 @@ static int writenote(struct memelfnote *men, struct file *file,
#define DUMP_WRITE(addr, nr) \
if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
goto end_coredump;
-#define DUMP_SEEK(off) \
- if (!dump_seek(file, (off))) \
- goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs,
u16 machine, u32 flags, u8 osabi)
@@ -1714,42 +1711,52 @@ struct elf_note_info {
int numnote;
};
-static int fill_note_info(struct elfhdr *elf, int phdrs,
- struct elf_note_info *info,
- long signr, struct pt_regs *regs)
+static int elf_note_info_init(struct elf_note_info *info)
{
-#define NUM_NOTES 6
- struct list_head *t;
-
- info->notes = NULL;
- info->prstatus = NULL;
- info->psinfo = NULL;
- info->fpu = NULL;
-#ifdef ELF_CORE_COPY_XFPREGS
- info->xfpu = NULL;
-#endif
+ memset(info, 0, sizeof(*info));
INIT_LIST_HEAD(&info->thread_list);
- info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote),
- GFP_KERNEL);
+ /* Allocate space for six ELF notes */
+ info->notes = kmalloc(6 * sizeof(struct memelfnote), GFP_KERNEL);
if (!info->notes)
return 0;
info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
if (!info->psinfo)
- return 0;
+ goto notes_free;
info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
if (!info->prstatus)
- return 0;
+ goto psinfo_free;
info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
if (!info->fpu)
- return 0;
+ goto prstatus_free;
#ifdef ELF_CORE_COPY_XFPREGS
info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
if (!info->xfpu)
- return 0;
+ goto fpu_free;
+#endif
+ return 1;
+#ifdef ELF_CORE_COPY_XFPREGS
+ fpu_free:
+ kfree(info->fpu);
#endif
+ prstatus_free:
+ kfree(info->prstatus);
+ psinfo_free:
+ kfree(info->psinfo);
+ notes_free:
+ kfree(info->notes);
+ return 0;
+}
+
+static int fill_note_info(struct elfhdr *elf, int phdrs,
+ struct elf_note_info *info,
+ long signr, struct pt_regs *regs)
+{
+ struct list_head *t;
+
+ if (!elf_note_info_init(info))
+ return 0;
- info->thread_status_size = 0;
if (signr) {
struct core_thread *ct;
struct elf_thread_status *ets;
@@ -1809,8 +1816,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
#endif
return 1;
-
-#undef NUM_NOTES
}
static size_t get_note_info_size(struct elf_note_info *info)
@@ -2016,7 +2021,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
goto end_coredump;
/* Align to page */
- DUMP_SEEK(dataoff - foffset);
+ if (!dump_seek(file, dataoff - foffset))
+ goto end_coredump;
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
@@ -2027,33 +2033,19 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
- struct vm_area_struct *tmp_vma;
-
- if (get_user_pages(current, current->mm, addr, 1, 0, 1,
- &page, &tmp_vma) <= 0) {
- DUMP_SEEK(PAGE_SIZE);
- } else {
- if (page == ZERO_PAGE(0)) {
- if (!dump_seek(file, PAGE_SIZE)) {
- page_cache_release(page);
- goto end_coredump;
- }
- } else {
- void *kaddr;
- flush_cache_page(tmp_vma, addr,
- page_to_pfn(page));
- kaddr = kmap(page);
- if ((size += PAGE_SIZE) > limit ||
- !dump_write(file, kaddr,
- PAGE_SIZE)) {
- kunmap(page);
- page_cache_release(page);
- goto end_coredump;
- }
- kunmap(page);
- }
+ int stop;
+
+ page = get_dump_page(addr);
+ if (page) {
+ void *kaddr = kmap(page);
+ stop = ((size += PAGE_SIZE) > limit) ||
+ !dump_write(file, kaddr, PAGE_SIZE);
+ kunmap(page);
page_cache_release(page);
- }
+ } else
+ stop = !dump_seek(file, PAGE_SIZE);
+ if (stop)
+ goto end_coredump;
}
}
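
The elf_note_info_init() rework above switches to the usual multi-step allocation pattern where each failure label unwinds exactly the allocations that already succeeded. A stripped-down illustration with hypothetical names, keeping the same 0-on-failure convention:

#include <linux/slab.h>
#include <linux/string.h>

struct foo {                    /* hypothetical two-allocation object */
        void *a;
        void *b;
};

static int foo_init(struct foo *f)
{
        memset(f, 0, sizeof(*f));

        f->a = kmalloc(64, GFP_KERNEL);
        if (!f->a)
                return 0;       /* nothing to unwind yet */
        f->b = kmalloc(64, GFP_KERNEL);
        if (!f->b)
                goto a_free;
        return 1;

a_free:
        kfree(f->a);
        return 0;
}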
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 20fbeced472..38502c67987 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -283,20 +283,23 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
}
stack_size = exec_params.stack_size;
- if (stack_size < interp_params.stack_size)
- stack_size = interp_params.stack_size;
-
if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
executable_stack = EXSTACK_ENABLE_X;
else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
executable_stack = EXSTACK_DISABLE_X;
- else if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
- executable_stack = EXSTACK_ENABLE_X;
- else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
- executable_stack = EXSTACK_DISABLE_X;
else
executable_stack = EXSTACK_DEFAULT;
+ if (stack_size == 0) {
+ stack_size = interp_params.stack_size;
+ if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
+ executable_stack = EXSTACK_ENABLE_X;
+ else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
+ executable_stack = EXSTACK_DISABLE_X;
+ else
+ executable_stack = EXSTACK_DEFAULT;
+ }
+
retval = -ENOEXEC;
if (stack_size == 0)
goto error;
@@ -1325,9 +1328,6 @@ static int writenote(struct memelfnote *men, struct file *file)
#define DUMP_WRITE(addr, nr) \
if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
goto end_coredump;
-#define DUMP_SEEK(off) \
- if (!dump_seek(file, (off))) \
- goto end_coredump;
static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
{
@@ -1518,6 +1518,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
unsigned long *limit, unsigned long mm_flags)
{
struct vm_area_struct *vma;
+ int err = 0;
for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
unsigned long addr;
@@ -1525,43 +1526,26 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
if (!maydump(vma, mm_flags))
continue;
- for (addr = vma->vm_start;
- addr < vma->vm_end;
- addr += PAGE_SIZE
- ) {
- struct vm_area_struct *vma;
- struct page *page;
-
- if (get_user_pages(current, current->mm, addr, 1, 0, 1,
- &page, &vma) <= 0) {
- DUMP_SEEK(file->f_pos + PAGE_SIZE);
- }
- else if (page == ZERO_PAGE(0)) {
- page_cache_release(page);
- DUMP_SEEK(file->f_pos + PAGE_SIZE);
- }
- else {
- void *kaddr;
-
- flush_cache_page(vma, addr, page_to_pfn(page));
- kaddr = kmap(page);
- if ((*size += PAGE_SIZE) > *limit ||
- !dump_write(file, kaddr, PAGE_SIZE)
- ) {
- kunmap(page);
- page_cache_release(page);
- return -EIO;
- }
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ struct page *page = get_dump_page(addr);
+ if (page) {
+ void *kaddr = kmap(page);
+ *size += PAGE_SIZE;
+ if (*size > *limit)
+ err = -EFBIG;
+ else if (!dump_write(file, kaddr, PAGE_SIZE))
+ err = -EIO;
kunmap(page);
page_cache_release(page);
- }
+ } else if (!dump_seek(file, file->f_pos + PAGE_SIZE))
+ err = -EFBIG;
+ if (err)
+ goto out;
}
}
-
- return 0;
-
-end_coredump:
- return -EFBIG;
+out:
+ return err;
}
#endif
@@ -1802,7 +1786,8 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
goto end_coredump;
}
- DUMP_SEEK(dataoff);
+ if (!dump_seek(file, dataoff))
+ goto end_coredump;
if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
goto end_coredump;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index e92f229e3c6..a2796651e75 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -278,8 +278,6 @@ static int decompress_exec(
ret = bprm->file->f_op->read(bprm->file, buf, LBUFSIZE, &fpos);
if (ret <= 0)
break;
- if (ret >= (unsigned long) -4096)
- break;
len -= ret;
strm.next_in = buf;
@@ -335,7 +333,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
"(%d != %d)", (unsigned) r, curid, id);
goto failed;
} else if ( ! p->lib_list[id].loaded &&
- load_flat_shared_library(id, p) > (unsigned long) -4096) {
+ IS_ERR_VALUE(load_flat_shared_library(id, p))) {
printk("BINFMT_FLAT: failed to load library %d", id);
goto failed;
}
@@ -545,7 +543,7 @@ static int load_flat_file(struct linux_binprm * bprm,
textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
MAP_PRIVATE|MAP_EXECUTABLE, 0);
up_write(&current->mm->mmap_sem);
- if (!textpos || textpos >= (unsigned long) -4096) {
+ if (!textpos || IS_ERR_VALUE(textpos)) {
if (!textpos)
textpos = (unsigned long) -ENOMEM;
printk("Unable to mmap process text, errno %d\n", (int)-textpos);
@@ -560,7 +558,7 @@ static int load_flat_file(struct linux_binprm * bprm,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
- if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) {
+ if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
if (!realdatastart)
realdatastart = (unsigned long) -ENOMEM;
printk("Unable to allocate RAM for process data, errno %d\n",
@@ -587,7 +585,7 @@ static int load_flat_file(struct linux_binprm * bprm,
result = bprm->file->f_op->read(bprm->file, (char *) datapos,
data_len + (relocs * sizeof(unsigned long)), &fpos);
}
- if (result >= (unsigned long)-4096) {
+ if (IS_ERR_VALUE(result)) {
printk("Unable to read data+bss, errno %d\n", (int)-result);
do_munmap(current->mm, textpos, text_len);
do_munmap(current->mm, realdatastart, data_len + extra);
@@ -607,7 +605,7 @@ static int load_flat_file(struct linux_binprm * bprm,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
- if (!textpos || textpos >= (unsigned long) -4096) {
+ if (!textpos || IS_ERR_VALUE(textpos)) {
if (!textpos)
textpos = (unsigned long) -ENOMEM;
printk("Unable to allocate RAM for process text/data, errno %d\n",
@@ -641,7 +639,7 @@ static int load_flat_file(struct linux_binprm * bprm,
fpos = 0;
result = bprm->file->f_op->read(bprm->file,
(char *) textpos, text_len, &fpos);
- if (result < (unsigned long) -4096)
+ if (!IS_ERR_VALUE(result))
result = decompress_exec(bprm, text_len, (char *) datapos,
data_len + (relocs * sizeof(unsigned long)), 0);
}
@@ -651,13 +649,13 @@ static int load_flat_file(struct linux_binprm * bprm,
fpos = 0;
result = bprm->file->f_op->read(bprm->file,
(char *) textpos, text_len, &fpos);
- if (result < (unsigned long) -4096) {
+ if (!IS_ERR_VALUE(result)) {
fpos = ntohl(hdr->data_start);
result = bprm->file->f_op->read(bprm->file, (char *) datapos,
data_len + (relocs * sizeof(unsigned long)), &fpos);
}
}
- if (result >= (unsigned long)-4096) {
+ if (IS_ERR_VALUE(result)) {
printk("Unable to read code+data+bss, errno %d\n",(int)-result);
do_munmap(current->mm, textpos, text_len + data_len + extra +
MAX_SHARED_LIBS * sizeof(unsigned long));
@@ -835,7 +833,7 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
res = prepare_binprm(&bprm);
- if (res <= (unsigned long)-4096)
+ if (!IS_ERR_VALUE(res))
res = load_flat_file(&bprm, libs, id, NULL);
abort_creds(bprm.cred);
@@ -880,7 +878,7 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs)
stack_len += FLAT_DATA_ALIGN - 1; /* reserve for upcoming alignment */
res = load_flat_file(bprm, &libinfo, 0, &stack_len);
- if (res > (unsigned long)-4096)
+ if (IS_ERR_VALUE(res))
return res;
/* Update data segment pointers for all libraries */
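
IS_ERR_VALUE(), used throughout the conversion above in place of the open-coded "(unsigned long) -4096" comparisons, tests whether an unsigned return value actually encodes a negative errno. Illustrative use (check_map is hypothetical):

#include <linux/err.h>

/* propagate a mapping address as -errno when it encodes an error */
static int check_map(unsigned long addr)
{
        if (IS_ERR_VALUE(addr))
                return (int)addr;
        return 0;
}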
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 71e7e03ac34..9cf4b926f8e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -216,8 +216,6 @@ EXPORT_SYMBOL(fsync_bdev);
* freeze_bdev -- lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
* The reference counter (bd_fsfreeze_count) guarantees that only the last
@@ -232,46 +230,55 @@ struct super_block *freeze_bdev(struct block_device *bdev)
int error = 0;
mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- bdev->bd_fsfreeze_count++;
+ if (++bdev->bd_fsfreeze_count > 1) {
+ /*
+ * We don't even need to grab a reference - the first call
+ * to freeze_bdev grabs an active reference and only the last
+ * thaw_bdev drops it.
+ */
sb = get_super(bdev);
+ drop_super(sb);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return sb;
}
- bdev->bd_fsfreeze_count++;
-
- down(&bdev->bd_mount_sem);
- sb = get_super(bdev);
- if (sb && !(sb->s_flags & MS_RDONLY)) {
- sb->s_frozen = SB_FREEZE_WRITE;
- smp_wmb();
-
- sync_filesystem(sb);
-
- sb->s_frozen = SB_FREEZE_TRANS;
- smp_wmb();
-
- sync_blockdev(sb->s_bdev);
-
- if (sb->s_op->freeze_fs) {
- error = sb->s_op->freeze_fs(sb);
- if (error) {
- printk(KERN_ERR
- "VFS:Filesystem freeze failed\n");
- sb->s_frozen = SB_UNFROZEN;
- drop_super(sb);
- up(&bdev->bd_mount_sem);
- bdev->bd_fsfreeze_count--;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return ERR_PTR(error);
- }
+
+ sb = get_active_super(bdev);
+ if (!sb)
+ goto out;
+ if (sb->s_flags & MS_RDONLY) {
+ deactivate_locked_super(sb);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return sb;
+ }
+
+ sb->s_frozen = SB_FREEZE_WRITE;
+ smp_wmb();
+
+ sync_filesystem(sb);
+
+ sb->s_frozen = SB_FREEZE_TRANS;
+ smp_wmb();
+
+ sync_blockdev(sb->s_bdev);
+
+ if (sb->s_op->freeze_fs) {
+ error = sb->s_op->freeze_fs(sb);
+ if (error) {
+ printk(KERN_ERR
+ "VFS:Filesystem freeze failed\n");
+ sb->s_frozen = SB_UNFROZEN;
+ deactivate_locked_super(sb);
+ bdev->bd_fsfreeze_count--;
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return ERR_PTR(error);
}
}
+ up_write(&sb->s_umount);
+ out:
sync_blockdev(bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
- return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
+ return sb; /* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);
@@ -284,44 +291,44 @@ EXPORT_SYMBOL(freeze_bdev);
*/
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
- int error = 0;
+ int error = -EINVAL;
mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (!bdev->bd_fsfreeze_count) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return -EINVAL;
- }
-
- bdev->bd_fsfreeze_count--;
- if (bdev->bd_fsfreeze_count > 0) {
- if (sb)
- drop_super(sb);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return 0;
- }
-
- if (sb) {
- BUG_ON(sb->s_bdev != bdev);
- if (!(sb->s_flags & MS_RDONLY)) {
- if (sb->s_op->unfreeze_fs) {
- error = sb->s_op->unfreeze_fs(sb);
- if (error) {
- printk(KERN_ERR
- "VFS:Filesystem thaw failed\n");
- sb->s_frozen = SB_FREEZE_TRANS;
- bdev->bd_fsfreeze_count++;
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- return error;
- }
- }
- sb->s_frozen = SB_UNFROZEN;
- smp_wmb();
- wake_up(&sb->s_wait_unfrozen);
+ if (!bdev->bd_fsfreeze_count)
+ goto out_unlock;
+
+ error = 0;
+ if (--bdev->bd_fsfreeze_count > 0)
+ goto out_unlock;
+
+ if (!sb)
+ goto out_unlock;
+
+ BUG_ON(sb->s_bdev != bdev);
+ down_write(&sb->s_umount);
+ if (sb->s_flags & MS_RDONLY)
+ goto out_deactivate;
+
+ if (sb->s_op->unfreeze_fs) {
+ error = sb->s_op->unfreeze_fs(sb);
+ if (error) {
+ printk(KERN_ERR
+ "VFS:Filesystem thaw failed\n");
+ sb->s_frozen = SB_FREEZE_TRANS;
+ bdev->bd_fsfreeze_count++;
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ return error;
}
- drop_super(sb);
}
- up(&bdev->bd_mount_sem);
+ sb->s_frozen = SB_UNFROZEN;
+ smp_wmb();
+ wake_up(&sb->s_wait_unfrozen);
+
+out_deactivate:
+ if (sb)
+ deactivate_locked_super(sb);
+out_unlock:
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return 0;
}
@@ -430,7 +437,6 @@ static void init_once(void *foo)
memset(bdev, 0, sizeof(*bdev));
mutex_init(&bdev->bd_mutex);
- sema_init(&bdev->bd_mount_sem, 1);
INIT_LIST_HEAD(&bdev->bd_inodes);
INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
@@ -1114,7 +1120,7 @@ EXPORT_SYMBOL(revalidate_disk);
int check_disk_change(struct block_device *bdev)
{
struct gendisk *disk = bdev->bd_disk;
- struct block_device_operations * bdops = disk->fops;
+ const struct block_device_operations *bdops = disk->fops;
if (!bdops->media_changed)
return 0;
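
Callers are expected to pair freeze_bdev() with thaw_bdev(), handing back whatever superblock the freeze returned (possibly NULL when nothing is mounted). A minimal sketch of that contract; the include below assumes the freeze_bdev()/thaw_bdev() declarations are still picked up via buffer_head.h in this tree:

#include <linux/fs.h>
#include <linux/buffer_head.h>  /* freeze_bdev()/thaw_bdev() */
#include <linux/err.h>

static int with_frozen_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev);         /* sb may be NULL: nothing mounted */
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        /* ... take the snapshot / do the work while writes are blocked ... */

        return thaw_bdev(bdev, sb);
}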
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 019e8af449a..282ca085c2f 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
/* number of things on the pending list */
atomic_t num_pending;
+ /* reference counter for this struct */
+ atomic_t refs;
+
unsigned long sequence;
/* protects the pending list. */
@@ -71,7 +74,12 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
unsigned long flags;
spin_lock_irqsave(&worker->workers->lock, flags);
worker->idle = 1;
- list_move(&worker->worker_list, &worker->workers->idle_list);
+
+ /* the list may be empty if the worker is just starting */
+ if (!list_empty(&worker->worker_list)) {
+ list_move(&worker->worker_list,
+ &worker->workers->idle_list);
+ }
spin_unlock_irqrestore(&worker->workers->lock, flags);
}
}
@@ -87,23 +95,49 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
unsigned long flags;
spin_lock_irqsave(&worker->workers->lock, flags);
worker->idle = 0;
- list_move_tail(&worker->worker_list,
- &worker->workers->worker_list);
+
+ if (!list_empty(&worker->worker_list)) {
+ list_move_tail(&worker->worker_list,
+ &worker->workers->worker_list);
+ }
spin_unlock_irqrestore(&worker->workers->lock, flags);
}
}
-static noinline int run_ordered_completions(struct btrfs_workers *workers,
- struct btrfs_work *work)
+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
+ struct btrfs_workers *workers = worker->workers;
unsigned long flags;
+ rmb();
+ if (!workers->atomic_start_pending)
+ return;
+
+ spin_lock_irqsave(&workers->lock, flags);
+ if (!workers->atomic_start_pending)
+ goto out;
+
+ workers->atomic_start_pending = 0;
+ if (workers->num_workers >= workers->max_workers)
+ goto out;
+
+ spin_unlock_irqrestore(&workers->lock, flags);
+ btrfs_start_workers(workers, 1);
+ return;
+
+out:
+ spin_unlock_irqrestore(&workers->lock, flags);
+}
+
+static noinline int run_ordered_completions(struct btrfs_workers *workers,
+ struct btrfs_work *work)
+{
if (!workers->ordered)
return 0;
set_bit(WORK_DONE_BIT, &work->flags);
- spin_lock_irqsave(&workers->lock, flags);
+ spin_lock(&workers->order_lock);
while (1) {
if (!list_empty(&workers->prio_order_list)) {
@@ -126,45 +160,118 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
break;
- spin_unlock_irqrestore(&workers->lock, flags);
+ spin_unlock(&workers->order_lock);
work->ordered_func(work);
/* now take the lock again and call the freeing code */
- spin_lock_irqsave(&workers->lock, flags);
+ spin_lock(&workers->order_lock);
list_del(&work->order_list);
work->ordered_free(work);
}
- spin_unlock_irqrestore(&workers->lock, flags);
+ spin_unlock(&workers->order_lock);
return 0;
}
+static void put_worker(struct btrfs_worker_thread *worker)
+{
+ if (atomic_dec_and_test(&worker->refs))
+ kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+ int freeit = 0;
+
+ spin_lock_irq(&worker->lock);
+ spin_lock(&worker->workers->lock);
+ if (worker->workers->num_workers > 1 &&
+ worker->idle &&
+ !worker->working &&
+ !list_empty(&worker->worker_list) &&
+ list_empty(&worker->prio_pending) &&
+ list_empty(&worker->pending) &&
+ atomic_read(&worker->num_pending) == 0) {
+ freeit = 1;
+ list_del_init(&worker->worker_list);
+ worker->workers->num_workers--;
+ }
+ spin_unlock(&worker->workers->lock);
+ spin_unlock_irq(&worker->lock);
+
+ if (freeit)
+ put_worker(worker);
+ return freeit;
+}
+
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+ struct list_head *prio_head,
+ struct list_head *head)
+{
+ struct btrfs_work *work = NULL;
+ struct list_head *cur = NULL;
+
+ if (!list_empty(prio_head))
+ cur = prio_head->next;
+
+ smp_mb();
+ if (!list_empty(&worker->prio_pending))
+ goto refill;
+
+ if (!list_empty(head))
+ cur = head->next;
+
+ if (cur)
+ goto out;
+
+refill:
+ spin_lock_irq(&worker->lock);
+ list_splice_tail_init(&worker->prio_pending, prio_head);
+ list_splice_tail_init(&worker->pending, head);
+
+ if (!list_empty(prio_head))
+ cur = prio_head->next;
+ else if (!list_empty(head))
+ cur = head->next;
+ spin_unlock_irq(&worker->lock);
+
+ if (!cur)
+ goto out_fail;
+
+out:
+ work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+ return work;
+}
+
/*
* main loop for servicing work items
*/
static int worker_loop(void *arg)
{
struct btrfs_worker_thread *worker = arg;
- struct list_head *cur;
+ struct list_head head;
+ struct list_head prio_head;
struct btrfs_work *work;
+
+ INIT_LIST_HEAD(&head);
+ INIT_LIST_HEAD(&prio_head);
+
do {
- spin_lock_irq(&worker->lock);
-again_locked:
+again:
while (1) {
- if (!list_empty(&worker->prio_pending))
- cur = worker->prio_pending.next;
- else if (!list_empty(&worker->pending))
- cur = worker->pending.next;
- else
+
+
+ work = get_next_work(worker, &prio_head, &head);
+ if (!work)
break;
- work = list_entry(cur, struct btrfs_work, list);
list_del(&work->list);
clear_bit(WORK_QUEUED_BIT, &work->flags);
work->worker = worker;
- spin_unlock_irq(&worker->lock);
work->func(work);
@@ -175,9 +282,13 @@ again_locked:
*/
run_ordered_completions(worker->workers, work);
- spin_lock_irq(&worker->lock);
- check_idle_worker(worker);
+ check_pending_worker_creates(worker);
+
}
+
+ spin_lock_irq(&worker->lock);
+ check_idle_worker(worker);
+
if (freezing(current)) {
worker->working = 0;
spin_unlock_irq(&worker->lock);
@@ -216,8 +327,10 @@ again_locked:
spin_lock_irq(&worker->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!list_empty(&worker->pending) ||
- !list_empty(&worker->prio_pending))
- goto again_locked;
+ !list_empty(&worker->prio_pending)) {
+ spin_unlock_irq(&worker->lock);
+ goto again;
+ }
/*
* this makes sure we get a wakeup when someone
@@ -226,8 +339,13 @@ again_locked:
worker->working = 0;
spin_unlock_irq(&worker->lock);
- if (!kthread_should_stop())
- schedule();
+ if (!kthread_should_stop()) {
+ schedule_timeout(HZ * 120);
+ if (!worker->working &&
+ try_worker_shutdown(worker)) {
+ return 0;
+ }
+ }
}
__set_current_state(TASK_RUNNING);
}
@@ -242,16 +360,30 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
{
struct list_head *cur;
struct btrfs_worker_thread *worker;
+ int can_stop;
+ spin_lock_irq(&workers->lock);
list_splice_init(&workers->idle_list, &workers->worker_list);
while (!list_empty(&workers->worker_list)) {
cur = workers->worker_list.next;
worker = list_entry(cur, struct btrfs_worker_thread,
worker_list);
- kthread_stop(worker->task);
- list_del(&worker->worker_list);
- kfree(worker);
+
+ atomic_inc(&worker->refs);
+ workers->num_workers -= 1;
+ if (!list_empty(&worker->worker_list)) {
+ list_del_init(&worker->worker_list);
+ put_worker(worker);
+ can_stop = 1;
+ } else
+ can_stop = 0;
+ spin_unlock_irq(&workers->lock);
+ if (can_stop)
+ kthread_stop(worker->task);
+ spin_lock_irq(&workers->lock);
+ put_worker(worker);
}
+ spin_unlock_irq(&workers->lock);
return 0;
}
@@ -266,10 +398,13 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
INIT_LIST_HEAD(&workers->order_list);
INIT_LIST_HEAD(&workers->prio_order_list);
spin_lock_init(&workers->lock);
+ spin_lock_init(&workers->order_lock);
workers->max_workers = max;
workers->idle_thresh = 32;
workers->name = name;
workers->ordered = 0;
+ workers->atomic_start_pending = 0;
+ workers->atomic_worker_start = 0;
}
/*
@@ -293,7 +428,9 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
INIT_LIST_HEAD(&worker->prio_pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);
+
atomic_set(&worker->num_pending, 0);
+ atomic_set(&worker->refs, 1);
worker->workers = workers;
worker->task = kthread_run(worker_loop, worker,
"btrfs-%s-%d", workers->name,
@@ -303,7 +440,6 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
kfree(worker);
goto fail;
}
-
spin_lock_irq(&workers->lock);
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
@@ -350,7 +486,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
*/
next = workers->worker_list.next;
worker = list_entry(next, struct btrfs_worker_thread, worker_list);
- atomic_inc(&worker->num_pending);
worker->sequence++;
if (worker->sequence % workers->idle_thresh == 0)
@@ -367,28 +502,18 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
+ struct list_head *fallback;
again:
spin_lock_irqsave(&workers->lock, flags);
worker = next_worker(workers);
- spin_unlock_irqrestore(&workers->lock, flags);
if (!worker) {
- spin_lock_irqsave(&workers->lock, flags);
if (workers->num_workers >= workers->max_workers) {
- struct list_head *fallback = NULL;
- /*
- * we have failed to find any workers, just
- * return the force one
- */
- if (!list_empty(&workers->worker_list))
- fallback = workers->worker_list.next;
- if (!list_empty(&workers->idle_list))
- fallback = workers->idle_list.next;
- BUG_ON(!fallback);
- worker = list_entry(fallback,
- struct btrfs_worker_thread, worker_list);
- spin_unlock_irqrestore(&workers->lock, flags);
+ goto fallback;
+ } else if (workers->atomic_worker_start) {
+ workers->atomic_start_pending = 1;
+ goto fallback;
} else {
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
@@ -396,6 +521,28 @@ again:
goto again;
}
}
+ goto found;
+
+fallback:
+ fallback = NULL;
+ /*
+ * we have failed to find a free worker, just
+ * fall back to the first one we can find.
+ */
+ if (!list_empty(&workers->worker_list))
+ fallback = workers->worker_list.next;
+ if (!list_empty(&workers->idle_list))
+ fallback = workers->idle_list.next;
+ BUG_ON(!fallback);
+ worker = list_entry(fallback,
+ struct btrfs_worker_thread, worker_list);
+found:
+ /*
+ * this makes sure the worker doesn't exit before it is placed
+ * onto a busy/idle list
+ */
+ atomic_inc(&worker->num_pending);
+ spin_unlock_irqrestore(&workers->lock, flags);
return worker;
}
@@ -427,7 +574,7 @@ int btrfs_requeue_work(struct btrfs_work *work)
spin_lock(&worker->workers->lock);
worker->idle = 0;
list_move_tail(&worker->worker_list,
- &worker->workers->worker_list);
+ &worker->workers->worker_list);
spin_unlock(&worker->workers->lock);
}
if (!worker->working) {
@@ -435,9 +582,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
worker->working = 1;
}
- spin_unlock_irqrestore(&worker->lock, flags);
if (wake)
wake_up_process(worker->task);
+ spin_unlock_irqrestore(&worker->lock, flags);
out:
return 0;
@@ -463,14 +610,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
worker = find_worker(workers);
if (workers->ordered) {
- spin_lock_irqsave(&workers->lock, flags);
+ /*
+ * you're not allowed to do ordered queues from an
+ * interrupt handler
+ */
+ spin_lock(&workers->order_lock);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
list_add_tail(&work->order_list,
&workers->prio_order_list);
} else {
list_add_tail(&work->order_list, &workers->order_list);
}
- spin_unlock_irqrestore(&workers->lock, flags);
+ spin_unlock(&workers->order_lock);
} else {
INIT_LIST_HEAD(&work->order_list);
}
@@ -481,7 +632,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
list_add_tail(&work->list, &worker->prio_pending);
else
list_add_tail(&work->list, &worker->pending);
- atomic_inc(&worker->num_pending);
check_busy_worker(worker);
/*
@@ -492,10 +642,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
wake = 1;
worker->working = 1;
- spin_unlock_irqrestore(&worker->lock, flags);
-
if (wake)
wake_up_process(worker->task);
+ spin_unlock_irqrestore(&worker->lock, flags);
+
out:
return 0;
}
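
The new refs counter and put_worker() above are the standard refcounting idiom: every holder takes a reference and the last put frees the object, so a worker cannot be freed while the shutdown path or the worker thread itself still uses it. Generic shape with a hypothetical struct:

#include <linux/slab.h>
#include <asm/atomic.h>

struct thing {                  /* hypothetical refcounted object */
        atomic_t refs;
        /* ... payload ... */
};

static void thing_get(struct thing *t)
{
        atomic_inc(&t->refs);
}

static void thing_put(struct thing *t)
{
        if (atomic_dec_and_test(&t->refs))
                kfree(t);
}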
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 1b511c109db..fc089b95ec1 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -73,6 +73,15 @@ struct btrfs_workers {
/* force completions in the order they were queued */
int ordered;
+ /* more workers required, but in an interrupt handler */
+ int atomic_start_pending;
+
+ /*
+ * are we allowed to sleep while starting workers or are we required
+ * to start them at a later time?
+ */
+ int atomic_worker_start;
+
/* list with all the work threads. The workers on the idle thread
* may be actively servicing jobs, but they haven't yet hit the
* idle thresh limit above.
@@ -90,6 +99,9 @@ struct btrfs_workers {
/* lock for finding the next worker thread to queue on */
spinlock_t lock;
+ /* lock for the ordered lists */
+ spinlock_t order_lock;
+
/* extra name for this worker, used for current->name */
char *name;
};
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index ea1ea0af8c0..82ee56bba29 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -138,6 +138,7 @@ struct btrfs_inode {
* of these.
*/
unsigned ordered_data_close:1;
+ unsigned dummy_inode:1;
struct inode vfs_inode;
};
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9d8ba4d54a3..a11a32058b5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -506,10 +506,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
*/
set_page_extent_mapped(page);
lock_extent(tree, last_offset, end, GFP_NOFS);
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, last_offset,
PAGE_CACHE_SIZE);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
if (!em || last_offset < em->start ||
(last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
@@ -593,11 +593,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
em_tree = &BTRFS_I(inode)->extent_tree;
/* we need the actual starting offset of this extent in the file */
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree,
page_offset(bio->bi_io_vec->bv_page),
PAGE_CACHE_SIZE);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 3fdcc0512d3..ec96f3a6d53 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2853,6 +2853,12 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
int split;
int num_doubles = 0;
+ l = path->nodes[0];
+ slot = path->slots[0];
+ if (extend && data_size + btrfs_item_size_nr(l, slot) +
+ sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
+ return -EOVERFLOW;
+
/* first try to make some room by pushing left and right */
if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
wret = push_leaf_right(trans, root, path, data_size, 0);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 837435ce84c..80599b4e42b 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -114,6 +114,10 @@ struct btrfs_ordered_sum;
*/
#define BTRFS_DEV_ITEMS_OBJECTID 1ULL
+#define BTRFS_BTREE_INODE_OBJECTID 1
+
+#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2
+
/*
* we can actually store much bigger names, but lets not confuse the rest
* of linux
@@ -670,6 +674,7 @@ struct btrfs_space_info {
u64 bytes_reserved; /* total bytes the allocator has reserved for
current allocations */
u64 bytes_readonly; /* total bytes that are read only */
+ u64 bytes_super; /* total bytes reserved for the super blocks */
/* delalloc accounting */
u64 bytes_delalloc; /* number of bytes reserved for allocation,
@@ -726,6 +731,15 @@ enum btrfs_caching_type {
BTRFS_CACHE_FINISHED = 2,
};
+struct btrfs_caching_control {
+ struct list_head list;
+ struct mutex mutex;
+ wait_queue_head_t wait;
+ struct btrfs_block_group_cache *block_group;
+ u64 progress;
+ atomic_t count;
+};
+
struct btrfs_block_group_cache {
struct btrfs_key key;
struct btrfs_block_group_item item;
@@ -733,6 +747,7 @@ struct btrfs_block_group_cache {
spinlock_t lock;
u64 pinned;
u64 reserved;
+ u64 bytes_super;
u64 flags;
u64 sectorsize;
int extents_thresh;
@@ -742,8 +757,9 @@ struct btrfs_block_group_cache {
int dirty;
/* cache tracking stuff */
- wait_queue_head_t caching_q;
int cached;
+ struct btrfs_caching_control *caching_ctl;
+ u64 last_byte_to_unpin;
struct btrfs_space_info *space_info;
@@ -782,13 +798,16 @@ struct btrfs_fs_info {
/* the log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
+
+ spinlock_t fs_roots_radix_lock;
struct radix_tree_root fs_roots_radix;
/* block group cache stuff */
spinlock_t block_group_cache_lock;
struct rb_root block_group_cache_tree;
- struct extent_io_tree pinned_extents;
+ struct extent_io_tree freed_extents[2];
+ struct extent_io_tree *pinned_extents;
/* logical->physical extent mapping */
struct btrfs_mapping_tree mapping_tree;
@@ -822,11 +841,7 @@ struct btrfs_fs_info {
struct mutex transaction_kthread_mutex;
struct mutex cleaner_mutex;
struct mutex chunk_mutex;
- struct mutex drop_mutex;
struct mutex volume_mutex;
- struct mutex tree_reloc_mutex;
- struct rw_semaphore extent_commit_sem;
-
/*
* this protects the ordered operations list only while we are
* processing all of the entries on it. This way we make
@@ -835,10 +850,16 @@ struct btrfs_fs_info {
* before jumping into the main commit.
*/
struct mutex ordered_operations_mutex;
+ struct rw_semaphore extent_commit_sem;
+
+ struct rw_semaphore subvol_sem;
+
+ struct srcu_struct subvol_srcu;
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
+ struct list_head caching_block_groups;
atomic_t nr_async_submits;
atomic_t async_submit_draining;
@@ -996,10 +1017,12 @@ struct btrfs_root {
u32 stripesize;
u32 type;
- u64 highest_inode;
- u64 last_inode_alloc;
+
+ u64 highest_objectid;
int ref_cows;
int track_dirty;
+ int in_radix;
+
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
@@ -1920,8 +1943,8 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
-int btrfs_update_pinned_extents(struct btrfs_root *root,
- u64 bytenr, u64 num, int pin);
+int btrfs_pin_extent(struct btrfs_root *root,
+ u64 bytenr, u64 num, int reserved);
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
@@ -1971,9 +1994,10 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_io_tree *unpin);
+ struct btrfs_root *root);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -1984,6 +2008,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_root *root);
+int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
@@ -2006,7 +2031,6 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
u64 bytes);
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
u64 bytes);
-void btrfs_free_pinned_extents(struct btrfs_fs_info *info);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -2100,12 +2124,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *parent);
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
- struct btrfs_path *path,
- u64 root_id, u64 ref_id);
+ struct btrfs_path *path,
+ u64 root_id, u64 ref_id);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
- u64 root_id, u8 type, u64 ref_id,
- u64 dirid, u64 sequence,
+ u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
+ const char *name, int name_len);
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *tree_root,
+ u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
const char *name, int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_key *key);
@@ -2120,6 +2147,7 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
int btrfs_search_root(struct btrfs_root *root, u64 search_start,
u64 *found_objectid);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
+int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
/* dir-item.c */
@@ -2138,6 +2166,10 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
u64 objectid, const char *name, int name_len,
int mod);
+struct btrfs_dir_item *
+btrfs_search_dir_index_item(struct btrfs_root *root,
+ struct btrfs_path *path, u64 dirid,
+ const char *name, int name_len);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len);
@@ -2160,6 +2192,7 @@ int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset);
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 offset);
+int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
/* inode-map.c */
int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
@@ -2232,6 +2265,10 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index);
+int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *dir, u64 objectid,
+ const char *name, int name_len);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 new_size,
@@ -2242,7 +2279,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *new_root, struct dentry *dentry,
+ struct btrfs_root *new_root,
u64 new_dirid, u64 alloc_hint);
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio, unsigned long bio_flags);
@@ -2258,6 +2295,7 @@ int btrfs_write_inode(struct inode *inode, int wait);
void btrfs_dirty_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
+void btrfs_drop_inode(struct inode *inode);
int btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
@@ -2275,6 +2313,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
void btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t size);
+int btrfs_invalidate_inodes(struct btrfs_root *root);
+extern struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -2290,7 +2330,7 @@ extern struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, u64 end, u64 locked_end,
- u64 inline_limit, u64 *hint_block);
+ u64 inline_limit, u64 *hint_block, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 start, u64 end);
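
For illustration of the ctree.h changes above (freed_extents[2] replacing the single pinned_extents tree, with extent_commit_sem moved beside it), here is a minimal sketch, not part of the patch, of how the two trees are meant to alternate across a commit; the sketch_* helpers are hypothetical, and the real logic lives in btrfs_prepare_extent_commit() and btrfs_finish_extent_commit() later in this diff.

/*
 * Sketch only: pins recorded during a transaction go into the tree
 * fs_info->pinned_extents currently points at; when the commit is
 * prepared the pointer flips, so new pins land in the other tree
 * while the one that just stopped growing is drained and unpinned.
 */
static void sketch_flip_pinned_trees(struct btrfs_fs_info *fs_info)
{
	down_write(&fs_info->extent_commit_sem);
	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];
	up_write(&fs_info->extent_commit_sem);
}

static struct extent_io_tree *sketch_tree_to_unpin(struct btrfs_fs_info *fs_info)
{
	/* unpin from whichever tree is no longer receiving new pins */
	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		return &fs_info->freed_extents[1];
	return &fs_info->freed_extents[0];
}
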
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 1d70236ba00..f3a6075519c 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -281,6 +281,53 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
return btrfs_match_dir_item_name(root, path, name, name_len);
}
+struct btrfs_dir_item *
+btrfs_search_dir_index_item(struct btrfs_root *root,
+ struct btrfs_path *path, u64 dirid,
+ const char *name, int name_len)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_dir_item *di;
+ struct btrfs_key key;
+ u32 nritems;
+ int ret;
+
+ key.objectid = dirid;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+
+ while (1) {
+ if (path->slots[0] >= nritems) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0)
+ break;
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
+ break;
+
+ di = btrfs_match_dir_item_name(root, path, name, name_len);
+ if (di)
+ return di;
+
+ path->slots[0]++;
+ }
+ return NULL;
+}
+
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
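
A hypothetical caller, not part of the patch, showing the contract of the new btrfs_search_dir_index_item() helper above: NULL means the name is absent under dirid, ERR_PTR() propagates a search failure, and on success the item sits in path->nodes[0] with the directory index in the key offset. The sketch_* name is made up.

static int sketch_name_to_index(struct btrfs_root *root, u64 dirid,
				const char *name, int name_len, u64 *index)
{
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_search_dir_index_item(root, path, dirid, name, name_len);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
	} else if (!di) {
		ret = -ENOENT;
	} else {
		/* the dir index item's key offset is the index itself */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		*index = key.offset;
	}
	btrfs_free_path(path);
	return ret;
}
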
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8b819279001..644e796fd64 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -41,6 +41,7 @@
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
+static void free_fs_root(struct btrfs_root *root);
static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
@@ -123,15 +124,15 @@ static struct extent_map *btree_get_extent(struct inode *inode,
struct extent_map *em;
int ret;
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em) {
em->bdev =
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
goto out;
}
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
em = alloc_extent_map(GFP_NOFS);
if (!em) {
@@ -144,7 +145,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
em->block_start = 0;
em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
if (ret == -EEXIST) {
u64 failed_start = em->start;
@@ -163,7 +164,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
free_extent_map(em);
em = NULL;
}
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
if (ret)
em = ERR_PTR(ret);
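
The hunk above converts em_tree->lock from a spinlock to a reader/writer lock: lookups only read the tree and can now run concurrently under read_lock(), while insertions take write_lock(). A generic sketch of the pattern, with hypothetical sketch_* wrappers:

static struct extent_map *sketch_em_lookup(struct extent_map_tree *tree,
					   u64 start, u64 len)
{
	struct extent_map *em;

	read_lock(&tree->lock);		/* shared: many readers at once */
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);
	return em;
}

static int sketch_em_insert(struct extent_map_tree *tree,
			    struct extent_map *em)
{
	int ret;

	write_lock(&tree->lock);	/* exclusive: tree is modified */
	ret = add_extent_mapping(tree, em);
	write_unlock(&tree->lock);
	return ret;
}
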
@@ -772,7 +773,7 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
}
}
-static struct address_space_operations btree_aops = {
+static const struct address_space_operations btree_aops = {
.readpage = btree_readpage,
.writepage = btree_writepage,
.writepages = btree_writepages,
@@ -895,8 +896,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->fs_info = fs_info;
root->objectid = objectid;
root->last_trans = 0;
- root->highest_inode = 0;
- root->last_inode_alloc = 0;
+ root->highest_objectid = 0;
root->name = NULL;
root->in_sysfs = 0;
root->inode_tree.rb_node = NULL;
@@ -952,14 +952,16 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
root, fs_info, objectid);
ret = btrfs_find_last_root(tree_root, objectid,
&root->root_item, &root->root_key);
+ if (ret > 0)
+ return -ENOENT;
BUG_ON(ret);
generation = btrfs_root_generation(&root->root_item);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
- root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
+ root->commit_root = btrfs_root_node(root);
return 0;
}
@@ -1095,7 +1097,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_fs_info *fs_info = tree_root->fs_info;
struct btrfs_path *path;
struct extent_buffer *l;
- u64 highest_inode;
u64 generation;
u32 blocksize;
int ret = 0;
@@ -1110,7 +1111,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
kfree(root);
return ERR_PTR(ret);
}
- goto insert;
+ goto out;
}
__setup_root(tree_root->nodesize, tree_root->leafsize,
@@ -1120,39 +1121,30 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
path = btrfs_alloc_path();
BUG_ON(!path);
ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
- if (ret != 0) {
- if (ret > 0)
- ret = -ENOENT;
- goto out;
+ if (ret == 0) {
+ l = path->nodes[0];
+ read_extent_buffer(l, &root->root_item,
+ btrfs_item_ptr_offset(l, path->slots[0]),
+ sizeof(root->root_item));
+ memcpy(&root->root_key, location, sizeof(*location));
}
- l = path->nodes[0];
- read_extent_buffer(l, &root->root_item,
- btrfs_item_ptr_offset(l, path->slots[0]),
- sizeof(root->root_item));
- memcpy(&root->root_key, location, sizeof(*location));
- ret = 0;
-out:
- btrfs_release_path(root, path);
btrfs_free_path(path);
if (ret) {
- kfree(root);
+ if (ret > 0)
+ ret = -ENOENT;
return ERR_PTR(ret);
}
+
generation = btrfs_root_generation(&root->root_item);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node);
-insert:
- if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
+out:
+ if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
root->ref_cows = 1;
- ret = btrfs_find_highest_inode(root, &highest_inode);
- if (ret == 0) {
- root->highest_inode = highest_inode;
- root->last_inode_alloc = highest_inode;
- }
- }
+
return root;
}
@@ -1187,39 +1179,66 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
return fs_info->dev_root;
if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
return fs_info->csum_root;
-
+again:
+ spin_lock(&fs_info->fs_roots_radix_lock);
root = radix_tree_lookup(&fs_info->fs_roots_radix,
(unsigned long)location->objectid);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
if (root)
return root;
+ ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
+ if (ret == 0)
+ ret = -ENOENT;
+ if (ret < 0)
+ return ERR_PTR(ret);
+
root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
if (IS_ERR(root))
return root;
+ WARN_ON(btrfs_root_refs(&root->root_item) == 0);
set_anon_super(&root->anon_super, NULL);
+ ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+ if (ret)
+ goto fail;
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
ret = radix_tree_insert(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
root);
+ if (ret == 0)
+ root->in_radix = 1;
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ radix_tree_preload_end();
if (ret) {
- free_extent_buffer(root->node);
- kfree(root);
- return ERR_PTR(ret);
+ if (ret == -EEXIST) {
+ free_fs_root(root);
+ goto again;
+ }
+ goto fail;
}
- if (!(fs_info->sb->s_flags & MS_RDONLY)) {
- ret = btrfs_find_dead_roots(fs_info->tree_root,
- root->root_key.objectid);
- BUG_ON(ret);
+
+ ret = btrfs_find_dead_roots(fs_info->tree_root,
+ root->root_key.objectid);
+ WARN_ON(ret);
+
+ if (!(fs_info->sb->s_flags & MS_RDONLY))
btrfs_orphan_cleanup(root);
- }
+
return root;
+fail:
+ free_fs_root(root);
+ return ERR_PTR(ret);
}
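
The rewritten lookup above follows the standard radix-tree insertion discipline: preload outside the spinlock, insert under fs_roots_radix_lock, and treat -EEXIST as losing the race, freeing the local copy and retrying the lookup. A condensed sketch of just that step; the sketch_* name is hypothetical and error paths are trimmed.

static int sketch_publish_root(struct btrfs_fs_info *fs_info,
			       struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid, root);
	if (ret == 0)
		root->in_radix = 1;
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	/* -EEXIST: another task published the same root first; the
	 * caller frees its copy and looks the winner up again. */
	return ret;
}
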
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_key *location,
const char *name, int namelen)
{
+ return btrfs_read_fs_root_no_name(fs_info, location);
+#if 0
struct btrfs_root *root;
int ret;
@@ -1236,7 +1255,7 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
kfree(root);
return ERR_PTR(ret);
}
-#if 0
+
ret = btrfs_sysfs_add_root(root);
if (ret) {
free_extent_buffer(root->node);
@@ -1244,9 +1263,9 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
kfree(root);
return ERR_PTR(ret);
}
-#endif
root->in_sysfs = 1;
return root;
+#endif
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
@@ -1325,9 +1344,9 @@ static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
offset = page_offset(page);
em_tree = &BTRFS_I(inode)->extent_tree;
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
if (!em) {
__unplug_io_fn(bdi, page);
return;
@@ -1360,8 +1379,10 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
err = bdi_register(bdi, NULL, "btrfs-%d",
atomic_inc_return(&btrfs_bdi_num));
- if (err)
+ if (err) {
+ bdi_destroy(bdi);
return err;
+ }
bdi->ra_pages = default_backing_dev_info.ra_pages;
bdi->unplug_io_fn = btrfs_unplug_io_fn;
@@ -1451,9 +1472,12 @@ static int cleaner_kthread(void *arg)
break;
vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
- mutex_lock(&root->fs_info->cleaner_mutex);
- btrfs_clean_old_snapshots(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+
+ if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
+ mutex_trylock(&root->fs_info->cleaner_mutex)) {
+ btrfs_clean_old_snapshots(root);
+ mutex_unlock(&root->fs_info->cleaner_mutex);
+ }
if (freezing(current)) {
refrigerator();
@@ -1558,15 +1582,36 @@ struct btrfs_root *open_ctree(struct super_block *sb,
err = -ENOMEM;
goto fail;
}
- INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
+
+ ret = init_srcu_struct(&fs_info->subvol_srcu);
+ if (ret) {
+ err = ret;
+ goto fail;
+ }
+
+ ret = setup_bdi(fs_info, &fs_info->bdi);
+ if (ret) {
+ err = ret;
+ goto fail_srcu;
+ }
+
+ fs_info->btree_inode = new_inode(sb);
+ if (!fs_info->btree_inode) {
+ err = -ENOMEM;
+ goto fail_bdi;
+ }
+
+ INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->hashers);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
INIT_LIST_HEAD(&fs_info->ordered_operations);
+ INIT_LIST_HEAD(&fs_info->caching_block_groups);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->new_trans_lock);
spin_lock_init(&fs_info->ref_cache_lock);
+ spin_lock_init(&fs_info->fs_roots_radix_lock);
init_completion(&fs_info->kobj_unregister);
fs_info->tree_root = tree_root;
@@ -1585,11 +1630,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->sb = sb;
fs_info->max_extent = (u64)-1;
fs_info->max_inline = 8192 * 1024;
- if (setup_bdi(fs_info, &fs_info->bdi))
- goto fail_bdi;
- fs_info->btree_inode = new_inode(sb);
- fs_info->btree_inode->i_ino = 1;
- fs_info->btree_inode->i_nlink = 1;
fs_info->metadata_ratio = 8;
fs_info->thread_pool_size = min_t(unsigned long,
@@ -1602,6 +1642,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
sb->s_blocksize_bits = blksize_bits(4096);
sb->s_bdi = &fs_info->bdi;
+ fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+ fs_info->btree_inode->i_nlink = 1;
/*
* we set the i_size on the btree inode to the max possible int.
* the real end of the address space is determined by all of
@@ -1620,28 +1662,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+ BTRFS_I(fs_info->btree_inode)->root = tree_root;
+ memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
+ sizeof(struct btrfs_key));
+ BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
+ insert_inode_hash(fs_info->btree_inode);
+
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree.rb_node = NULL;
- extent_io_tree_init(&fs_info->pinned_extents,
+ extent_io_tree_init(&fs_info->freed_extents[0],
fs_info->btree_inode->i_mapping, GFP_NOFS);
+ extent_io_tree_init(&fs_info->freed_extents[1],
+ fs_info->btree_inode->i_mapping, GFP_NOFS);
+ fs_info->pinned_extents = &fs_info->freed_extents[0];
fs_info->do_barriers = 1;
- BTRFS_I(fs_info->btree_inode)->root = tree_root;
- memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
- sizeof(struct btrfs_key));
- insert_inode_hash(fs_info->btree_inode);
mutex_init(&fs_info->trans_mutex);
mutex_init(&fs_info->ordered_operations_mutex);
mutex_init(&fs_info->tree_log_mutex);
- mutex_init(&fs_info->drop_mutex);
mutex_init(&fs_info->chunk_mutex);
mutex_init(&fs_info->transaction_kthread_mutex);
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
- mutex_init(&fs_info->tree_reloc_mutex);
init_rwsem(&fs_info->extent_commit_sem);
+ init_rwsem(&fs_info->subvol_sem);
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -1700,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
err = -EINVAL;
goto fail_iput;
}
-
+	printk(KERN_INFO "btrfs: thread pool is %d\n", fs_info->thread_pool_size);
/*
* we need to start all the end_io workers up front because the
* queue work function gets called at interrupt time, and so it
@@ -1745,20 +1791,22 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->endio_workers.idle_thresh = 4;
fs_info->endio_meta_workers.idle_thresh = 4;
- fs_info->endio_write_workers.idle_thresh = 64;
- fs_info->endio_meta_write_workers.idle_thresh = 64;
+ fs_info->endio_write_workers.idle_thresh = 2;
+ fs_info->endio_meta_write_workers.idle_thresh = 2;
+
+ fs_info->endio_workers.atomic_worker_start = 1;
+ fs_info->endio_meta_workers.atomic_worker_start = 1;
+ fs_info->endio_write_workers.atomic_worker_start = 1;
+ fs_info->endio_meta_write_workers.atomic_worker_start = 1;
btrfs_start_workers(&fs_info->workers, 1);
btrfs_start_workers(&fs_info->submit_workers, 1);
btrfs_start_workers(&fs_info->delalloc_workers, 1);
btrfs_start_workers(&fs_info->fixup_workers, 1);
- btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
- btrfs_start_workers(&fs_info->endio_meta_workers,
- fs_info->thread_pool_size);
- btrfs_start_workers(&fs_info->endio_meta_write_workers,
- fs_info->thread_pool_size);
- btrfs_start_workers(&fs_info->endio_write_workers,
- fs_info->thread_pool_size);
+ btrfs_start_workers(&fs_info->endio_workers, 1);
+ btrfs_start_workers(&fs_info->endio_meta_workers, 1);
+ btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
+ btrfs_start_workers(&fs_info->endio_write_workers, 1);
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -1918,6 +1966,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
}
}
+ ret = btrfs_find_orphan_roots(tree_root);
+ BUG_ON(ret);
+
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_recover_relocation(tree_root);
BUG_ON(ret);
@@ -1977,6 +2028,8 @@ fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
fail_bdi:
bdi_destroy(&fs_info->bdi);
+fail_srcu:
+ cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
kfree(extent_root);
kfree(tree_root);
@@ -2236,20 +2289,29 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
- WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
+ spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_delete(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
+ if (btrfs_root_refs(&root->root_item) == 0)
+ synchronize_srcu(&fs_info->subvol_srcu);
+
+ free_fs_root(root);
+ return 0;
+}
+
+static void free_fs_root(struct btrfs_root *root)
+{
+ WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
if (root->anon_super.s_dev) {
down_write(&root->anon_super.s_umount);
kill_anon_super(&root->anon_super);
}
- if (root->node)
- free_extent_buffer(root->node);
- if (root->commit_root)
- free_extent_buffer(root->commit_root);
+ free_extent_buffer(root->node);
+ free_extent_buffer(root->commit_root);
kfree(root->name);
kfree(root);
- return 0;
}
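
btrfs_free_fs_root() above is the updater side of the new subvol_srcu protection: the root is unpublished from the radix tree first, then synchronize_srcu() waits for every lookup still inside an SRCU read-side section before free_fs_root() runs. As a generic sketch, not part of the patch:

static void sketch_retire_root(struct btrfs_fs_info *fs_info,
			       struct btrfs_root *root)
{
	/* 1) make the root unreachable for new lookups */
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	/* 2) wait for readers that found it before the delete */
	synchronize_srcu(&fs_info->subvol_srcu);

	/* 3) nothing can still be using it now */
	free_fs_root(root);
}
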
static int del_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2258,6 +2320,20 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
struct btrfs_root *gang[8];
int i;
+ while (!list_empty(&fs_info->dead_roots)) {
+ gang[0] = list_entry(fs_info->dead_roots.next,
+ struct btrfs_root, root_list);
+ list_del(&gang[0]->root_list);
+
+ if (gang[0]->in_radix) {
+ btrfs_free_fs_root(fs_info, gang[0]);
+ } else {
+ free_extent_buffer(gang[0]->node);
+ free_extent_buffer(gang[0]->commit_root);
+ kfree(gang[0]);
+ }
+ }
+
while (1) {
ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
(void **)gang, 0,
@@ -2287,9 +2363,6 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
root_objectid = gang[ret - 1]->root_key.objectid + 1;
for (i = 0; i < ret; i++) {
root_objectid = gang[i]->root_key.objectid;
- ret = btrfs_find_dead_roots(fs_info->tree_root,
- root_objectid);
- BUG_ON(ret);
btrfs_orphan_cleanup(gang[i]);
}
root_objectid++;
@@ -2359,7 +2432,6 @@ int close_ctree(struct btrfs_root *root)
free_extent_buffer(root->fs_info->csum_root->commit_root);
btrfs_free_block_groups(root->fs_info);
- btrfs_free_pinned_extents(root->fs_info);
del_fs_roots(fs_info);
@@ -2378,6 +2450,7 @@ int close_ctree(struct btrfs_root *root)
btrfs_mapping_tree_free(&fs_info->mapping_tree);
bdi_destroy(&fs_info->bdi);
+ cleanup_srcu_struct(&fs_info->subvol_srcu);
kfree(fs_info->extent_root);
kfree(fs_info->tree_root);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 9596b40caa4..ba5c3fd5ab8 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -28,7 +28,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
len = BTRFS_FID_SIZE_NON_CONNECTABLE;
type = FILEID_BTRFS_WITHOUT_PARENT;
- fid->objectid = BTRFS_I(inode)->location.objectid;
+ fid->objectid = inode->i_ino;
fid->root_objectid = BTRFS_I(inode)->root->objectid;
fid->gen = inode->i_generation;
@@ -60,34 +60,61 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
}
static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
- u64 root_objectid, u32 generation)
+ u64 root_objectid, u32 generation,
+ int check_generation)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
struct btrfs_root *root;
+ struct dentry *dentry;
struct inode *inode;
struct btrfs_key key;
+ int index;
+ int err = 0;
+
+ if (objectid < BTRFS_FIRST_FREE_OBJECTID)
+ return ERR_PTR(-ESTALE);
key.objectid = root_objectid;
btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
key.offset = (u64)-1;
- root = btrfs_read_fs_root_no_name(btrfs_sb(sb)->fs_info, &key);
- if (IS_ERR(root))
- return ERR_CAST(root);
+ index = srcu_read_lock(&fs_info->subvol_srcu);
+
+ root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto fail;
+ }
+
+ if (btrfs_root_refs(&root->root_item) == 0) {
+ err = -ENOENT;
+ goto fail;
+ }
key.objectid = objectid;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
inode = btrfs_iget(sb, &key, root);
- if (IS_ERR(inode))
- return (void *)inode;
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto fail;
+ }
+
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
- if (generation != inode->i_generation) {
+ if (check_generation && generation != inode->i_generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
- return d_obtain_alias(inode);
+ dentry = d_obtain_alias(inode);
+ if (!IS_ERR(dentry))
+ dentry->d_op = &btrfs_dentry_operations;
+ return dentry;
+fail:
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ return ERR_PTR(err);
}
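
btrfs_get_dentry() above is the matching reader side: the root lookup and the inode lookup are bracketed by srcu_read_lock()/srcu_read_unlock() on subvol_srcu, which is exactly what the synchronize_srcu() call on the free path waits for. A stripped-down sketch, with a hypothetical sketch_* helper:

static struct inode *sketch_subvol_iget(struct super_block *sb,
					struct btrfs_key *root_key,
					struct btrfs_key *ino_key)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
	struct btrfs_root *root;
	struct inode *inode;
	int index;

	index = srcu_read_lock(&fs_info->subvol_srcu);
	root = btrfs_read_fs_root_no_name(fs_info, root_key);
	if (IS_ERR(root)) {
		inode = ERR_CAST(root);
		goto out;
	}
	/* the root may only be dereferenced inside the read section */
	inode = btrfs_iget(sb, ino_key, root);
out:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	return inode;
}
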
static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
@@ -111,7 +138,7 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
objectid = fid->parent_objectid;
generation = fid->parent_gen;
- return btrfs_get_dentry(sb, objectid, root_objectid, generation);
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
}
static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
@@ -133,66 +160,76 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
root_objectid = fid->root_objectid;
generation = fid->gen;
- return btrfs_get_dentry(sb, objectid, root_objectid, generation);
+ return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
}
static struct dentry *btrfs_get_parent(struct dentry *child)
{
struct inode *dir = child->d_inode;
+	struct dentry *dentry;
struct btrfs_root *root = BTRFS_I(dir)->root;
- struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
- int slot;
- u64 objectid;
+ struct btrfs_root_ref *ref;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
int ret;
path = btrfs_alloc_path();
- key.objectid = dir->i_ino;
- btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
- key.offset = (u64)-1;
+ if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ key.objectid = root->root_key.objectid;
+ key.type = BTRFS_ROOT_BACKREF_KEY;
+ key.offset = (u64)-1;
+ root = root->fs_info->tree_root;
+ } else {
+ key.objectid = dir->i_ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = (u64)-1;
+ }
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0) {
- /* Error */
- btrfs_free_path(path);
- return ERR_PTR(ret);
+ if (ret < 0)
+ goto fail;
+
+ BUG_ON(ret == 0);
+ if (path->slots[0] == 0) {
+ ret = -ENOENT;
+ goto fail;
}
+
+ path->slots[0]--;
leaf = path->nodes[0];
- slot = path->slots[0];
- if (ret) {
- /* btrfs_search_slot() returns the slot where we'd want to
- insert a backref for parent inode #0xFFFFFFFFFFFFFFFF.
- The _real_ backref, telling us what the parent inode
- _actually_ is, will be in the slot _before_ the one
- that btrfs_search_slot() returns. */
- if (!slot) {
- /* Unless there is _no_ key in the tree before... */
- btrfs_free_path(path);
- return ERR_PTR(-EIO);
- }
- slot--;
+
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ if (found_key.objectid != key.objectid || found_key.type != key.type) {
+ ret = -ENOENT;
+ goto fail;
}
- btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_root_ref);
+ key.objectid = btrfs_root_ref_dirid(leaf, ref);
+ } else {
+ key.objectid = found_key.offset;
+ }
btrfs_free_path(path);
- if (key.objectid != dir->i_ino || key.type != BTRFS_INODE_REF_KEY)
- return ERR_PTR(-EINVAL);
-
- objectid = key.offset;
-
- /* If we are already at the root of a subvol, return the real root */
- if (objectid == dir->i_ino)
- return dget(dir->i_sb->s_root);
+ if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
+ return btrfs_get_dentry(root->fs_info->sb, key.objectid,
+ found_key.offset, 0, 0);
+ }
- /* Build a new key for the inode item */
- key.objectid = objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
-
- return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root));
+ dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root));
+ if (!IS_ERR(dentry))
+ dentry->d_op = &btrfs_dentry_operations;
+ return dentry;
+fail:
+ btrfs_free_path(path);
+ return ERR_PTR(ret);
}
const struct export_operations btrfs_export_ops = {
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 535f85ba104..993f93ff7ba 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -32,12 +32,12 @@
#include "locking.h"
#include "free-space-cache.h"
-static int update_reserved_extents(struct btrfs_root *root,
- u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc,
int mark_free);
+static int update_reserved_extents(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -57,10 +57,17 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins);
-
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 alloc_bytes,
u64 flags, int force);
+static int pin_down_bytes(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 bytenr, u64 num_bytes,
+ int is_data, int reserved,
+ struct extent_buffer **must_clean);
+static int find_next_key(struct btrfs_path *path, int level,
+ struct btrfs_key *key);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -153,34 +160,34 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
return ret;
}
-/*
- * We always set EXTENT_LOCKED for the super mirror extents so we don't
- * overwrite them, so those bits need to be unset. Also, if we are unmounting
- * with pinned extents still sitting there because we had a block group caching,
- * we need to clear those now, since we are done.
- */
-void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
+static int add_excluded_extent(struct btrfs_root *root,
+ u64 start, u64 num_bytes)
{
- u64 start, end, last = 0;
- int ret;
+ u64 end = start + num_bytes - 1;
+ set_extent_bits(&root->fs_info->freed_extents[0],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ set_extent_bits(&root->fs_info->freed_extents[1],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ return 0;
+}
- while (1) {
- ret = find_first_extent_bit(&info->pinned_extents, last,
- &start, &end,
- EXTENT_LOCKED|EXTENT_DIRTY);
- if (ret)
- break;
+static void free_excluded_extents(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ u64 start, end;
- clear_extent_bits(&info->pinned_extents, start, end,
- EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
- last = end+1;
- }
+ start = cache->key.objectid;
+ end = start + cache->key.offset - 1;
+
+ clear_extent_bits(&root->fs_info->freed_extents[0],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
+ clear_extent_bits(&root->fs_info->freed_extents[1],
+ start, end, EXTENT_UPTODATE, GFP_NOFS);
}
-static int remove_sb_from_cache(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache)
+static int exclude_super_stripes(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 *logical;
int stripe_len;
@@ -192,17 +199,42 @@ static int remove_sb_from_cache(struct btrfs_root *root,
cache->key.objectid, bytenr,
0, &logical, &nr, &stripe_len);
BUG_ON(ret);
+
while (nr--) {
- try_lock_extent(&fs_info->pinned_extents,
- logical[nr],
- logical[nr] + stripe_len - 1, GFP_NOFS);
+ cache->bytes_super += stripe_len;
+ ret = add_excluded_extent(root, logical[nr],
+ stripe_len);
+ BUG_ON(ret);
}
+
kfree(logical);
}
-
return 0;
}
+static struct btrfs_caching_control *
+get_caching_control(struct btrfs_block_group_cache *cache)
+{
+ struct btrfs_caching_control *ctl;
+
+ spin_lock(&cache->lock);
+ if (cache->cached != BTRFS_CACHE_STARTED) {
+ spin_unlock(&cache->lock);
+ return NULL;
+ }
+
+ ctl = cache->caching_ctl;
+ atomic_inc(&ctl->count);
+ spin_unlock(&cache->lock);
+ return ctl;
+}
+
+static void put_caching_control(struct btrfs_caching_control *ctl)
+{
+ if (atomic_dec_and_test(&ctl->count))
+ kfree(ctl);
+}
+
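
get_caching_control()/put_caching_control() above hand out a refcounted handle that only exists while a block group is actively caching: the reference is taken under cache->lock and the last put frees it. The structure itself is defined elsewhere in the patch; judging purely from the fields used in this section, it looks roughly like the sketch below (field order and any members not used here are unknown).

struct btrfs_caching_control_sketch {
	struct list_head list;		/* on fs_info->caching_block_groups */
	struct mutex mutex;		/* held by the caching kthread */
	wait_queue_head_t wait;		/* woken as free space is found */
	struct btrfs_block_group_cache *block_group;
	u64 progress;			/* caching has scanned up to here */
	atomic_t count;			/* last put_caching_control() frees */
};
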
/*
* this is only called by cache_block_group, since we could have freed extents
* we need to check the pinned_extents for any extents that can't be used yet
@@ -215,9 +247,9 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
int ret;
while (start < end) {
- ret = find_first_extent_bit(&info->pinned_extents, start,
+ ret = find_first_extent_bit(info->pinned_extents, start,
&extent_start, &extent_end,
- EXTENT_DIRTY|EXTENT_LOCKED);
+ EXTENT_DIRTY | EXTENT_UPTODATE);
if (ret)
break;
@@ -249,22 +281,27 @@ static int caching_kthread(void *data)
{
struct btrfs_block_group_cache *block_group = data;
struct btrfs_fs_info *fs_info = block_group->fs_info;
- u64 last = 0;
+ struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
+ struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_path *path;
- int ret = 0;
- struct btrfs_key key;
struct extent_buffer *leaf;
- int slot;
+ struct btrfs_key key;
u64 total_found = 0;
-
- BUG_ON(!fs_info);
+ u64 last = 0;
+ u32 nritems;
+ int ret = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- atomic_inc(&block_group->space_info->caching_threads);
+ exclude_super_stripes(extent_root, block_group);
+ spin_lock(&block_group->space_info->lock);
+ block_group->space_info->bytes_super += block_group->bytes_super;
+ spin_unlock(&block_group->space_info->lock);
+
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+
/*
* We don't want to deadlock with somebody trying to allocate a new
* extent for the extent root while also trying to search the extent
@@ -277,74 +314,64 @@ static int caching_kthread(void *data)
key.objectid = last;
key.offset = 0;
- btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+ key.type = BTRFS_EXTENT_ITEM_KEY;
again:
+ mutex_lock(&caching_ctl->mutex);
/* need to make sure the commit_root doesn't disappear */
down_read(&fs_info->extent_commit_sem);
- ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto err;
+ leaf = path->nodes[0];
+ nritems = btrfs_header_nritems(leaf);
+
while (1) {
smp_mb();
- if (block_group->fs_info->closing > 1) {
+ if (fs_info->closing > 1) {
last = (u64)-1;
break;
}
- leaf = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(fs_info->extent_root, path);
- if (ret < 0)
- goto err;
- else if (ret)
+ if (path->slots[0] < nritems) {
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ } else {
+ ret = find_next_key(path, 0, &key);
+ if (ret)
break;
- if (need_resched() ||
- btrfs_transaction_in_commit(fs_info)) {
- leaf = path->nodes[0];
-
- /* this shouldn't happen, but if the
- * leaf is empty just move on.
- */
- if (btrfs_header_nritems(leaf) == 0)
- break;
- /*
- * we need to copy the key out so that
- * we are sure the next search advances
- * us forward in the btree.
- */
- btrfs_item_key_to_cpu(leaf, &key, 0);
- btrfs_release_path(fs_info->extent_root, path);
- up_read(&fs_info->extent_commit_sem);
+ caching_ctl->progress = last;
+ btrfs_release_path(extent_root, path);
+ up_read(&fs_info->extent_commit_sem);
+ mutex_unlock(&caching_ctl->mutex);
+ if (btrfs_transaction_in_commit(fs_info))
schedule_timeout(1);
- goto again;
- }
+ else
+ cond_resched();
+ goto again;
+ }
+ if (key.objectid < block_group->key.objectid) {
+ path->slots[0]++;
continue;
}
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid < block_group->key.objectid)
- goto next;
if (key.objectid >= block_group->key.objectid +
block_group->key.offset)
break;
- if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
+ if (key.type == BTRFS_EXTENT_ITEM_KEY) {
total_found += add_new_free_space(block_group,
fs_info, last,
key.objectid);
last = key.objectid + key.offset;
- }
- if (total_found > (1024 * 1024 * 2)) {
- total_found = 0;
- wake_up(&block_group->caching_q);
+ if (total_found > (1024 * 1024 * 2)) {
+ total_found = 0;
+ wake_up(&caching_ctl->wait);
+ }
}
-next:
path->slots[0]++;
}
ret = 0;
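
The rewritten loop above never sleeps while holding the btree path, the commit semaphore and the caching mutex: it saves its progress, drops everything, yields, and re-searches from the saved key. A generic sketch of that drop-and-retry idiom (the sketch_* helper is hypothetical):

static int sketch_yield_and_research(struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct rw_semaphore *sem,
				     struct btrfs_key *resume_key)
{
	/* drop everything a blocked task must not hold */
	btrfs_release_path(root, path);
	up_read(sem);

	cond_resched();			/* safe to yield now */

	/* reacquire and continue from the recorded position */
	down_read(sem);
	return btrfs_search_slot(NULL, root, resume_key, path, 0, 0);
}
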
@@ -352,33 +379,65 @@ next:
total_found += add_new_free_space(block_group, fs_info, last,
block_group->key.objectid +
block_group->key.offset);
+ caching_ctl->progress = (u64)-1;
spin_lock(&block_group->lock);
+ block_group->caching_ctl = NULL;
block_group->cached = BTRFS_CACHE_FINISHED;
spin_unlock(&block_group->lock);
err:
btrfs_free_path(path);
up_read(&fs_info->extent_commit_sem);
- atomic_dec(&block_group->space_info->caching_threads);
- wake_up(&block_group->caching_q);
+ free_excluded_extents(extent_root, block_group);
+
+ mutex_unlock(&caching_ctl->mutex);
+ wake_up(&caching_ctl->wait);
+
+ put_caching_control(caching_ctl);
+ atomic_dec(&block_group->space_info->caching_threads);
return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_caching_control *caching_ctl;
struct task_struct *tsk;
int ret = 0;
+ smp_mb();
+ if (cache->cached != BTRFS_CACHE_NO)
+ return 0;
+
+ caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
+ BUG_ON(!caching_ctl);
+
+ INIT_LIST_HEAD(&caching_ctl->list);
+ mutex_init(&caching_ctl->mutex);
+ init_waitqueue_head(&caching_ctl->wait);
+ caching_ctl->block_group = cache;
+ caching_ctl->progress = cache->key.objectid;
+ /* one for caching kthread, one for caching block group list */
+ atomic_set(&caching_ctl->count, 2);
+
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
spin_unlock(&cache->lock);
- return ret;
+ kfree(caching_ctl);
+ return 0;
}
+ cache->caching_ctl = caching_ctl;
cache->cached = BTRFS_CACHE_STARTED;
spin_unlock(&cache->lock);
+ down_write(&fs_info->extent_commit_sem);
+ list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
+ up_write(&fs_info->extent_commit_sem);
+
+ atomic_inc(&cache->space_info->caching_threads);
+
tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
cache->key.objectid);
if (IS_ERR(tsk)) {
@@ -1657,7 +1716,6 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
parent, ref_root, flags,
ref->objectid, ref->offset,
&ins, node->ref_mod);
- update_reserved_extents(root, ins.objectid, ins.offset, 0);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
node->num_bytes, parent,
@@ -1783,7 +1841,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
extent_op->flags_to_set,
&extent_op->key,
ref->level, &ins);
- update_reserved_extents(root, ins.objectid, ins.offset, 0);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
node->num_bytes, parent, ref_root,
@@ -1818,16 +1875,32 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
BUG_ON(extent_op);
head = btrfs_delayed_node_to_head(node);
if (insert_reserved) {
+ int mark_free = 0;
+ struct extent_buffer *must_clean = NULL;
+
+ ret = pin_down_bytes(trans, root, NULL,
+ node->bytenr, node->num_bytes,
+ head->is_data, 1, &must_clean);
+ if (ret > 0)
+ mark_free = 1;
+
+ if (must_clean) {
+ clean_tree_block(NULL, root, must_clean);
+ btrfs_tree_unlock(must_clean);
+ free_extent_buffer(must_clean);
+ }
if (head->is_data) {
ret = btrfs_del_csums(trans, root,
node->bytenr,
node->num_bytes);
BUG_ON(ret);
}
- btrfs_update_pinned_extents(root, node->bytenr,
- node->num_bytes, 1);
- update_reserved_extents(root, node->bytenr,
- node->num_bytes, 0);
+ if (mark_free) {
+ ret = btrfs_free_reserved_extent(root,
+ node->bytenr,
+ node->num_bytes);
+ BUG_ON(ret);
+ }
}
mutex_unlock(&head->mutex);
return 0;
@@ -2706,6 +2779,8 @@ int btrfs_check_metadata_free_space(struct btrfs_root *root)
/* get the space info for where the metadata will live */
alloc_target = btrfs_get_alloc_profile(root, 0);
meta_sinfo = __find_space_info(info, alloc_target);
+ if (!meta_sinfo)
+ goto alloc;
again:
spin_lock(&meta_sinfo->lock);
@@ -2717,12 +2792,13 @@ again:
do_div(thresh, 100);
if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
- meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
+ meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+ meta_sinfo->bytes_super > thresh) {
struct btrfs_trans_handle *trans;
if (!meta_sinfo->full) {
meta_sinfo->force_alloc = 1;
spin_unlock(&meta_sinfo->lock);
-
+alloc:
trans = btrfs_start_transaction(root, 1);
if (!trans)
return -ENOMEM;
@@ -2730,6 +2806,10 @@ again:
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2 * 1024 * 1024, alloc_target, 0);
btrfs_end_transaction(trans, root);
+ if (!meta_sinfo) {
+ meta_sinfo = __find_space_info(info,
+ alloc_target);
+ }
goto again;
}
spin_unlock(&meta_sinfo->lock);
@@ -2765,13 +2845,16 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
data_sinfo = BTRFS_I(inode)->space_info;
+ if (!data_sinfo)
+ goto alloc;
+
again:
/* make sure we have enough space to handle the data first */
spin_lock(&data_sinfo->lock);
if (data_sinfo->total_bytes - data_sinfo->bytes_used -
data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
- data_sinfo->bytes_may_use < bytes) {
+ data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
struct btrfs_trans_handle *trans;
/*
@@ -2783,7 +2866,7 @@ again:
data_sinfo->force_alloc = 1;
spin_unlock(&data_sinfo->lock);
-
+alloc:
alloc_target = btrfs_get_alloc_profile(root, 1);
trans = btrfs_start_transaction(root, 1);
if (!trans)
@@ -2795,6 +2878,11 @@ again:
btrfs_end_transaction(trans, root);
if (ret)
return ret;
+
+ if (!data_sinfo) {
+ btrfs_set_inode_space_info(root, inode);
+ data_sinfo = BTRFS_I(inode)->space_info;
+ }
goto again;
}
spin_unlock(&data_sinfo->lock);
@@ -3009,10 +3097,12 @@ static int update_block_group(struct btrfs_trans_handle *trans,
num_bytes = min(total, cache->key.offset - byte_in_group);
if (alloc) {
old_val += num_bytes;
+ btrfs_set_block_group_used(&cache->item, old_val);
+ cache->reserved -= num_bytes;
cache->space_info->bytes_used += num_bytes;
+ cache->space_info->bytes_reserved -= num_bytes;
if (cache->ro)
cache->space_info->bytes_readonly -= num_bytes;
- btrfs_set_block_group_used(&cache->item, old_val);
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
} else {
@@ -3057,127 +3147,136 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
return bytenr;
}
-int btrfs_update_pinned_extents(struct btrfs_root *root,
- u64 bytenr, u64 num, int pin)
+/*
+ * this function must be called within a transaction
+ */
+int btrfs_pin_extent(struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, int reserved)
{
- u64 len;
- struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_group_cache *cache;
- if (pin)
- set_extent_dirty(&fs_info->pinned_extents,
- bytenr, bytenr + num - 1, GFP_NOFS);
-
- while (num > 0) {
- cache = btrfs_lookup_block_group(fs_info, bytenr);
- BUG_ON(!cache);
- len = min(num, cache->key.offset -
- (bytenr - cache->key.objectid));
- if (pin) {
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- cache->pinned += len;
- cache->space_info->bytes_pinned += len;
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
- fs_info->total_pinned += len;
- } else {
- int unpin = 0;
+ cache = btrfs_lookup_block_group(fs_info, bytenr);
+ BUG_ON(!cache);
- /*
- * in order to not race with the block group caching, we
- * only want to unpin the extent if we are cached. If
- * we aren't cached, we want to start async caching this
- * block group so we can free the extent the next time
- * around.
- */
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- unpin = (cache->cached == BTRFS_CACHE_FINISHED);
- if (likely(unpin)) {
- cache->pinned -= len;
- cache->space_info->bytes_pinned -= len;
- fs_info->total_pinned -= len;
- }
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
+ spin_lock(&cache->space_info->lock);
+ spin_lock(&cache->lock);
+ cache->pinned += num_bytes;
+ cache->space_info->bytes_pinned += num_bytes;
+ if (reserved) {
+ cache->reserved -= num_bytes;
+ cache->space_info->bytes_reserved -= num_bytes;
+ }
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
- if (likely(unpin))
- clear_extent_dirty(&fs_info->pinned_extents,
- bytenr, bytenr + len -1,
- GFP_NOFS);
- else
- cache_block_group(cache);
+ btrfs_put_block_group(cache);
- if (unpin)
- btrfs_add_free_space(cache, bytenr, len);
- }
- btrfs_put_block_group(cache);
- bytenr += len;
- num -= len;
+ set_extent_dirty(fs_info->pinned_extents,
+ bytenr, bytenr + num_bytes - 1, GFP_NOFS);
+ return 0;
+}
+
+static int update_reserved_extents(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int reserve)
+{
+ spin_lock(&cache->space_info->lock);
+ spin_lock(&cache->lock);
+ if (reserve) {
+ cache->reserved += num_bytes;
+ cache->space_info->bytes_reserved += num_bytes;
+ } else {
+ cache->reserved -= num_bytes;
+ cache->space_info->bytes_reserved -= num_bytes;
}
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
return 0;
}
-static int update_reserved_extents(struct btrfs_root *root,
- u64 bytenr, u64 num, int reserve)
+int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
{
- u64 len;
- struct btrfs_block_group_cache *cache;
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_caching_control *next;
+ struct btrfs_caching_control *caching_ctl;
+ struct btrfs_block_group_cache *cache;
- while (num > 0) {
- cache = btrfs_lookup_block_group(fs_info, bytenr);
- BUG_ON(!cache);
- len = min(num, cache->key.offset -
- (bytenr - cache->key.objectid));
+ down_write(&fs_info->extent_commit_sem);
- spin_lock(&cache->space_info->lock);
- spin_lock(&cache->lock);
- if (reserve) {
- cache->reserved += len;
- cache->space_info->bytes_reserved += len;
+ list_for_each_entry_safe(caching_ctl, next,
+ &fs_info->caching_block_groups, list) {
+ cache = caching_ctl->block_group;
+ if (block_group_cache_done(cache)) {
+ cache->last_byte_to_unpin = (u64)-1;
+ list_del_init(&caching_ctl->list);
+ put_caching_control(caching_ctl);
} else {
- cache->reserved -= len;
- cache->space_info->bytes_reserved -= len;
+ cache->last_byte_to_unpin = caching_ctl->progress;
}
- spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
- btrfs_put_block_group(cache);
- bytenr += len;
- num -= len;
}
+
+ if (fs_info->pinned_extents == &fs_info->freed_extents[0])
+ fs_info->pinned_extents = &fs_info->freed_extents[1];
+ else
+ fs_info->pinned_extents = &fs_info->freed_extents[0];
+
+ up_write(&fs_info->extent_commit_sem);
return 0;
}
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
+static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
- u64 last = 0;
- u64 start;
- u64 end;
- struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
- int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_group_cache *cache = NULL;
+ u64 len;
- while (1) {
- ret = find_first_extent_bit(pinned_extents, last,
- &start, &end, EXTENT_DIRTY);
- if (ret)
- break;
+ while (start <= end) {
+ if (!cache ||
+ start >= cache->key.objectid + cache->key.offset) {
+ if (cache)
+ btrfs_put_block_group(cache);
+ cache = btrfs_lookup_block_group(fs_info, start);
+ BUG_ON(!cache);
+ }
+
+ len = cache->key.objectid + cache->key.offset - start;
+ len = min(len, end + 1 - start);
- set_extent_dirty(copy, start, end, GFP_NOFS);
- last = end + 1;
+ if (start < cache->last_byte_to_unpin) {
+ len = min(len, cache->last_byte_to_unpin - start);
+ btrfs_add_free_space(cache, start, len);
+ }
+
+ spin_lock(&cache->space_info->lock);
+ spin_lock(&cache->lock);
+ cache->pinned -= len;
+ cache->space_info->bytes_pinned -= len;
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+
+ start += len;
}
+
+ if (cache)
+ btrfs_put_block_group(cache);
return 0;
}
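
unpin_extent_range() above walks the range block group by block group and only returns space below cache->last_byte_to_unpin (the caching progress snapshot taken in btrfs_prepare_extent_commit()) to the free-space cache, so the still-running caching thread and the unpin path never add the same bytes twice. A small sketch of the clamp, with made-up numbers in the follow-up comment:

static u64 sketch_bytes_freed(u64 bg_start, u64 bg_len,
			      u64 last_byte_to_unpin, u64 start, u64 end)
{
	u64 len = min(bg_start + bg_len - start, end + 1 - start);

	if (start >= last_byte_to_unpin)
		return 0;
	return min(len, last_byte_to_unpin - start);
}

/*
 * Example: block group [16M, 32M), last_byte_to_unpin snapshotted at
 * 20M, unpinning [18M, 24M).  len clamps to 6M and all 6M leave the
 * pinned counters, but only 2M ([18M, 20M)) is added back as free
 * space here; the caching thread, still scanning past 20M, adds the
 * remaining 4M itself.
 */
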
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_io_tree *unpin)
+ struct btrfs_root *root)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_io_tree *unpin;
u64 start;
u64 end;
int ret;
+ if (fs_info->pinned_extents == &fs_info->freed_extents[0])
+ unpin = &fs_info->freed_extents[1];
+ else
+ unpin = &fs_info->freed_extents[0];
+
while (1) {
ret = find_first_extent_bit(unpin, 0, &start, &end,
EXTENT_DIRTY);
@@ -3186,10 +3285,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
ret = btrfs_discard_extent(root, start, end + 1 - start);
- /* unlocks the pinned mutex */
- btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
-
+ unpin_extent_range(root, start, end);
cond_resched();
}
@@ -3199,7 +3296,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
static int pin_down_bytes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- u64 bytenr, u64 num_bytes, int is_data,
+ u64 bytenr, u64 num_bytes,
+ int is_data, int reserved,
struct extent_buffer **must_clean)
{
int err = 0;
@@ -3231,15 +3329,15 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
}
free_extent_buffer(buf);
pinit:
- btrfs_set_path_blocking(path);
+ if (path)
+ btrfs_set_path_blocking(path);
/* unlocks the pinned mutex */
- btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
+ btrfs_pin_extent(root, bytenr, num_bytes, reserved);
BUG_ON(err < 0);
return 0;
}
-
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -3413,7 +3511,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
ret = pin_down_bytes(trans, root, path, bytenr,
- num_bytes, is_data, &must_clean);
+ num_bytes, is_data, 0, &must_clean);
if (ret > 0)
mark_free = 1;
BUG_ON(ret < 0);
@@ -3544,8 +3642,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
/* unlocks the pinned mutex */
- btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
- update_reserved_extents(root, bytenr, num_bytes, 0);
+ btrfs_pin_extent(root, bytenr, num_bytes, 1);
ret = 0;
} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
@@ -3585,19 +3682,33 @@ static noinline int
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
u64 num_bytes)
{
+ struct btrfs_caching_control *caching_ctl;
DEFINE_WAIT(wait);
- prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
-
- if (block_group_cache_done(cache)) {
- finish_wait(&cache->caching_q, &wait);
+ caching_ctl = get_caching_control(cache);
+ if (!caching_ctl)
return 0;
- }
- schedule();
- finish_wait(&cache->caching_q, &wait);
- wait_event(cache->caching_q, block_group_cache_done(cache) ||
+ wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
(cache->free_space >= num_bytes));
+
+ put_caching_control(caching_ctl);
+ return 0;
+}
+
+static noinline int
+wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
+{
+ struct btrfs_caching_control *caching_ctl;
+ DEFINE_WAIT(wait);
+
+ caching_ctl = get_caching_control(cache);
+ if (!caching_ctl)
+ return 0;
+
+ wait_event(caching_ctl->wait, block_group_cache_done(cache));
+
+ put_caching_control(caching_ctl);
return 0;
}
@@ -3635,6 +3746,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
int last_ptr_loop = 0;
int loop = 0;
bool found_uncached_bg = false;
+ bool failed_cluster_refill = false;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3732,7 +3844,16 @@ have_block_group:
if (unlikely(block_group->ro))
goto loop;
- if (last_ptr) {
+ /*
+ * OK, we want to try to use the cluster allocator, so let's look
+ * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
+ * have tried the cluster allocator plenty of times at this
+ * point and not have found anything, so we are likely way too
+ * fragmented for the clustering stuff to find anything, so let's
+ * just skip it and let the allocator find whatever block it can
+ * find.
+ */
+ if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
/*
* the refill lock keeps out other
* people trying to start a new cluster
@@ -3807,9 +3928,11 @@ refill_cluster:
spin_unlock(&last_ptr->refill_lock);
goto checks;
}
- } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
+ } else if (!cached && loop > LOOP_CACHING_NOWAIT
+ && !failed_cluster_refill) {
spin_unlock(&last_ptr->refill_lock);
+ failed_cluster_refill = true;
wait_block_group_cache_progress(block_group,
num_bytes + empty_cluster + empty_size);
goto have_block_group;
@@ -3821,13 +3944,9 @@ refill_cluster:
* cluster. Free the cluster we've been trying
* to use, and go to the next block group
*/
- if (loop < LOOP_NO_EMPTY_SIZE) {
- btrfs_return_cluster_to_free_space(NULL,
- last_ptr);
- spin_unlock(&last_ptr->refill_lock);
- goto loop;
- }
+ btrfs_return_cluster_to_free_space(NULL, last_ptr);
spin_unlock(&last_ptr->refill_lock);
+ goto loop;
}
offset = btrfs_find_space_for_alloc(block_group, search_start,
@@ -3881,9 +4000,12 @@ checks:
search_start - offset);
BUG_ON(offset > search_start);
+ update_reserved_extents(block_group, num_bytes, 1);
+
/* we are all good, lets return */
break;
loop:
+ failed_cluster_refill = false;
btrfs_put_block_group(block_group);
}
up_read(&space_info->groups_sem);
@@ -3973,12 +4095,12 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
up_read(&info->groups_sem);
}
-static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 num_bytes, u64 min_alloc_size,
- u64 empty_size, u64 hint_byte,
- u64 search_end, struct btrfs_key *ins,
- u64 data)
+int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 num_bytes, u64 min_alloc_size,
+ u64 empty_size, u64 hint_byte,
+ u64 search_end, struct btrfs_key *ins,
+ u64 data)
{
int ret;
u64 search_start = 0;
@@ -4044,25 +4166,8 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
ret = btrfs_discard_extent(root, start, len);
btrfs_add_free_space(cache, start, len);
+ update_reserved_extents(cache, len, 0);
btrfs_put_block_group(cache);
- update_reserved_extents(root, start, len, 0);
-
- return ret;
-}
-
-int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 num_bytes, u64 min_alloc_size,
- u64 empty_size, u64 hint_byte,
- u64 search_end, struct btrfs_key *ins,
- u64 data)
-{
- int ret;
- ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
- empty_size, hint_byte, search_end, ins,
- data);
- if (!ret)
- update_reserved_extents(root, ins->objectid, ins->offset, 1);
return ret;
}
@@ -4223,15 +4328,46 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
{
int ret;
struct btrfs_block_group_cache *block_group;
+ struct btrfs_caching_control *caching_ctl;
+ u64 start = ins->objectid;
+ u64 num_bytes = ins->offset;
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
cache_block_group(block_group);
- wait_event(block_group->caching_q,
- block_group_cache_done(block_group));
+ caching_ctl = get_caching_control(block_group);
- ret = btrfs_remove_free_space(block_group, ins->objectid,
- ins->offset);
- BUG_ON(ret);
+ if (!caching_ctl) {
+ BUG_ON(!block_group_cache_done(block_group));
+ ret = btrfs_remove_free_space(block_group, start, num_bytes);
+ BUG_ON(ret);
+ } else {
+ mutex_lock(&caching_ctl->mutex);
+
+ if (start >= caching_ctl->progress) {
+ ret = add_excluded_extent(root, start, num_bytes);
+ BUG_ON(ret);
+ } else if (start + num_bytes <= caching_ctl->progress) {
+ ret = btrfs_remove_free_space(block_group,
+ start, num_bytes);
+ BUG_ON(ret);
+ } else {
+ num_bytes = caching_ctl->progress - start;
+ ret = btrfs_remove_free_space(block_group,
+ start, num_bytes);
+ BUG_ON(ret);
+
+ start = caching_ctl->progress;
+ num_bytes = ins->objectid + ins->offset -
+ caching_ctl->progress;
+ ret = add_excluded_extent(root, start, num_bytes);
+ BUG_ON(ret);
+ }
+
+ mutex_unlock(&caching_ctl->mutex);
+ put_caching_control(caching_ctl);
+ }
+
+ update_reserved_extents(block_group, ins->offset, 1);
btrfs_put_block_group(block_group);
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
@@ -4255,9 +4391,9 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
int ret;
u64 flags = 0;
- ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
- empty_size, hint_byte, search_end,
- ins, 0);
+ ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
+ empty_size, hint_byte, search_end,
+ ins, 0);
if (ret)
return ret;
@@ -4268,7 +4404,6 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
} else
BUG_ON(parent > 0);
- update_reserved_extents(root, ins->objectid, ins->offset, 1);
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_delayed_extent_op *extent_op;
extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
@@ -4347,452 +4482,99 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return buf;
}
-#if 0
-int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *leaf)
-{
- u64 disk_bytenr;
- u64 num_bytes;
- struct btrfs_key key;
- struct btrfs_file_extent_item *fi;
- u32 nritems;
- int i;
- int ret;
-
- BUG_ON(!btrfs_is_leaf(leaf));
- nritems = btrfs_header_nritems(leaf);
-
- for (i = 0; i < nritems; i++) {
- cond_resched();
- btrfs_item_key_to_cpu(leaf, &key, i);
-
- /* only extents have references, skip everything else */
- if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
- continue;
-
- fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
-
- /* inline extents live in the btree, they don't have refs */
- if (btrfs_file_extent_type(leaf, fi) ==
- BTRFS_FILE_EXTENT_INLINE)
- continue;
-
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
-
- /* holes don't have refs */
- if (disk_bytenr == 0)
- continue;
-
- num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
- ret = btrfs_free_extent(trans, root, disk_bytenr, num_bytes,
- leaf->start, 0, key.objectid, 0);
- BUG_ON(ret);
- }
- return 0;
-}
-
-static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_leaf_ref *ref)
-{
- int i;
- int ret;
- struct btrfs_extent_info *info;
- struct refsort *sorted;
-
- if (ref->nritems == 0)
- return 0;
-
- sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
- for (i = 0; i < ref->nritems; i++) {
- sorted[i].bytenr = ref->extents[i].bytenr;
- sorted[i].slot = i;
- }
- sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
-
- /*
- * the items in the ref were sorted when the ref was inserted
- * into the ref cache, so this is already in order
- */
- for (i = 0; i < ref->nritems; i++) {
- info = ref->extents + sorted[i].slot;
- ret = btrfs_free_extent(trans, root, info->bytenr,
- info->num_bytes, ref->bytenr,
- ref->owner, ref->generation,
- info->objectid, 0);
-
- atomic_inc(&root->fs_info->throttle_gen);
- wake_up(&root->fs_info->transaction_throttle);
- cond_resched();
-
- BUG_ON(ret);
- info++;
- }
-
- kfree(sorted);
- return 0;
-}
-
-
-static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 start,
- u64 len, u32 *refs)
-{
- int ret;
-
- ret = btrfs_lookup_extent_refs(trans, root, start, len, refs);
- BUG_ON(ret);
-
-#if 0 /* some debugging code in case we see problems here */
- /* if the refs count is one, it won't get increased again. But
- * if the ref count is > 1, someone may be decreasing it at
- * the same time we are.
- */
- if (*refs != 1) {
- struct extent_buffer *eb = NULL;
- eb = btrfs_find_create_tree_block(root, start, len);
- if (eb)
- btrfs_tree_lock(eb);
-
- mutex_lock(&root->fs_info->alloc_mutex);
- ret = lookup_extent_ref(NULL, root, start, len, refs);
- BUG_ON(ret);
- mutex_unlock(&root->fs_info->alloc_mutex);
-
- if (eb) {
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
- }
- if (*refs == 1) {
- printk(KERN_ERR "btrfs block %llu went down to one "
- "during drop_snap\n", (unsigned long long)start);
- }
-
- }
-#endif
-
- cond_resched();
- return ret;
-}
+struct walk_control {
+ u64 refs[BTRFS_MAX_LEVEL];
+ u64 flags[BTRFS_MAX_LEVEL];
+ struct btrfs_key update_progress;
+ int stage;
+ int level;
+ int shared_level;
+ int update_ref;
+ int keep_locks;
+ int reada_slot;
+ int reada_count;
+};
+#define DROP_REFERENCE 1
+#define UPDATE_BACKREF 2
-/*
- * this is used while deleting old snapshots, and it drops the refs
- * on a whole subtree starting from a level 1 node.
- *
- * The idea is to sort all the leaf pointers, and then drop the
- * ref on all the leaves in order. Most of the time the leaves
- * will have ref cache entries, so no leaf IOs will be required to
- * find the extents they have references on.
- *
- * For each leaf, any references it has are also dropped in order
- *
- * This ends up dropping the references in something close to optimal
- * order for reading and modifying the extent allocation tree.
- */
-static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path)
+static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct walk_control *wc,
+ struct btrfs_path *path)
{
u64 bytenr;
- u64 root_owner;
- u64 root_gen;
- struct extent_buffer *eb = path->nodes[1];
- struct extent_buffer *leaf;
- struct btrfs_leaf_ref *ref;
- struct refsort *sorted = NULL;
- int nritems = btrfs_header_nritems(eb);
+ u64 generation;
+ u64 refs;
+ u64 last = 0;
+ u32 nritems;
+ u32 blocksize;
+ struct btrfs_key key;
+ struct extent_buffer *eb;
int ret;
- int i;
- int refi = 0;
- int slot = path->slots[1];
- u32 blocksize = btrfs_level_size(root, 0);
- u32 refs;
-
- if (nritems == 0)
- goto out;
-
- root_owner = btrfs_header_owner(eb);
- root_gen = btrfs_header_generation(eb);
- sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+ int slot;
+ int nread = 0;
- /*
- * step one, sort all the leaf pointers so we don't scribble
- * randomly into the extent allocation tree
- */
- for (i = slot; i < nritems; i++) {
- sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
- sorted[refi].slot = i;
- refi++;
+ if (path->slots[wc->level] < wc->reada_slot) {
+ wc->reada_count = wc->reada_count * 2 / 3;
+ wc->reada_count = max(wc->reada_count, 2);
+ } else {
+ wc->reada_count = wc->reada_count * 3 / 2;
+ wc->reada_count = min_t(int, wc->reada_count,
+ BTRFS_NODEPTRS_PER_BLOCK(root));
}
- /*
- * nritems won't be zero, but if we're picking up drop_snapshot
- * after a crash, slot might be > 0, so double check things
- * just in case.
- */
- if (refi == 0)
- goto out;
+ eb = path->nodes[wc->level];
+ nritems = btrfs_header_nritems(eb);
+ blocksize = btrfs_level_size(root, wc->level - 1);
- sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+ for (slot = path->slots[wc->level]; slot < nritems; slot++) {
+ if (nread >= wc->reada_count)
+ break;
- /*
- * the first loop frees everything the leaves point to
- */
- for (i = 0; i < refi; i++) {
- u64 ptr_gen;
+ cond_resched();
+ bytenr = btrfs_node_blockptr(eb, slot);
+ generation = btrfs_node_ptr_generation(eb, slot);
- bytenr = sorted[i].bytenr;
+ if (slot == path->slots[wc->level])
+ goto reada;
- /*
- * check the reference count on this leaf. If it is > 1
- * we just decrement it below and don't update any
- * of the refs the leaf points to.
- */
- ret = drop_snap_lookup_refcount(trans, root, bytenr,
- blocksize, &refs);
- BUG_ON(ret);
- if (refs != 1)
+ if (wc->stage == UPDATE_BACKREF &&
+ generation <= root->root_key.offset)
continue;
- ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
-
- /*
- * the leaf only had one reference, which means the
- * only thing pointing to this leaf is the snapshot
- * we're deleting. It isn't possible for the reference
- * count to increase again later
- *
- * The reference cache is checked for the leaf,
- * and if found we'll be able to drop any refs held by
- * the leaf without needing to read it in.
- */
- ref = btrfs_lookup_leaf_ref(root, bytenr);
- if (ref && ref->generation != ptr_gen) {
- btrfs_free_leaf_ref(root, ref);
- ref = NULL;
- }
- if (ref) {
- ret = cache_drop_leaf_ref(trans, root, ref);
- BUG_ON(ret);
- btrfs_remove_leaf_ref(root, ref);
- btrfs_free_leaf_ref(root, ref);
- } else {
- /*
- * the leaf wasn't in the reference cache, so
- * we have to read it.
- */
- leaf = read_tree_block(root, bytenr, blocksize,
- ptr_gen);
- ret = btrfs_drop_leaf_ref(trans, root, leaf);
+ if (wc->stage == DROP_REFERENCE) {
+ ret = btrfs_lookup_extent_info(trans, root,
+ bytenr, blocksize,
+ &refs, NULL);
BUG_ON(ret);
- free_extent_buffer(leaf);
- }
- atomic_inc(&root->fs_info->throttle_gen);
- wake_up(&root->fs_info->transaction_throttle);
- cond_resched();
- }
-
- /*
- * run through the loop again to free the refs on the leaves.
- * This is faster than doing it in the loop above because
- * the leaves are likely to be clustered together. We end up
- * working in nice chunks on the extent allocation tree.
- */
- for (i = 0; i < refi; i++) {
- bytenr = sorted[i].bytenr;
- ret = btrfs_free_extent(trans, root, bytenr,
- blocksize, eb->start,
- root_owner, root_gen, 0, 1);
- BUG_ON(ret);
-
- atomic_inc(&root->fs_info->throttle_gen);
- wake_up(&root->fs_info->transaction_throttle);
- cond_resched();
- }
-out:
- kfree(sorted);
-
- /*
- * update the path to show we've processed the entire level 1
- * node. This will get saved into the root's drop_snapshot_progress
- * field so these drops are not repeated again if this transaction
- * commits.
- */
- path->slots[1] = nritems;
- return 0;
-}
-
-/*
- * helper function for drop_snapshot, this walks down the tree dropping ref
- * counts as it goes.
- */
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path, int *level)
-{
- u64 root_owner;
- u64 root_gen;
- u64 bytenr;
- u64 ptr_gen;
- struct extent_buffer *next;
- struct extent_buffer *cur;
- struct extent_buffer *parent;
- u32 blocksize;
- int ret;
- u32 refs;
-
- WARN_ON(*level < 0);
- WARN_ON(*level >= BTRFS_MAX_LEVEL);
- ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
- path->nodes[*level]->len, &refs);
- BUG_ON(ret);
- if (refs > 1)
- goto out;
-
- /*
- * walk down to the last node level and free all the leaves
- */
- while (*level >= 0) {
- WARN_ON(*level < 0);
- WARN_ON(*level >= BTRFS_MAX_LEVEL);
- cur = path->nodes[*level];
-
- if (btrfs_header_level(cur) != *level)
- WARN_ON(1);
+ BUG_ON(refs == 0);
+ if (refs == 1)
+ goto reada;
- if (path->slots[*level] >=
- btrfs_header_nritems(cur))
- break;
-
- /* the new code goes down to level 1 and does all the
- * leaves pointed to that node in bulk. So, this check
- * for level 0 will always be false.
- *
- * But, the disk format allows the drop_snapshot_progress
- * field in the root to leave things in a state where
- * a leaf will need cleaning up here. If someone crashes
- * with the old code and then boots with the new code,
- * we might find a leaf here.
- */
- if (*level == 0) {
- ret = btrfs_drop_leaf_ref(trans, root, cur);
- BUG_ON(ret);
- break;
+ if (!wc->update_ref ||
+ generation <= root->root_key.offset)
+ continue;
+ btrfs_node_key_to_cpu(eb, &key, slot);
+ ret = btrfs_comp_cpu_keys(&key,
+ &wc->update_progress);
+ if (ret < 0)
+ continue;
}
-
- /*
- * once we get to level one, process the whole node
- * at once, including everything below it.
- */
- if (*level == 1) {
- ret = drop_level_one_refs(trans, root, path);
- BUG_ON(ret);
+reada:
+ ret = readahead_tree_block(root, bytenr, blocksize,
+ generation);
+ if (ret)
break;
- }
-
- bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
- ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = btrfs_level_size(root, *level - 1);
-
- ret = drop_snap_lookup_refcount(trans, root, bytenr,
- blocksize, &refs);
- BUG_ON(ret);
-
- /*
- * if there is more than one reference, we don't need
- * to read that node to drop any references it has. We
- * just drop the ref we hold on that node and move on to the
- * next slot in this level.
- */
- if (refs != 1) {
- parent = path->nodes[*level];
- root_owner = btrfs_header_owner(parent);
- root_gen = btrfs_header_generation(parent);
- path->slots[*level]++;
-
- ret = btrfs_free_extent(trans, root, bytenr,
- blocksize, parent->start,
- root_owner, root_gen,
- *level - 1, 1);
- BUG_ON(ret);
-
- atomic_inc(&root->fs_info->throttle_gen);
- wake_up(&root->fs_info->transaction_throttle);
- cond_resched();
-
- continue;
- }
-
- /*
- * we need to keep freeing things in the next level down.
- * read the block and loop around to process it
- */
- next = read_tree_block(root, bytenr, blocksize, ptr_gen);
- WARN_ON(*level <= 0);
- if (path->nodes[*level-1])
- free_extent_buffer(path->nodes[*level-1]);
- path->nodes[*level-1] = next;
- *level = btrfs_header_level(next);
- path->slots[*level] = 0;
- cond_resched();
+ last = bytenr + blocksize;
+ nread++;
}
-out:
- WARN_ON(*level < 0);
- WARN_ON(*level >= BTRFS_MAX_LEVEL);
-
- if (path->nodes[*level] == root->node) {
- parent = path->nodes[*level];
- bytenr = path->nodes[*level]->start;
- } else {
- parent = path->nodes[*level + 1];
- bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
- }
-
- blocksize = btrfs_level_size(root, *level);
- root_owner = btrfs_header_owner(parent);
- root_gen = btrfs_header_generation(parent);
-
- /*
- * cleanup and free the reference on the last node
- * we processed
- */
- ret = btrfs_free_extent(trans, root, bytenr, blocksize,
- parent->start, root_owner, root_gen,
- *level, 1);
- free_extent_buffer(path->nodes[*level]);
- path->nodes[*level] = NULL;
-
- *level += 1;
- BUG_ON(ret);
-
- cond_resched();
- return 0;
+ wc->reada_slot = slot;
}
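reada_walk_down adapts its readahead window as it goes: if the walker is still behind the last readahead slot the window shrinks to two thirds (never below 2), otherwise it grows by half, capped at one full node of pointers. A standalone model of just that adjustment (nodeptrs_per_block stands in for BTRFS_NODEPTRS_PER_BLOCK):

static int adjust_reada_count(int reada_count, int cur_slot, int reada_slot,
                              int nodeptrs_per_block)
{
        if (cur_slot < reada_slot) {
                /* readahead is already ahead of the walker: shrink */
                reada_count = reada_count * 2 / 3;
                if (reada_count < 2)
                        reada_count = 2;
        } else {
                /* walker caught up: grow, but never beyond one node */
                reada_count = reada_count * 3 / 2;
                if (reada_count > nodeptrs_per_block)
                        reada_count = nodeptrs_per_block;
        }
        return reada_count;
}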
-#endif
-
-struct walk_control {
- u64 refs[BTRFS_MAX_LEVEL];
- u64 flags[BTRFS_MAX_LEVEL];
- struct btrfs_key update_progress;
- int stage;
- int level;
- int shared_level;
- int update_ref;
- int keep_locks;
-};
-
-#define DROP_REFERENCE 1
-#define UPDATE_BACKREF 2
/*
 * helper to process tree block while walking down the tree.
*
- * when wc->stage == DROP_REFERENCE, this function checks
- * reference count of the block. if the block is shared and
- * we need update back refs for the subtree rooted at the
- * block, this function changes wc->stage to UPDATE_BACKREF
- *
* when wc->stage == UPDATE_BACKREF, this function updates
* back refs for pointers in the block.
*
@@ -4805,7 +4587,6 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
{
int level = wc->level;
struct extent_buffer *eb = path->nodes[level];
- struct btrfs_key key;
u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
int ret;
@@ -4828,21 +4609,6 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
BUG_ON(wc->refs[level] == 0);
}
- if (wc->stage == DROP_REFERENCE &&
- wc->update_ref && wc->refs[level] > 1) {
- BUG_ON(eb == root->node);
- BUG_ON(path->slots[level] > 0);
- if (level == 0)
- btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
- else
- btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
- if (btrfs_header_owner(eb) == root->root_key.objectid &&
- btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
- wc->stage = UPDATE_BACKREF;
- wc->shared_level = level;
- }
- }
-
if (wc->stage == DROP_REFERENCE) {
if (wc->refs[level] > 1)
return 1;
@@ -4879,6 +4645,123 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
}
/*
+ * helper to process a tree block pointer.
+ *
+ * when wc->stage == DROP_REFERENCE, this function checks the
+ * reference count of the block pointed to. if the block is
+ * shared and we need to update back refs for the subtree
+ * rooted at the block, this function changes wc->stage to
+ * UPDATE_BACKREF. if the block is shared and there is no need
+ * to update back refs, this function drops the reference to
+ * the block.
+ *
+ * NOTE: return value 1 means we should stop walking down.
+ */
+static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ u64 bytenr;
+ u64 generation;
+ u64 parent;
+ u32 blocksize;
+ struct btrfs_key key;
+ struct extent_buffer *next;
+ int level = wc->level;
+ int reada = 0;
+ int ret = 0;
+
+ generation = btrfs_node_ptr_generation(path->nodes[level],
+ path->slots[level]);
+ /*
+ * if the lower level block was created before the snapshot
+ * was created, we know there is no need to update back refs
+ * for the subtree
+ */
+ if (wc->stage == UPDATE_BACKREF &&
+ generation <= root->root_key.offset)
+ return 1;
+
+ bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
+ blocksize = btrfs_level_size(root, level - 1);
+
+ next = btrfs_find_tree_block(root, bytenr, blocksize);
+ if (!next) {
+ next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+ reada = 1;
+ }
+ btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
+
+ if (wc->stage == DROP_REFERENCE) {
+ ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+ &wc->refs[level - 1],
+ &wc->flags[level - 1]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level - 1] == 0);
+
+ if (wc->refs[level - 1] > 1) {
+ if (!wc->update_ref ||
+ generation <= root->root_key.offset)
+ goto skip;
+
+ btrfs_node_key_to_cpu(path->nodes[level], &key,
+ path->slots[level]);
+ ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
+ if (ret < 0)
+ goto skip;
+
+ wc->stage = UPDATE_BACKREF;
+ wc->shared_level = level - 1;
+ }
+ }
+
+ if (!btrfs_buffer_uptodate(next, generation)) {
+ btrfs_tree_unlock(next);
+ free_extent_buffer(next);
+ next = NULL;
+ }
+
+ if (!next) {
+ if (reada && level == 1)
+ reada_walk_down(trans, root, wc, path);
+ next = read_tree_block(root, bytenr, blocksize, generation);
+ btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
+ }
+
+ level--;
+ BUG_ON(level != btrfs_header_level(next));
+ path->nodes[level] = next;
+ path->slots[level] = 0;
+ path->locks[level] = 1;
+ wc->level = level;
+ if (wc->level == 1)
+ wc->reada_slot = 0;
+ return 0;
+skip:
+ wc->refs[level - 1] = 0;
+ wc->flags[level - 1] = 0;
+
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
+ parent = path->nodes[level]->start;
+ } else {
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level]));
+ parent = 0;
+ }
+
+ ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
+ root->root_key.objectid, level - 1, 0);
+ BUG_ON(ret);
+
+ btrfs_tree_unlock(next);
+ free_extent_buffer(next);
+ return 1;
+}
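In the DROP_REFERENCE stage, do_walk_down above makes a per-pointer decision: a shared child whose back refs do not need updating is never read at all; its reference is dropped and the walker moves on to the next slot. A shared child that does need updating flips the walk into UPDATE_BACKREF (and is still descended into), and an exclusively owned child is simply descended into. A rough decision-only model, with illustrative names for the conditions:

enum child_action { DESCEND, SWITCH_TO_UPDATE_BACKREF, DROP_REF_AND_SKIP };

static enum child_action classify_child(unsigned long long refs,
                                        int update_ref,
                                        int created_before_snapshot,
                                        int key_before_update_progress)
{
        if (refs > 1) {
                /* shared subtree */
                if (!update_ref || created_before_snapshot ||
                    key_before_update_progress)
                        return DROP_REF_AND_SKIP;
                return SWITCH_TO_UPDATE_BACKREF;
        }
        /* we hold the only reference: keep walking down */
        return DESCEND;
}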
+
+/*
 * helper to process tree block while walking up the tree.
*
* when wc->stage == DROP_REFERENCE, this function drops
@@ -4905,7 +4788,6 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (level < wc->shared_level)
goto out;
- BUG_ON(wc->refs[level] <= 1);
ret = find_next_key(path, level + 1, &wc->update_progress);
if (ret > 0)
wc->update_ref = 0;
@@ -4936,8 +4818,6 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
path->locks[level] = 0;
return 1;
}
- } else {
- BUG_ON(level != 0);
}
}
@@ -4990,17 +4870,13 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct walk_control *wc)
{
- struct extent_buffer *next;
- struct extent_buffer *cur;
- u64 bytenr;
- u64 ptr_gen;
- u32 blocksize;
int level = wc->level;
int ret;
while (level >= 0) {
- cur = path->nodes[level];
- BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
+ if (path->slots[level] >=
+ btrfs_header_nritems(path->nodes[level]))
+ break;
ret = walk_down_proc(trans, root, path, wc);
if (ret > 0)
@@ -5009,20 +4885,12 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
if (level == 0)
break;
- bytenr = btrfs_node_blockptr(cur, path->slots[level]);
- blocksize = btrfs_level_size(root, level - 1);
- ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
-
- next = read_tree_block(root, bytenr, blocksize, ptr_gen);
- btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
-
- level--;
- BUG_ON(level != btrfs_header_level(next));
- path->nodes[level] = next;
- path->slots[level] = 0;
- path->locks[level] = 1;
- wc->level = level;
+ ret = do_walk_down(trans, root, path, wc);
+ if (ret > 0) {
+ path->slots[level]++;
+ continue;
+ }
+ level = wc->level;
}
return 0;
}
@@ -5112,9 +4980,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
err = ret;
goto out;
}
- btrfs_node_key_to_cpu(path->nodes[level], &key,
- path->slots[level]);
- WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
+ WARN_ON(ret > 0);
/*
* unlock our path, this is safe because only this
@@ -5149,6 +5015,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
wc->stage = DROP_REFERENCE;
wc->update_ref = update_ref;
wc->keep_locks = 0;
+ wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
while (1) {
ret = walk_down_tree(trans, root, path, wc);
@@ -5201,9 +5068,24 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
ret = btrfs_del_root(trans, tree_root, &root->root_key);
BUG_ON(ret);
- free_extent_buffer(root->node);
- free_extent_buffer(root->commit_root);
- kfree(root);
+ if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
+ NULL, NULL);
+ BUG_ON(ret < 0);
+ if (ret > 0) {
+ ret = btrfs_del_orphan_item(trans, tree_root,
+ root->root_key.objectid);
+ BUG_ON(ret);
+ }
+ }
+
+ if (root->in_radix) {
+ btrfs_free_fs_root(tree_root->fs_info, root);
+ } else {
+ free_extent_buffer(root->node);
+ free_extent_buffer(root->commit_root);
+ kfree(root);
+ }
out:
btrfs_end_transaction(trans, tree_root);
kfree(wc);
@@ -5255,6 +5137,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
wc->stage = DROP_REFERENCE;
wc->update_ref = 0;
wc->keep_locks = 1;
+ wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
while (1) {
wret = walk_down_tree(trans, root, path, wc);
@@ -5397,9 +5280,9 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
while (1) {
int ret;
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
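The extent map tree lock becomes a reader/writer lock; this hunk and the try_release_extent_mapping hunk further down only show the write side, taken around insertion and around lookups that may lead to removal. Presumably lookup-only paths elsewhere take the read side, though none appear in this diff. A minimal userspace sketch of that split using a stand-in tree type:

#include <pthread.h>

struct em_tree_model {
        pthread_rwlock_t lock;
        /* mapping storage elided */
};

static void insert_mapping(struct em_tree_model *t)
{
        pthread_rwlock_wrlock(&t->lock);        /* mutation: exclusive */
        /* an add_extent_mapping() equivalent would run here */
        pthread_rwlock_unlock(&t->lock);
}

static void lookup_mapping(struct em_tree_model *t)
{
        pthread_rwlock_rdlock(&t->lock);        /* lookup only: shared */
        /* a lookup_extent_mapping() equivalent would run here */
        pthread_rwlock_unlock(&t->lock);
}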
@@ -6842,287 +6725,86 @@ int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
return 0;
}
-#if 0
-static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 objectid, u64 size)
-{
- struct btrfs_path *path;
- struct btrfs_inode_item *item;
- struct extent_buffer *leaf;
- int ret;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- path->leave_spinning = 1;
- ret = btrfs_insert_empty_inode(trans, root, path, objectid);
- if (ret)
- goto out;
-
- leaf = path->nodes[0];
- item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
- memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
- btrfs_set_inode_generation(leaf, item, 1);
- btrfs_set_inode_size(leaf, item, size);
- btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
- btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
- btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(root, path);
-out:
- btrfs_free_path(path);
- return ret;
-}
-
-static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *group)
+/*
+ * checks to see if it's even possible to relocate this block group.
+ *
+ * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
+ * ok to go ahead and try.
+ */
+int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
- struct inode *inode = NULL;
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root;
- struct btrfs_key root_key;
- u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
- int err = 0;
+ struct btrfs_block_group_cache *block_group;
+ struct btrfs_space_info *space_info;
+ struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ struct btrfs_device *device;
+ int full = 0;
+ int ret = 0;
- root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
- root_key.offset = (u64)-1;
- root = btrfs_read_fs_root_no_name(fs_info, &root_key);
- if (IS_ERR(root))
- return ERR_CAST(root);
+ block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
- trans = btrfs_start_transaction(root, 1);
- BUG_ON(!trans);
+ /* odd, couldn't find the block group, leave it alone */
+ if (!block_group)
+ return -1;
- err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
- if (err)
+ /* no bytes used, we're good */
+ if (!btrfs_block_group_used(&block_group->item))
goto out;
- err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
- BUG_ON(err);
-
- err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
- group->key.offset, 0, group->key.offset,
- 0, 0, 0);
- BUG_ON(err);
-
- inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
- if (inode->i_state & I_NEW) {
- BTRFS_I(inode)->root = root;
- BTRFS_I(inode)->location.objectid = objectid;
- BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
- BTRFS_I(inode)->location.offset = 0;
- btrfs_read_locked_inode(inode);
- unlock_new_inode(inode);
- BUG_ON(is_bad_inode(inode));
- } else {
- BUG_ON(1);
- }
- BTRFS_I(inode)->index_cnt = group->key.objectid;
-
- err = btrfs_orphan_add(trans, inode);
-out:
- btrfs_end_transaction(trans, root);
- if (err) {
- if (inode)
- iput(inode);
- inode = ERR_PTR(err);
- }
- return inode;
-}
-
-int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
-{
-
- struct btrfs_ordered_sum *sums;
- struct btrfs_sector_sum *sector_sum;
- struct btrfs_ordered_extent *ordered;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct list_head list;
- size_t offset;
- int ret;
- u64 disk_bytenr;
-
- INIT_LIST_HEAD(&list);
-
- ordered = btrfs_lookup_ordered_extent(inode, file_pos);
- BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
-
- disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
- ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
- disk_bytenr + len - 1, &list);
-
- while (!list_empty(&list)) {
- sums = list_entry(list.next, struct btrfs_ordered_sum, list);
- list_del_init(&sums->list);
-
- sector_sum = sums->sums;
- sums->bytenr = ordered->start;
+ space_info = block_group->space_info;
+ spin_lock(&space_info->lock);
- offset = 0;
- while (offset < sums->len) {
- sector_sum->bytenr += ordered->start - disk_bytenr;
- sector_sum++;
- offset += root->sectorsize;
- }
+ full = space_info->full;
- btrfs_add_ordered_sum(inode, ordered, sums);
+ /*
+ * if this is the last block group we have in this space, we can't
+ * relocate it unless we're able to allocate a new chunk below.
+ *
+ * Otherwise, we need to make sure we have room in the space to handle
+ * all of the extents from this block group. If we can, we're good
+ */
+ if ((space_info->total_bytes != block_group->key.offset) &&
+ (space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ btrfs_block_group_used(&block_group->item) <
+ space_info->total_bytes)) {
+ spin_unlock(&space_info->lock);
+ goto out;
}
- btrfs_put_ordered_extent(ordered);
- return 0;
-}
-
-int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
-{
- struct btrfs_trans_handle *trans;
- struct btrfs_path *path;
- struct btrfs_fs_info *info = root->fs_info;
- struct extent_buffer *leaf;
- struct inode *reloc_inode;
- struct btrfs_block_group_cache *block_group;
- struct btrfs_key key;
- u64 skipped;
- u64 cur_byte;
- u64 total_found;
- u32 nritems;
- int ret;
- int progress;
- int pass = 0;
-
- root = root->fs_info->extent_root;
-
- block_group = btrfs_lookup_block_group(info, group_start);
- BUG_ON(!block_group);
-
- printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
- (unsigned long long)block_group->key.objectid,
- (unsigned long long)block_group->flags);
-
- path = btrfs_alloc_path();
- BUG_ON(!path);
-
- reloc_inode = create_reloc_inode(info, block_group);
- BUG_ON(IS_ERR(reloc_inode));
-
- __alloc_chunk_for_shrink(root, block_group, 1);
- set_block_group_readonly(block_group);
-
- btrfs_start_delalloc_inodes(info->tree_root);
- btrfs_wait_ordered_extents(info->tree_root, 0);
-again:
- skipped = 0;
- total_found = 0;
- progress = 0;
- key.objectid = block_group->key.objectid;
- key.offset = 0;
- key.type = 0;
- cur_byte = key.objectid;
-
- trans = btrfs_start_transaction(info->tree_root, 1);
- btrfs_commit_transaction(trans, info->tree_root);
+ spin_unlock(&space_info->lock);
- mutex_lock(&root->fs_info->cleaner_mutex);
- btrfs_clean_old_snapshots(info->tree_root);
- btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ /*
+ * ok we don't have enough space, but maybe we have free space on our
+ * devices to allocate new chunks for relocation, so loop through our
+ * alloc devices and guess if we have enough space. However, if we
+ * were marked as full, then we know there aren't enough chunks, and we
+ * can just return.
+ */
+ ret = -1;
+ if (full)
+ goto out;
- trans = btrfs_start_transaction(info->tree_root, 1);
- btrfs_commit_transaction(trans, info->tree_root);
+ mutex_lock(&root->fs_info->chunk_mutex);
+ list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
+ u64 min_free = btrfs_block_group_used(&block_group->item);
+ u64 dev_offset, max_avail;
- while (1) {
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
-next:
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- goto out;
- if (ret == 1) {
- ret = 0;
+ /*
+ * check to make sure we can actually find a chunk with enough
+ * space to fit our block group in.
+ */
+ if (device->total_bytes > device->bytes_used + min_free) {
+ ret = find_free_dev_extent(NULL, device, min_free,
+ &dev_offset, &max_avail);
+ if (!ret)
break;
- }
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
+ ret = -1;
}
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-
- if (key.objectid >= block_group->key.objectid +
- block_group->key.offset)
- break;
-
- if (progress && need_resched()) {
- btrfs_release_path(root, path);
- cond_resched();
- progress = 0;
- continue;
- }
- progress = 1;
-
- if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
- key.objectid + key.offset <= cur_byte) {
- path->slots[0]++;
- goto next;
- }
-
- total_found++;
- cur_byte = key.objectid + key.offset;
- btrfs_release_path(root, path);
-
- __alloc_chunk_for_shrink(root, block_group, 0);
- ret = relocate_one_extent(root, path, &key, block_group,
- reloc_inode, pass);
- BUG_ON(ret < 0);
- if (ret > 0)
- skipped++;
-
- key.objectid = cur_byte;
- key.type = 0;
- key.offset = 0;
- }
-
- btrfs_release_path(root, path);
-
- if (pass == 0) {
- btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
- invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
- }
-
- if (total_found > 0) {
- printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
- (unsigned long long)total_found, pass);
- pass++;
- if (total_found == skipped && pass > 2) {
- iput(reloc_inode);
- reloc_inode = create_reloc_inode(info, block_group);
- pass = 0;
- }
- goto again;
}
-
- /* delete reloc_inode */
- iput(reloc_inode);
-
- /* unpin extents in this range */
- trans = btrfs_start_transaction(info->tree_root, 1);
- btrfs_commit_transaction(trans, info->tree_root);
-
- spin_lock(&block_group->lock);
- WARN_ON(block_group->pinned > 0);
- WARN_ON(block_group->reserved > 0);
- WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
- spin_unlock(&block_group->lock);
- btrfs_put_block_group(block_group);
- ret = 0;
+ mutex_unlock(&root->fs_info->chunk_mutex);
out:
- btrfs_free_path(path);
+ btrfs_put_block_group(block_group);
return ret;
}
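btrfs_can_relocate above reduces to: an empty group can always be relocated; otherwise the group's used bytes must fit in the head room of its space_info, or failing that at least one allocatable device must have a free extent big enough for a replacement chunk. A standalone model of the space_info test only (fields are simplified stand-ins):

#include <stdint.h>

struct space_model {
        uint64_t total_bytes;
        uint64_t bytes_used;
        uint64_t bytes_reserved;
        uint64_t bytes_pinned;
        uint64_t bytes_readonly;
};

/* returns 1 if the group's data fits elsewhere in the same space_info */
static int fits_in_space(const struct space_model *s,
                         uint64_t group_size, uint64_t group_used)
{
        if (group_used == 0)
                return 1;               /* empty group: trivially ok */
        if (s->total_bytes == group_size)
                return 0;               /* it is the only group in this space */
        return s->bytes_used + s->bytes_reserved + s->bytes_pinned +
               s->bytes_readonly + group_used < s->total_bytes;
}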
-#endif
static int find_first_block_group(struct btrfs_root *root,
struct btrfs_path *path, struct btrfs_key *key)
@@ -7165,8 +6847,18 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
+ struct btrfs_caching_control *caching_ctl;
struct rb_node *n;
+ down_write(&info->extent_commit_sem);
+ while (!list_empty(&info->caching_block_groups)) {
+ caching_ctl = list_entry(info->caching_block_groups.next,
+ struct btrfs_caching_control, list);
+ list_del(&caching_ctl->list);
+ put_caching_control(caching_ctl);
+ }
+ up_write(&info->extent_commit_sem);
+
spin_lock(&info->block_group_cache_lock);
while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
block_group = rb_entry(n, struct btrfs_block_group_cache,
@@ -7180,8 +6872,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
up_write(&block_group->space_info->groups_sem);
if (block_group->cached == BTRFS_CACHE_STARTED)
- wait_event(block_group->caching_q,
- block_group_cache_done(block_group));
+ wait_block_group_cache_done(block_group);
btrfs_remove_free_space_cache(block_group);
@@ -7251,7 +6942,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
spin_lock_init(&cache->lock);
spin_lock_init(&cache->tree_lock);
cache->fs_info = info;
- init_waitqueue_head(&cache->caching_q);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
@@ -7273,8 +6963,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
cache->flags = btrfs_block_group_flags(&cache->item);
cache->sectorsize = root->sectorsize;
- remove_sb_from_cache(root, cache);
-
/*
* check for two cases, either we are full, and therefore
* don't need to bother with the caching work since we won't
@@ -7283,13 +6971,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
* time, particularly in the full case.
*/
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
+ exclude_super_stripes(root, cache);
+ cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
+ free_excluded_extents(root, cache);
} else if (btrfs_block_group_used(&cache->item) == 0) {
+ exclude_super_stripes(root, cache);
+ cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
add_new_free_space(cache, root->fs_info,
found_key.objectid,
found_key.objectid +
found_key.offset);
+ free_excluded_extents(root, cache);
}
ret = update_space_info(info, cache->flags, found_key.offset,
@@ -7297,6 +6991,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
&space_info);
BUG_ON(ret);
cache->space_info = space_info;
+ spin_lock(&cache->space_info->lock);
+ cache->space_info->bytes_super += cache->bytes_super;
+ spin_unlock(&cache->space_info->lock);
+
down_write(&space_info->groups_sem);
list_add_tail(&cache->list, &space_info->block_groups);
up_write(&space_info->groups_sem);
@@ -7346,7 +7044,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
spin_lock_init(&cache->tree_lock);
- init_waitqueue_head(&cache->caching_q);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
@@ -7355,15 +7052,23 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->flags = type;
btrfs_set_block_group_flags(&cache->item, type);
+ cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- remove_sb_from_cache(root, cache);
+ exclude_super_stripes(root, cache);
add_new_free_space(cache, root->fs_info, chunk_offset,
chunk_offset + size);
+ free_excluded_extents(root, cache);
+
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
&cache->space_info);
BUG_ON(ret);
+
+ spin_lock(&cache->space_info->lock);
+ cache->space_info->bytes_super += cache->bytes_super;
+ spin_unlock(&cache->space_info->lock);
+
down_write(&cache->space_info->groups_sem);
list_add_tail(&cache->list, &cache->space_info->block_groups);
up_write(&cache->space_info->groups_sem);
@@ -7429,8 +7134,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
up_write(&block_group->space_info->groups_sem);
if (block_group->cached == BTRFS_CACHE_STARTED)
- wait_event(block_group->caching_q,
- block_group_cache_done(block_group));
+ wait_block_group_cache_done(block_group);
btrfs_remove_free_space_cache(block_group);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 68260180f58..0cb88f8146e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -367,10 +367,10 @@ static int insert_state(struct extent_io_tree *tree,
}
if (bits & EXTENT_DIRTY)
tree->dirty_bytes += end - start + 1;
- set_state_cb(tree, state, bits);
- state->state |= bits;
state->start = start;
state->end = end;
+ set_state_cb(tree, state, bits);
+ state->state |= bits;
node = tree_insert(&tree->state, end, &state->rb_node);
if (node) {
struct extent_state *found;
@@ -471,10 +471,14 @@ static int clear_state_bit(struct extent_io_tree *tree,
* bits were already set, or zero if none of the bits were already set.
*/
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int wake, int delete, gfp_t mask)
+ int bits, int wake, int delete,
+ struct extent_state **cached_state,
+ gfp_t mask)
{
struct extent_state *state;
+ struct extent_state *cached;
struct extent_state *prealloc = NULL;
+ struct rb_node *next_node;
struct rb_node *node;
u64 last_end;
int err;
@@ -488,6 +492,17 @@ again:
}
spin_lock(&tree->lock);
+ if (cached_state) {
+ cached = *cached_state;
+ *cached_state = NULL;
+ cached_state = NULL;
+ if (cached && cached->tree && cached->start == start) {
+ atomic_dec(&cached->refs);
+ state = cached;
+ goto hit_next;
+ }
+ free_extent_state(cached);
+ }
/*
* this search will find the extents that end after
* our range starts
@@ -496,6 +511,7 @@ again:
if (!node)
goto out;
state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
if (state->start > end)
goto out;
WARN_ON(state->end < start);
@@ -531,8 +547,6 @@ again:
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
- } else {
- start = state->start;
}
goto search_again;
}
@@ -550,16 +564,28 @@ again:
if (wake)
wake_up(&state->wq);
+
set |= clear_state_bit(tree, prealloc, bits,
wake, delete);
prealloc = NULL;
goto out;
}
+ if (state->end < end && prealloc && !need_resched())
+ next_node = rb_next(&state->rb_node);
+ else
+ next_node = NULL;
+
set |= clear_state_bit(tree, state, bits, wake, delete);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
+ if (start <= end && next_node) {
+ state = rb_entry(next_node, struct extent_state,
+ rb_node);
+ if (state->start == start)
+ goto hit_next;
+ }
goto search_again;
out:
@@ -653,28 +679,40 @@ static void set_state_bits(struct extent_io_tree *tree,
state->state |= bits;
}
+static void cache_state(struct extent_state *state,
+ struct extent_state **cached_ptr)
+{
+ if (cached_ptr && !(*cached_ptr)) {
+ if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+ *cached_ptr = state;
+ atomic_inc(&state->refs);
+ }
+ }
+}
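cache_state only pins states worth coming back to (ones carrying EXTENT_IOBITS or EXTENT_BOUNDARY), and the set/clear paths then try the caller's cached state before searching the rb-tree: if it is still in the tree and starts exactly at the requested offset, the search is skipped. A minimal standalone model of that check (the struct is a stand-in, not the kernel's extent_state):

#include <stddef.h>
#include <stdint.h>

struct state_model {
        uint64_t start;
        void *tree;     /* non-NULL while the state is still in the tree */
};

/* returns the cached state if it can be used directly, NULL to fall back
 * to a full tree search */
static struct state_model *try_cached(struct state_model *cached, uint64_t start)
{
        if (cached && cached->tree && cached->start == start)
                return cached;
        return NULL;
}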
+
/*
- * set some bits on a range in the tree. This may require allocations
- * or sleeping, so the gfp mask is used to indicate what is allowed.
+ * set some bits on a range in the tree. This may require allocations or
+ * sleeping, so the gfp mask is used to indicate what is allowed.
*
- * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
- * range already has the desired bits set. The start of the existing
- * range is returned in failed_start in this case.
+ * If any of the exclusive bits are set, this will fail with -EEXIST if some
+ * part of the range already has the desired bits set. The start of the
+ * existing range is returned in failed_start in this case.
*
- * [start, end] is inclusive
- * This takes the tree lock.
+ * [start, end] is inclusive. This takes the tree lock.
*/
+
static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int exclusive, u64 *failed_start,
+ int bits, int exclusive_bits, u64 *failed_start,
+ struct extent_state **cached_state,
gfp_t mask)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
struct rb_node *node;
int err = 0;
- int set;
u64 last_start;
u64 last_end;
+
again:
if (!prealloc && (mask & __GFP_WAIT)) {
prealloc = alloc_extent_state(mask);
@@ -683,6 +721,13 @@ again:
}
spin_lock(&tree->lock);
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (state->start == start && state->tree) {
+ node = &state->rb_node;
+ goto hit_next;
+ }
+ }
/*
* this search will find all the extents that end after
* our range starts.
@@ -694,8 +739,8 @@ again:
BUG_ON(err == -EEXIST);
goto out;
}
-
state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
last_start = state->start;
last_end = state->end;
@@ -706,17 +751,29 @@ again:
* Just lock what we found and keep going
*/
if (state->start == start && state->end <= end) {
- set = state->state & bits;
- if (set && exclusive) {
+ struct rb_node *next_node;
+ if (state->state & exclusive_bits) {
*failed_start = state->start;
err = -EEXIST;
goto out;
}
+
set_state_bits(tree, state, bits);
+ cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
goto out;
+
start = last_end + 1;
+ if (start < end && prealloc && !need_resched()) {
+ next_node = rb_next(node);
+ if (next_node) {
+ state = rb_entry(next_node, struct extent_state,
+ rb_node);
+ if (state->start == start)
+ goto hit_next;
+ }
+ }
goto search_again;
}
@@ -737,8 +794,7 @@ again:
* desired bit on it.
*/
if (state->start < start) {
- set = state->state & bits;
- if (exclusive && set) {
+ if (state->state & exclusive_bits) {
*failed_start = start;
err = -EEXIST;
goto out;
@@ -750,12 +806,11 @@ again:
goto out;
if (state->end <= end) {
set_state_bits(tree, state, bits);
+ cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
goto out;
start = last_end + 1;
- } else {
- start = state->start;
}
goto search_again;
}
@@ -774,6 +829,7 @@ again:
this_end = last_start - 1;
err = insert_state(tree, prealloc, start, this_end,
bits);
+ cache_state(prealloc, cached_state);
prealloc = NULL;
BUG_ON(err == -EEXIST);
if (err)
@@ -788,8 +844,7 @@ again:
* on the first half
*/
if (state->start <= end && state->end > end) {
- set = state->state & bits;
- if (exclusive && set) {
+ if (state->state & exclusive_bits) {
*failed_start = start;
err = -EEXIST;
goto out;
@@ -798,6 +853,7 @@ again:
BUG_ON(err == -EEXIST);
set_state_bits(tree, prealloc, bits);
+ cache_state(prealloc, cached_state);
merge_state(tree, prealloc);
prealloc = NULL;
goto out;
@@ -826,86 +882,64 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
- mask);
-}
-
-int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
-{
- return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
+ NULL, mask);
}
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask)
{
return set_extent_bit(tree, start, end, bits, 0, NULL,
- mask);
+ NULL, mask);
}
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask)
{
- return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
+ return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_DIRTY,
- 0, NULL, mask);
+ EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
+ 0, NULL, NULL, mask);
}
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
-}
-
-int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
-{
- return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
+ NULL, mask);
}
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
- mask);
+ NULL, mask);
}
static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
- return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
+ return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
+ NULL, mask);
}
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- mask);
+ NULL, mask);
}
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask)
{
- return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
-}
-
-static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
-{
- return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
- 0, NULL, mask);
-}
-
-static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask)
-{
- return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
+ return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+ NULL, mask);
}
int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -917,13 +951,15 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 * either insert or lock state struct between start and end. Use mask to tell
 * us if waiting is desired.
*/
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ int bits, struct extent_state **cached_state, gfp_t mask)
{
int err;
u64 failed_start;
while (1) {
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
- &failed_start, mask);
+ err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+ EXTENT_LOCKED, &failed_start,
+ cached_state, mask);
if (err == -EEXIST && (mask & __GFP_WAIT)) {
wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
start = failed_start;
@@ -935,27 +971,40 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
return err;
}
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+{
+ return lock_extent_bits(tree, start, end, 0, NULL, mask);
+}
+
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
int err;
u64 failed_start;
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
- &failed_start, mask);
+ err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+ &failed_start, NULL, mask);
if (err == -EEXIST) {
if (failed_start > start)
clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, 1, 0, mask);
+ EXTENT_LOCKED, 1, 0, NULL, mask);
return 0;
}
return 1;
}
+int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached, gfp_t mask)
+{
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ mask);
+}
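Going by the signatures added here and the way find_lock_delalloc_range uses them further down, the intended pattern is: lock with a NULL-initialised cached_state pointer, hand the same state to test_range_bit, and release it through unlock_extent_cached (or free_extent_state on early exits). A toy standalone model of that lifecycle, with stand-ins for the tree and state types:

#include <stdio.h>

struct cached_model {
        unsigned long long start;
        int valid;
};

/* stands in for lock_extent_bits(tree, start, end, 0, &cached, mask):
 * locking hands back a state that later calls can reuse */
static void lock_range(struct cached_model *cached, unsigned long long start)
{
        cached->start = start;
        cached->valid = 1;
}

/* stands in for unlock_extent_cached(): consumes the cached state */
static void unlock_range_cached(struct cached_model *cached)
{
        cached->valid = 0;
}

int main(void)
{
        struct cached_model cached = { 0, 0 };

        lock_range(&cached, 4096);
        /* a test_range_bit(tree, ..., &cached) equivalent would start its
         * search from cached.start instead of walking the whole tree */
        printf("cached start %llu valid %d\n", cached.start, cached.valid);
        unlock_range_cached(&cached);
        return 0;
}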
+
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+ mask);
}
/*
@@ -974,7 +1023,6 @@ int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
page_cache_release(page);
index++;
}
- set_extent_dirty(tree, start, end, GFP_NOFS);
return 0;
}
@@ -994,7 +1042,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
page_cache_release(page);
index++;
}
- set_extent_writeback(tree, start, end, GFP_NOFS);
return 0;
}
@@ -1232,6 +1279,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
u64 delalloc_start;
u64 delalloc_end;
u64 found;
+ struct extent_state *cached_state = NULL;
int ret;
int loops = 0;
@@ -1269,6 +1317,7 @@ again:
/* some of the pages are gone, lets avoid looping by
* shortening the size of the delalloc range we're searching
*/
+ free_extent_state(cached_state);
if (!loops) {
unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
max_bytes = PAGE_CACHE_SIZE - offset;
@@ -1282,18 +1331,21 @@ again:
BUG_ON(ret);
/* step three, lock the state bits for the whole range */
- lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+ lock_extent_bits(tree, delalloc_start, delalloc_end,
+ 0, &cached_state, GFP_NOFS);
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
- EXTENT_DELALLOC, 1);
+ EXTENT_DELALLOC, 1, cached_state);
if (!ret) {
- unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+ unlock_extent_cached(tree, delalloc_start, delalloc_end,
+ &cached_state, GFP_NOFS);
__unlock_for_delalloc(inode, locked_page,
delalloc_start, delalloc_end);
cond_resched();
goto again;
}
+ free_extent_state(cached_state);
*start = delalloc_start;
*end = delalloc_end;
out_failed:
@@ -1307,7 +1359,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
int clear_unlock,
int clear_delalloc, int clear_dirty,
int set_writeback,
- int end_writeback)
+ int end_writeback,
+ int set_private2)
{
int ret;
struct page *pages[16];
@@ -1325,8 +1378,9 @@ int extent_clear_unlock_delalloc(struct inode *inode,
if (clear_delalloc)
clear_bits |= EXTENT_DELALLOC;
- clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
- if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
+ clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
+ if (!(unlock_pages || clear_dirty || set_writeback || end_writeback ||
+ set_private2))
return 0;
while (nr_pages > 0) {
@@ -1334,6 +1388,10 @@ int extent_clear_unlock_delalloc(struct inode *inode,
min_t(unsigned long,
nr_pages, ARRAY_SIZE(pages)), pages);
for (i = 0; i < ret; i++) {
+
+ if (set_private2)
+ SetPagePrivate2(pages[i]);
+
if (pages[i] == locked_page) {
page_cache_release(pages[i]);
continue;
@@ -1476,14 +1534,17 @@ out:
* range is found set.
*/
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int filled)
+ int bits, int filled, struct extent_state *cached)
{
struct extent_state *state = NULL;
struct rb_node *node;
int bitset = 0;
spin_lock(&tree->lock);
- node = tree_search(tree, start);
+ if (cached && cached->tree && cached->start == start)
+ node = &cached->rb_node;
+ else
+ node = tree_search(tree, start);
while (node && start <= end) {
state = rb_entry(node, struct extent_state, rb_node);
@@ -1503,6 +1564,10 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
bitset = 0;
break;
}
+
+ if (state->end == (u64)-1)
+ break;
+
start = state->end + 1;
if (start > end)
break;
@@ -1526,7 +1591,7 @@ static int check_page_uptodate(struct extent_io_tree *tree,
{
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 end = start + PAGE_CACHE_SIZE - 1;
- if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
+ if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
SetPageUptodate(page);
return 0;
}
@@ -1540,7 +1605,7 @@ static int check_page_locked(struct extent_io_tree *tree,
{
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
u64 end = start + PAGE_CACHE_SIZE - 1;
- if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
+ if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
unlock_page(page);
return 0;
}
@@ -1552,10 +1617,7 @@ static int check_page_locked(struct extent_io_tree *tree,
static int check_page_writeback(struct extent_io_tree *tree,
struct page *page)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 end = start + PAGE_CACHE_SIZE - 1;
- if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
- end_page_writeback(page);
+ end_page_writeback(page);
return 0;
}
@@ -1613,13 +1675,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
}
if (!uptodate) {
- clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
+ clear_extent_uptodate(tree, start, end, GFP_NOFS);
ClearPageUptodate(page);
SetPageError(page);
}
- clear_extent_writeback(tree, start, end, GFP_ATOMIC);
-
if (whole_page)
end_page_writeback(page);
else
@@ -1983,7 +2043,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
continue;
}
/* the get_extent function already copied into the page */
- if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+ if (test_range_bit(tree, cur, cur_end,
+ EXTENT_UPTODATE, 1, NULL)) {
check_page_uptodate(tree, page);
unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
cur = cur + iosize;
@@ -2078,6 +2139,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
u64 iosize;
u64 unlock_start;
sector_t sector;
+ struct extent_state *cached_state = NULL;
struct extent_map *em;
struct block_device *bdev;
int ret;
@@ -2124,6 +2186,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
delalloc_end = 0;
page_started = 0;
if (!epd->extent_locked) {
+ u64 delalloc_to_write = 0;
/*
* make sure the wbc mapping index is at least updated
* to this page.
@@ -2143,8 +2206,24 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
tree->ops->fill_delalloc(inode, page, delalloc_start,
delalloc_end, &page_started,
&nr_written);
+ /*
+ * delalloc_end is already one less than the total
+ * length, so we don't subtract one from
+ * PAGE_CACHE_SIZE
+ */
+ delalloc_to_write += (delalloc_end - delalloc_start +
+ PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
delalloc_start = delalloc_end + 1;
}
+ if (wbc->nr_to_write < delalloc_to_write) {
+ int thresh = 8192;
+
+ if (delalloc_to_write < thresh * 2)
+ thresh = delalloc_to_write;
+ wbc->nr_to_write = min_t(u64, delalloc_to_write,
+ thresh);
+ }
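When a page kicks off a large delalloc range, __extent_writepage above raises wbc->nr_to_write so writeback does not stop after the originally requested page count: ranges under two threshold widths (16384 pages) are taken whole, anything larger is capped at 8192 pages. A standalone model of that clamp:

#include <stdint.h>

static uint64_t bump_nr_to_write(uint64_t nr_to_write, uint64_t delalloc_pages)
{
        uint64_t thresh = 8192;

        if (nr_to_write >= delalloc_pages)
                return nr_to_write;             /* already asked for enough */
        if (delalloc_pages < thresh * 2)
                thresh = delalloc_pages;        /* small range: take it all */
        return delalloc_pages < thresh ? delalloc_pages : thresh;
}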
/* did the fill delalloc function already unlock and start
* the IO?
@@ -2160,15 +2239,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
goto done_unlocked;
}
}
- lock_extent(tree, start, page_end, GFP_NOFS);
-
- unlock_start = start;
-
if (tree->ops && tree->ops->writepage_start_hook) {
ret = tree->ops->writepage_start_hook(page, start,
page_end);
if (ret == -EAGAIN) {
- unlock_extent(tree, start, page_end, GFP_NOFS);
redirty_page_for_writepage(wbc, page);
update_nr_written(page, wbc, nr_written);
unlock_page(page);
@@ -2184,12 +2258,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
update_nr_written(page, wbc, nr_written + 1);
end = page_end;
- if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
- printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
-
if (last_byte <= start) {
- clear_extent_dirty(tree, start, page_end, GFP_NOFS);
- unlock_extent(tree, start, page_end, GFP_NOFS);
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, start,
page_end, NULL, 1);
@@ -2197,13 +2266,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
goto done;
}
- set_extent_uptodate(tree, start, page_end, GFP_NOFS);
blocksize = inode->i_sb->s_blocksize;
while (cur <= end) {
if (cur >= last_byte) {
- clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
- unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, cur,
page_end, NULL, 1);
@@ -2235,12 +2301,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
*/
if (compressed || block_start == EXTENT_MAP_HOLE ||
block_start == EXTENT_MAP_INLINE) {
- clear_extent_dirty(tree, cur,
- cur + iosize - 1, GFP_NOFS);
-
- unlock_extent(tree, unlock_start, cur + iosize - 1,
- GFP_NOFS);
-
/*
* end_io notification does not happen here for
* compressed extents
@@ -2265,13 +2325,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
}
/* leave this out until we have a page_mkwrite call */
if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
- EXTENT_DIRTY, 0)) {
+ EXTENT_DIRTY, 0, NULL)) {
cur = cur + iosize;
pg_offset += iosize;
continue;
}
- clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
if (tree->ops && tree->ops->writepage_io_hook) {
ret = tree->ops->writepage_io_hook(page, cur,
cur + iosize - 1);
@@ -2309,12 +2368,12 @@ done:
set_page_writeback(page);
end_page_writeback(page);
}
- if (unlock_start <= page_end)
- unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
unlock_page(page);
done_unlocked:
+ /* drop our reference on any cached states */
+ free_extent_state(cached_state);
return 0;
}
@@ -2339,9 +2398,9 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
writepage_t writepage, void *data,
void (*flush_fn)(void *))
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
int ret = 0;
int done = 0;
+ int nr_to_write_done = 0;
struct pagevec pvec;
int nr_pages;
pgoff_t index;
@@ -2361,7 +2420,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
scanned = 1;
}
retry:
- while (!done && (index <= end) &&
+ while (!done && !nr_to_write_done && (index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_DIRTY, min(end - index,
(pgoff_t)PAGEVEC_SIZE-1) + 1))) {
@@ -2412,12 +2471,15 @@ retry:
unlock_page(page);
ret = 0;
}
- if (ret || wbc->nr_to_write <= 0)
- done = 1;
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
+ if (ret)
done = 1;
- }
+
+ /*
+ * the filesystem may choose to bump up nr_to_write.
+ * We have to make sure to honor the new nr_to_write
+ * at any time
+ */
+ nr_to_write_done = wbc->nr_to_write <= 0;
}
pagevec_release(&pvec);
cond_resched();
@@ -2604,10 +2666,10 @@ int extent_invalidatepage(struct extent_io_tree *tree,
return 0;
lock_extent(tree, start, end, GFP_NOFS);
- wait_on_extent_writeback(tree, start, end);
+ wait_on_page_writeback(page);
clear_extent_bit(tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
- 1, 1, GFP_NOFS);
+ 1, 1, NULL, GFP_NOFS);
return 0;
}
@@ -2687,7 +2749,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
!isnew && !PageUptodate(page) &&
(block_off_end > to || block_off_start < from) &&
!test_range_bit(tree, block_start, cur_end,
- EXTENT_UPTODATE, 1)) {
+ EXTENT_UPTODATE, 1, NULL)) {
u64 sector;
u64 extent_offset = block_start - em->start;
size_t iosize;
@@ -2701,7 +2763,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
*/
set_extent_bit(tree, block_start,
block_start + iosize - 1,
- EXTENT_LOCKED, 0, NULL, GFP_NOFS);
+ EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
ret = submit_extent_page(READ, tree, page,
sector, iosize, page_offset, em->bdev,
NULL, 1,
@@ -2742,13 +2804,18 @@ int try_release_extent_state(struct extent_map_tree *map,
int ret = 1;
if (test_range_bit(tree, start, end,
- EXTENT_IOBITS | EXTENT_ORDERED, 0))
+ EXTENT_IOBITS, 0, NULL))
ret = 0;
else {
if ((mask & GFP_NOFS) == GFP_NOFS)
mask = GFP_NOFS;
- clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
- 1, 1, mask);
+ /*
+ * at this point we can safely clear everything except the
+ * locked bit and the nodatasum bit
+ */
+ clear_extent_bit(tree, start, end,
+ ~(EXTENT_LOCKED | EXTENT_NODATASUM),
+ 0, 0, NULL, mask);
}
return ret;
}
@@ -2771,29 +2838,28 @@ int try_release_extent_mapping(struct extent_map_tree *map,
u64 len;
while (start <= end) {
len = end - start + 1;
- spin_lock(&map->lock);
+ write_lock(&map->lock);
em = lookup_extent_mapping(map, start, len);
if (!em || IS_ERR(em)) {
- spin_unlock(&map->lock);
+ write_unlock(&map->lock);
break;
}
if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
em->start != start) {
- spin_unlock(&map->lock);
+ write_unlock(&map->lock);
free_extent_map(em);
break;
}
if (!test_range_bit(tree, em->start,
extent_map_end(em) - 1,
- EXTENT_LOCKED | EXTENT_WRITEBACK |
- EXTENT_ORDERED,
- 0)) {
+ EXTENT_LOCKED | EXTENT_WRITEBACK,
+ 0, NULL)) {
remove_extent_mapping(map, em);
/* once for the rb tree */
free_extent_map(em);
}
start = extent_map_end(em);
- spin_unlock(&map->lock);
+ write_unlock(&map->lock);
/* once for us */
free_extent_map(em);
@@ -3203,7 +3269,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
int uptodate;
unsigned long index;
- ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
+ ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
if (ret)
return 1;
while (start <= end) {
@@ -3233,7 +3299,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
return 1;
ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1);
+ EXTENT_UPTODATE, 1, NULL);
if (ret)
return ret;
@@ -3269,7 +3335,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
return 0;
if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1)) {
+ EXTENT_UPTODATE, 1, NULL)) {
return 0;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 5bc20abf3f3..14ed16fd862 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -13,10 +13,8 @@
#define EXTENT_DEFRAG (1 << 6)
#define EXTENT_DEFRAG_DONE (1 << 7)
#define EXTENT_BUFFER_FILLED (1 << 8)
-#define EXTENT_ORDERED (1 << 9)
-#define EXTENT_ORDERED_METADATA (1 << 10)
-#define EXTENT_BOUNDARY (1 << 11)
-#define EXTENT_NODATASUM (1 << 12)
+#define EXTENT_BOUNDARY (1 << 9)
+#define EXTENT_NODATASUM (1 << 10)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
/* flags for bio submission */
@@ -142,6 +140,8 @@ int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+ int bits, struct extent_state **cached, gfp_t mask);
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
@@ -155,11 +155,12 @@ u64 count_range_bits(struct extent_io_tree *tree,
u64 max_bytes, unsigned long bits);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int filled);
+ int bits, int filled, struct extent_state *cached_state);
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int wake, int delete, gfp_t mask);
+ int bits, int wake, int delete, struct extent_state **cached,
+ gfp_t mask);
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
@@ -282,5 +283,6 @@ int extent_clear_unlock_delalloc(struct inode *inode,
int clear_unlock,
int clear_delalloc, int clear_dirty,
int set_writeback,
- int end_writeback);
+ int end_writeback,
+ int set_private2);
#endif
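
The extent_io.h changes above add cached-state variants of the state helpers: lock_extent_bits() hands back the extent_state that covers the locked range, test_range_bit() accepts it so repeated checks can start from that cached record, and the caller drops its reference with free_extent_state(). A rough sketch of the intended calling pattern, based only on the prototypes shown here and not taken from the patch itself; tree, start and end are assumed to come from the caller, and error handling is omitted:

	struct extent_state *cached_state = NULL;

	/* lock the range and remember the extent_state that covers it */
	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);

	/* later checks on the same range can reuse the cached state */
	if (test_range_bit(tree, start, end, EXTENT_DELALLOC, 0, cached_state)) {
		/* the range is still marked delalloc */
	}

	unlock_extent(tree, start, end, GFP_NOFS);

	/* drop our reference on the cached state */
	free_extent_state(cached_state);
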
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 30c9365861e..2c726b7b9fa 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -36,7 +36,7 @@ void extent_map_exit(void)
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
tree->map.rb_node = NULL;
- spin_lock_init(&tree->lock);
+ rwlock_init(&tree->lock);
}
/**
@@ -198,6 +198,56 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
return 0;
}
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+ int ret = 0;
+ struct extent_map *merge = NULL;
+ struct rb_node *rb;
+ struct extent_map *em;
+
+ write_lock(&tree->lock);
+ em = lookup_extent_mapping(tree, start, len);
+
+ WARN_ON(!em || em->start != start);
+
+ if (!em)
+ goto out;
+
+ clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+ if (em->start != 0) {
+ rb = rb_prev(&em->rb_node);
+ if (rb)
+ merge = rb_entry(rb, struct extent_map, rb_node);
+ if (rb && mergable_maps(merge, em)) {
+ em->start = merge->start;
+ em->len += merge->len;
+ em->block_len += merge->block_len;
+ em->block_start = merge->block_start;
+ merge->in_tree = 0;
+ rb_erase(&merge->rb_node, &tree->map);
+ free_extent_map(merge);
+ }
+ }
+
+ rb = rb_next(&em->rb_node);
+ if (rb)
+ merge = rb_entry(rb, struct extent_map, rb_node);
+ if (rb && mergable_maps(em, merge)) {
+ em->len += merge->len;
+ em->block_len += merge->block_len;
+ rb_erase(&merge->rb_node, &tree->map);
+ merge->in_tree = 0;
+ free_extent_map(merge);
+ }
+
+ free_extent_map(em);
+out:
+ write_unlock(&tree->lock);
+ return ret;
+
+}
+
/**
* add_extent_mapping - add new extent map to the extent tree
* @tree: tree to insert new map in
@@ -222,7 +272,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
ret = -EEXIST;
goto out;
}
- assert_spin_locked(&tree->lock);
rb = tree_insert(&tree->map, em->start, &em->rb_node);
if (rb) {
ret = -EEXIST;
@@ -285,7 +334,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
struct rb_node *next = NULL;
u64 end = range_end(start, len);
- assert_spin_locked(&tree->lock);
rb_node = __tree_search(&tree->map, start, &prev, &next);
if (!rb_node && prev) {
em = rb_entry(prev, struct extent_map, rb_node);
@@ -319,6 +367,54 @@ out:
}
/**
+ * search_extent_mapping - find a nearby extent map
+ * @tree: tree to lookup in
+ * @start: byte offset to start the search
+ * @len: length of the lookup range
+ *
+ * Find and return the first extent_map struct in @tree that intersects the
+ * [start, start + len) range.
+ *
+ * If one can't be found, a nearby extent may be returned instead.
+ */
+struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
+{
+ struct extent_map *em;
+ struct rb_node *rb_node;
+ struct rb_node *prev = NULL;
+ struct rb_node *next = NULL;
+
+ rb_node = __tree_search(&tree->map, start, &prev, &next);
+ if (!rb_node && prev) {
+ em = rb_entry(prev, struct extent_map, rb_node);
+ goto found;
+ }
+ if (!rb_node && next) {
+ em = rb_entry(next, struct extent_map, rb_node);
+ goto found;
+ }
+ if (!rb_node) {
+ em = NULL;
+ goto out;
+ }
+ if (IS_ERR(rb_node)) {
+ em = ERR_PTR(PTR_ERR(rb_node));
+ goto out;
+ }
+ em = rb_entry(rb_node, struct extent_map, rb_node);
+ goto found;
+
+ em = NULL;
+ goto out;
+
+found:
+ atomic_inc(&em->refs);
+out:
+ return em;
+}
+
+/**
* remove_extent_mapping - removes an extent_map from the extent tree
* @tree: extent tree to remove from
* @em: extent map being removed
@@ -331,7 +427,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
int ret = 0;
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
- assert_spin_locked(&tree->lock);
rb_erase(&em->rb_node, &tree->map);
em->in_tree = 0;
return ret;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index fb6eeef06bb..ab6d74b6e64 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -31,7 +31,7 @@ struct extent_map {
struct extent_map_tree {
struct rb_root map;
- spinlock_t lock;
+ rwlock_t lock;
};
static inline u64 extent_map_end(struct extent_map *em)
@@ -59,4 +59,7 @@ struct extent_map *alloc_extent_map(gfp_t mask);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
+struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len);
#endif
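
With extent_map_tree.lock converted from a spinlock to an rwlock, every caller in this patch picks a side: lookups run under read_lock(), while add_extent_mapping(), remove_extent_mapping() and unpin_extent_cache() take write_lock(). A minimal sketch of the two access patterns, not part of the patch; inode, start, len and a prebuilt map new_em are assumed to come from the surrounding code:

	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	/* read side: concurrent lookups are now allowed */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);
	if (em)
		free_extent_map(em);	/* lookup took a reference */

	/* write side: insertions and removals still need exclusion */
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, new_em);
	write_unlock(&em_tree->lock);
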
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 4b833972273..571ad3c13b4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -112,8 +112,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
int err = 0;
int i;
struct inode *inode = fdentry(file)->d_inode;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- u64 hint_byte;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
@@ -125,22 +123,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
end_of_last_block = start_pos + num_bytes - 1;
-
- lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
- trans = btrfs_join_transaction(root, 1);
- if (!trans) {
- err = -ENOMEM;
- goto out_unlock;
- }
- btrfs_set_trans_block_group(trans, inode);
- hint_byte = 0;
-
- set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
- /* check for reserved extents on each page, we don't want
- * to reset the delalloc bit on things that already have
- * extents reserved.
- */
btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
@@ -155,9 +137,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
* at this time.
*/
}
- err = btrfs_end_transaction(trans, root);
-out_unlock:
- unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
return err;
}
@@ -189,18 +168,18 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
if (!split2)
split2 = alloc_extent_map(GFP_NOFS);
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (!em) {
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
break;
}
flags = em->flags;
if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
- spin_unlock(&em_tree->lock);
if (em->start <= start &&
(!testend || em->start + em->len >= start + len)) {
free_extent_map(em);
+ write_unlock(&em_tree->lock);
break;
}
if (start < em->start) {
@@ -210,6 +189,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
start = em->start + em->len;
}
free_extent_map(em);
+ write_unlock(&em_tree->lock);
continue;
}
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -260,7 +240,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
free_extent_map(split);
split = NULL;
}
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
/* once for us */
free_extent_map(em);
@@ -289,7 +269,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
u64 start, u64 end, u64 locked_end,
- u64 inline_limit, u64 *hint_byte)
+ u64 inline_limit, u64 *hint_byte, int drop_cache)
{
u64 extent_end = 0;
u64 search_start = start;
@@ -314,7 +294,8 @@ noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
int ret;
inline_limit = 0;
- btrfs_drop_extent_cache(inode, start, end - 1, 0);
+ if (drop_cache)
+ btrfs_drop_extent_cache(inode, start, end - 1, 0);
path = btrfs_alloc_path();
if (!path)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5edcee3a617..5c2caad7621 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -259,7 +259,9 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
{
- u64 max_bytes, possible_bytes;
+ u64 max_bytes;
+ u64 bitmap_bytes;
+ u64 extent_bytes;
/*
* The goal is to keep the total amount of memory used per 1gb of space
@@ -269,22 +271,27 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
max_bytes = MAX_CACHE_BYTES_PER_GIG *
(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
- possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
- (sizeof(struct btrfs_free_space) *
- block_group->extents_thresh);
+ /*
+ * we want to account for 1 more bitmap than what we have so we can make
+ * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
+ * we add more bitmaps.
+ */
+ bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
- if (possible_bytes > max_bytes) {
- int extent_bytes = max_bytes -
- (block_group->total_bitmaps * PAGE_CACHE_SIZE);
+ if (bitmap_bytes >= max_bytes) {
+ block_group->extents_thresh = 0;
+ return;
+ }
- if (extent_bytes <= 0) {
- block_group->extents_thresh = 0;
- return;
- }
+ /*
+ * we want the extent entry threshold to always be at most 1/2 the max
+ * bytes we can have, or whatever is left after the bitmaps, whichever is less.
+ */
+ extent_bytes = max_bytes - bitmap_bytes;
+ extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
- block_group->extents_thresh = extent_bytes /
- (sizeof(struct btrfs_free_space));
- }
+ block_group->extents_thresh =
+ div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
@@ -403,6 +410,7 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
BUG_ON(block_group->total_bitmaps >= max_bitmaps);
info->offset = offset_to_bitmap(block_group, offset);
+ info->bytes = 0;
link_free_space(block_group, info);
block_group->total_bitmaps++;
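
For reference, the rewritten recalculate_thresholds() budgets MAX_CACHE_BYTES_PER_GIG of memory per 1gb of block group space: bitmap pages are charged first (plus one page of headroom), and whatever remains, capped at half of the budget, is divided by sizeof(struct btrfs_free_space) to get extents_thresh. A worked example with assumed numbers (4096-byte pages, a 32 KiB per-GiB budget and 32-byte free-space entries; the real constants may differ):

	max_bytes      = 32768                           (1gb block group)
	bitmap_bytes   = (1 + 1) * 4096 = 8192           (one bitmap already in use)
	extent_bytes   = min(32768 - 8192, 32768 / 2) = 16384
	extents_thresh = 16384 / 32 = 512 extent entries
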
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 6b627c61180..72ce3c173d6 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -149,6 +149,8 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)(ref + 1);
ret = 0;
} else if (ret < 0) {
+ if (ret == -EOVERFLOW)
+ ret = -EMLINK;
goto out;
} else {
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -177,8 +179,6 @@ int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_inode_item));
- if (ret == 0 && objectid > root->highest_inode)
- root->highest_inode = objectid;
return ret;
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 9abbced1123..c56eb590917 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -43,9 +43,10 @@ int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
slot = path->slots[0] - 1;
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &found_key, slot);
- *objectid = found_key.objectid;
+ *objectid = max_t(u64, found_key.objectid,
+ BTRFS_FIRST_FREE_OBJECTID - 1);
} else {
- *objectid = BTRFS_FIRST_FREE_OBJECTID;
+ *objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
}
ret = 0;
error:
@@ -53,91 +54,27 @@ error:
return ret;
}
-/*
- * walks the btree of allocated inodes and find a hole.
- */
int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 dirid, u64 *objectid)
{
- struct btrfs_path *path;
- struct btrfs_key key;
int ret;
- int slot = 0;
- u64 last_ino = 0;
- int start_found;
- struct extent_buffer *l;
- struct btrfs_key search_key;
- u64 search_start = dirid;
-
mutex_lock(&root->objectid_mutex);
- if (root->last_inode_alloc >= BTRFS_FIRST_FREE_OBJECTID &&
- root->last_inode_alloc < BTRFS_LAST_FREE_OBJECTID) {
- *objectid = ++root->last_inode_alloc;
- mutex_unlock(&root->objectid_mutex);
- return 0;
- }
- path = btrfs_alloc_path();
- BUG_ON(!path);
- search_start = max(search_start, (u64)BTRFS_FIRST_FREE_OBJECTID);
- search_key.objectid = search_start;
- search_key.type = 0;
- search_key.offset = 0;
-
- start_found = 0;
- ret = btrfs_search_slot(trans, root, &search_key, path, 0, 0);
- if (ret < 0)
- goto error;
- while (1) {
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto error;
- if (!start_found) {
- *objectid = search_start;
- start_found = 1;
- goto found;
- }
- *objectid = last_ino > search_start ?
- last_ino : search_start;
- goto found;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
- if (key.objectid >= search_start) {
- if (start_found) {
- if (last_ino < search_start)
- last_ino = search_start;
- if (key.objectid > last_ino) {
- *objectid = last_ino;
- goto found;
- }
- } else if (key.objectid > search_start) {
- *objectid = search_start;
- goto found;
- }
- }
- if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
- break;
+ if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
+ ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+ if (ret)
+ goto out;
+ }
- start_found = 1;
- last_ino = key.objectid + 1;
- path->slots[0]++;
+ if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
+ ret = -ENOSPC;
+ goto out;
}
- BUG_ON(1);
-found:
- btrfs_release_path(root, path);
- btrfs_free_path(path);
- BUG_ON(*objectid < search_start);
- mutex_unlock(&root->objectid_mutex);
- return 0;
-error:
- btrfs_release_path(root, path);
- btrfs_free_path(path);
+
+ *objectid = ++root->highest_objectid;
+ ret = 0;
+out:
mutex_unlock(&root->objectid_mutex);
return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 59cba180fe8..e9b76bcd1c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -55,13 +55,13 @@ struct btrfs_iget_args {
struct btrfs_root *root;
};
-static struct inode_operations btrfs_dir_inode_operations;
-static struct inode_operations btrfs_symlink_inode_operations;
-static struct inode_operations btrfs_dir_ro_inode_operations;
-static struct inode_operations btrfs_special_inode_operations;
-static struct inode_operations btrfs_file_inode_operations;
-static struct address_space_operations btrfs_aops;
-static struct address_space_operations btrfs_symlink_aops;
+static const struct inode_operations btrfs_dir_inode_operations;
+static const struct inode_operations btrfs_symlink_inode_operations;
+static const struct inode_operations btrfs_dir_ro_inode_operations;
+static const struct inode_operations btrfs_special_inode_operations;
+static const struct inode_operations btrfs_file_inode_operations;
+static const struct address_space_operations btrfs_aops;
+static const struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
@@ -231,7 +231,8 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
}
ret = btrfs_drop_extents(trans, root, inode, start,
- aligned_end, aligned_end, start, &hint_byte);
+ aligned_end, aligned_end, start,
+ &hint_byte, 1);
BUG_ON(ret);
if (isize > actual_end)
@@ -240,7 +241,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
inline_len, compressed_size,
compressed_pages);
BUG_ON(ret);
- btrfs_drop_extent_cache(inode, start, aligned_end, 0);
+ btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
}
@@ -425,7 +426,7 @@ again:
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL, 1, 0,
- 0, 1, 1, 1);
+ 0, 1, 1, 1, 0);
ret = 0;
goto free_pages_out;
}
@@ -611,9 +612,9 @@ static noinline int submit_compressed_extents(struct inode *inode,
set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
while (1) {
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
@@ -640,7 +641,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- NULL, 1, 1, 0, 1, 1, 0);
+ NULL, 1, 1, 0, 1, 1, 0, 0);
ret = btrfs_submit_compressed_write(inode,
async_extent->start,
@@ -713,7 +714,7 @@ static noinline int cow_file_range(struct inode *inode,
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
start, end, NULL, 1, 1,
- 1, 1, 1, 1);
+ 1, 1, 1, 1, 0);
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
@@ -725,6 +726,15 @@ static noinline int cow_file_range(struct inode *inode,
BUG_ON(disk_num_bytes >
btrfs_super_total_bytes(&root->fs_info->super_copy));
+
+ read_lock(&BTRFS_I(inode)->extent_tree.lock);
+ em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
+ start, num_bytes);
+ if (em) {
+ alloc_hint = em->block_start;
+ free_extent_map(em);
+ }
+ read_unlock(&BTRFS_I(inode)->extent_tree.lock);
btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
while (disk_num_bytes > 0) {
@@ -737,7 +747,6 @@ static noinline int cow_file_range(struct inode *inode,
em = alloc_extent_map(GFP_NOFS);
em->start = start;
em->orig_start = em->start;
-
ram_size = ins.offset;
em->len = ins.offset;
@@ -747,9 +756,9 @@ static noinline int cow_file_range(struct inode *inode,
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
@@ -776,11 +785,14 @@ static noinline int cow_file_range(struct inode *inode,
/* we're not doing compressed IO, don't unlock the first
* page (which the caller expects to stay locked), don't
* clear any dirty bits and don't set any writeback bits
+ *
+ * Do set the Private2 bit so we know this page was properly
+ * set up for writepage
*/
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
start, start + ram_size - 1,
locked_page, unlock, 1,
- 1, 0, 0, 0);
+ 1, 0, 0, 0, 1);
disk_num_bytes -= cur_alloc_size;
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
@@ -853,7 +865,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
int limit = 10 * 1024 * 1042;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
- EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+ EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
async_cow->inode = inode;
@@ -1080,9 +1092,9 @@ out_check:
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
break;
@@ -1101,7 +1113,7 @@ out_check:
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
cur_offset, cur_offset + num_bytes - 1,
- locked_page, 1, 1, 1, 0, 0, 0);
+ locked_page, 1, 1, 1, 0, 0, 0, 1);
cur_offset = extent_end;
if (cur_offset > end)
break;
@@ -1374,10 +1386,8 @@ again:
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
/* already ordered? We're done */
- if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
- EXTENT_ORDERED, 0)) {
+ if (PagePrivate2(page))
goto out;
- }
ordered = btrfs_lookup_ordered_extent(inode, page_start);
if (ordered) {
@@ -1413,11 +1423,9 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
struct inode *inode = page->mapping->host;
struct btrfs_writepage_fixup *fixup;
struct btrfs_root *root = BTRFS_I(inode)->root;
- int ret;
- ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
- EXTENT_ORDERED, 0);
- if (ret)
+ /* this page is properly in the ordered list */
+ if (TestClearPagePrivate2(page))
return 0;
if (PageChecked(page))
@@ -1455,9 +1463,19 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
BUG_ON(!path);
path->leave_spinning = 1;
+
+ /*
+ * we may be replacing one extent in the tree with another.
+ * The new extent is pinned in the extent map, and we don't want
+ * to drop it from the cache until it is completely in the btree.
+ *
+ * So, tell btrfs_drop_extents to leave this extent in the cache.
+ * The caller is expected to unpin it and allow it to be merged
+ * with the others.
+ */
ret = btrfs_drop_extents(trans, root, inode, file_pos,
file_pos + num_bytes, locked_end,
- file_pos, &hint);
+ file_pos, &hint, 0);
BUG_ON(ret);
ins.objectid = inode->i_ino;
@@ -1485,7 +1503,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
inode_add_bytes(inode, num_bytes);
- btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
@@ -1596,6 +1613,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
ordered_extent->len,
compressed, 0, 0,
BTRFS_FILE_EXTENT_REG);
+ unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+ ordered_extent->file_offset,
+ ordered_extent->len);
BUG_ON(ret);
}
unlock_extent(io_tree, ordered_extent->file_offset,
@@ -1623,6 +1643,7 @@ nocow:
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate)
{
+ ClearPagePrivate2(page);
return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
@@ -1669,13 +1690,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
failrec->last_mirror = 0;
failrec->bio_flags = 0;
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, failrec->len);
if (em->start > start || em->start + em->len < start) {
free_extent_map(em);
em = NULL;
}
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
if (!em || IS_ERR(em)) {
kfree(failrec);
@@ -1794,7 +1815,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
return 0;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
- test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+ test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
GFP_NOFS);
return 0;
@@ -2352,6 +2373,69 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
return ret;
}
+int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *dir, u64 objectid,
+ const char *name, int name_len)
+{
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ struct btrfs_dir_item *di;
+ struct btrfs_key key;
+ u64 index;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+ name, name_len, -1);
+ BUG_ON(!di || IS_ERR(di));
+
+ leaf = path->nodes[0];
+ btrfs_dir_item_key_to_cpu(leaf, di, &key);
+ WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ BUG_ON(ret);
+ btrfs_release_path(root, path);
+
+ ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
+ objectid, root->root_key.objectid,
+ dir->i_ino, &index, name, name_len);
+ if (ret < 0) {
+ BUG_ON(ret != -ENOENT);
+ di = btrfs_search_dir_index_item(root, path, dir->i_ino,
+ name, name_len);
+ BUG_ON(!di || IS_ERR(di));
+
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_release_path(root, path);
+ index = key.offset;
+ }
+
+ di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
+ index, name, name_len, -1);
+ BUG_ON(!di || IS_ERR(di));
+
+ leaf = path->nodes[0];
+ btrfs_dir_item_key_to_cpu(leaf, di, &key);
+ WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
+ BUG_ON(ret);
+ btrfs_release_path(root, path);
+
+ btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ ret = btrfs_update_inode(trans, root, dir);
+ BUG_ON(ret);
+ dir->i_sb->s_dirt = 1;
+
+ btrfs_free_path(path);
+ return 0;
+}
+
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
@@ -2361,29 +2445,31 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
struct btrfs_trans_handle *trans;
unsigned long nr = 0;
- /*
- * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
- * the root of a subvolume or snapshot
- */
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
- inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
return -ENOTEMPTY;
- }
trans = btrfs_start_transaction(root, 1);
btrfs_set_trans_block_group(trans, dir);
+ if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ err = btrfs_unlink_subvol(trans, root, dir,
+ BTRFS_I(inode)->location.objectid,
+ dentry->d_name.name,
+ dentry->d_name.len);
+ goto out;
+ }
+
err = btrfs_orphan_add(trans, inode);
if (err)
- goto fail_trans;
+ goto out;
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
if (!err)
btrfs_i_size_write(inode, 0);
-
-fail_trans:
+out:
nr = trans->blocks_used;
ret = btrfs_end_transaction_throttle(trans, root);
btrfs_btree_balance_dirty(root, nr);
@@ -2935,7 +3021,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
cur_offset,
cur_offset + hole_size,
block_end,
- cur_offset, &hint_byte);
+ cur_offset, &hint_byte, 1);
if (err)
break;
err = btrfs_insert_file_extent(trans, root,
@@ -3003,6 +3089,11 @@ void btrfs_delete_inode(struct inode *inode)
}
btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (inode->i_nlink > 0) {
+ BUG_ON(btrfs_root_refs(&root->root_item) != 0);
+ goto no_delete;
+ }
+
btrfs_i_size_write(inode, 0);
trans = btrfs_join_transaction(root, 1);
@@ -3070,29 +3161,67 @@ out_err:
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_root *root,
- struct btrfs_key *location,
- struct btrfs_root **sub_root,
- struct dentry *dentry)
+ struct inode *dir,
+ struct dentry *dentry,
+ struct btrfs_key *location,
+ struct btrfs_root **sub_root)
{
- struct btrfs_root_item *ri;
+ struct btrfs_path *path;
+ struct btrfs_root *new_root;
+ struct btrfs_root_ref *ref;
+ struct extent_buffer *leaf;
+ int ret;
+ int err = 0;
- if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
- return 0;
- if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
- return 0;
+ path = btrfs_alloc_path();
+ if (!path) {
+ err = -ENOMEM;
+ goto out;
+ }
- *sub_root = btrfs_read_fs_root(root->fs_info, location,
- dentry->d_name.name,
- dentry->d_name.len);
- if (IS_ERR(*sub_root))
- return PTR_ERR(*sub_root);
+ err = -ENOENT;
+ ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
+ BTRFS_I(dir)->root->root_key.objectid,
+ location->objectid);
+ if (ret) {
+ if (ret < 0)
+ err = ret;
+ goto out;
+ }
- ri = &(*sub_root)->root_item;
- location->objectid = btrfs_root_dirid(ri);
- btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
- location->offset = 0;
+ leaf = path->nodes[0];
+ ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
+ if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
+ btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
+ goto out;
- return 0;
+ ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
+ (unsigned long)(ref + 1),
+ dentry->d_name.len);
+ if (ret)
+ goto out;
+
+ btrfs_release_path(root->fs_info->tree_root, path);
+
+ new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
+ if (IS_ERR(new_root)) {
+ err = PTR_ERR(new_root);
+ goto out;
+ }
+
+ if (btrfs_root_refs(&new_root->root_item) == 0) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ *sub_root = new_root;
+ location->objectid = btrfs_root_dirid(&new_root->root_item);
+ location->type = BTRFS_INODE_ITEM_KEY;
+ location->offset = 0;
+ err = 0;
+out:
+ btrfs_free_path(path);
+ return err;
}
static void inode_tree_add(struct inode *inode)
@@ -3101,11 +3230,13 @@ static void inode_tree_add(struct inode *inode)
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
-
again:
p = &root->inode_tree.rb_node;
parent = NULL;
+ if (hlist_unhashed(&inode->i_hash))
+ return;
+
spin_lock(&root->inode_lock);
while (*p) {
parent = *p;
@@ -3132,13 +3263,87 @@ again:
static void inode_tree_del(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ int empty = 0;
spin_lock(&root->inode_lock);
if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
+ empty = RB_EMPTY_ROOT(&root->inode_tree);
}
spin_unlock(&root->inode_lock);
+
+ if (empty && btrfs_root_refs(&root->root_item) == 0) {
+ synchronize_srcu(&root->fs_info->subvol_srcu);
+ spin_lock(&root->inode_lock);
+ empty = RB_EMPTY_ROOT(&root->inode_tree);
+ spin_unlock(&root->inode_lock);
+ if (empty)
+ btrfs_add_dead_root(root);
+ }
+}
+
+int btrfs_invalidate_inodes(struct btrfs_root *root)
+{
+ struct rb_node *node;
+ struct rb_node *prev;
+ struct btrfs_inode *entry;
+ struct inode *inode;
+ u64 objectid = 0;
+
+ WARN_ON(btrfs_root_refs(&root->root_item) != 0);
+
+ spin_lock(&root->inode_lock);
+again:
+ node = root->inode_tree.rb_node;
+ prev = NULL;
+ while (node) {
+ prev = node;
+ entry = rb_entry(node, struct btrfs_inode, rb_node);
+
+ if (objectid < entry->vfs_inode.i_ino)
+ node = node->rb_left;
+ else if (objectid > entry->vfs_inode.i_ino)
+ node = node->rb_right;
+ else
+ break;
+ }
+ if (!node) {
+ while (prev) {
+ entry = rb_entry(prev, struct btrfs_inode, rb_node);
+ if (objectid <= entry->vfs_inode.i_ino) {
+ node = prev;
+ break;
+ }
+ prev = rb_next(prev);
+ }
+ }
+ while (node) {
+ entry = rb_entry(node, struct btrfs_inode, rb_node);
+ objectid = entry->vfs_inode.i_ino + 1;
+ inode = igrab(&entry->vfs_inode);
+ if (inode) {
+ spin_unlock(&root->inode_lock);
+ if (atomic_read(&inode->i_count) > 1)
+ d_prune_aliases(inode);
+ /*
+ * btrfs_drop_inode will remove it from
+ * the inode cache when its usage count
+ * hits zero.
+ */
+ iput(inode);
+ cond_resched();
+ spin_lock(&root->inode_lock);
+ goto again;
+ }
+
+ if (cond_resched_lock(&root->inode_lock))
+ goto again;
+
+ node = rb_next(node);
+ }
+ spin_unlock(&root->inode_lock);
+ return 0;
}
static noinline void init_btrfs_i(struct inode *inode)
@@ -3225,15 +3430,41 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
return inode;
}
+static struct inode *new_simple_dir(struct super_block *s,
+ struct btrfs_key *key,
+ struct btrfs_root *root)
+{
+ struct inode *inode = new_inode(s);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ init_btrfs_i(inode);
+
+ BTRFS_I(inode)->root = root;
+ memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
+ BTRFS_I(inode)->dummy_inode = 1;
+
+ inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+
+ return inode;
+}
+
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
struct inode *inode;
- struct btrfs_inode *bi = BTRFS_I(dir);
- struct btrfs_root *root = bi->root;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
struct btrfs_key location;
+ int index;
int ret;
+ dentry->d_op = &btrfs_dentry_operations;
+
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
@@ -3242,29 +3473,50 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (ret < 0)
return ERR_PTR(ret);
- inode = NULL;
- if (location.objectid) {
- ret = fixup_tree_root_location(root, &location, &sub_root,
- dentry);
- if (ret < 0)
- return ERR_PTR(ret);
- if (ret > 0)
- return ERR_PTR(-ENOENT);
+ if (location.objectid == 0)
+ return NULL;
+
+ if (location.type == BTRFS_INODE_ITEM_KEY) {
+ inode = btrfs_iget(dir->i_sb, &location, root);
+ return inode;
+ }
+
+ BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
+
+ index = srcu_read_lock(&root->fs_info->subvol_srcu);
+ ret = fixup_tree_root_location(root, dir, dentry,
+ &location, &sub_root);
+ if (ret < 0) {
+ if (ret != -ENOENT)
+ inode = ERR_PTR(ret);
+ else
+ inode = new_simple_dir(dir->i_sb, &location, sub_root);
+ } else {
inode = btrfs_iget(dir->i_sb, &location, sub_root);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
}
+ srcu_read_unlock(&root->fs_info->subvol_srcu, index);
+
return inode;
}
+static int btrfs_dentry_delete(struct dentry *dentry)
+{
+ struct btrfs_root *root;
+
+ if (!dentry->d_inode)
+ return 0;
+
+ root = BTRFS_I(dentry->d_inode)->root;
+ if (btrfs_root_refs(&root->root_item) == 0)
+ return 1;
+ return 0;
+}
+
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct inode *inode;
- if (dentry->d_name.len > BTRFS_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
inode = btrfs_lookup_dentry(dir, dentry);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -3603,9 +3855,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (ret != 0)
goto fail;
- if (objectid > root->highest_inode)
- root->highest_inode = objectid;
-
inode->i_uid = current_fsuid();
if (dir && (dir->i_mode & S_ISGID)) {
@@ -3673,26 +3922,35 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
struct inode *parent_inode, struct inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
- int ret;
+ int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = BTRFS_I(parent_inode)->root;
- key.objectid = inode->i_ino;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
- key.offset = 0;
+ if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
+ } else {
+ key.objectid = inode->i_ino;
+ btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.offset = 0;
+ }
+
+ if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
+ key.objectid, root->root_key.objectid,
+ parent_inode->i_ino,
+ index, name, name_len);
+ } else if (add_backref) {
+ ret = btrfs_insert_inode_ref(trans, root,
+ name, name_len, inode->i_ino,
+ parent_inode->i_ino, index);
+ }
- ret = btrfs_insert_dir_item(trans, root, name, name_len,
- parent_inode->i_ino,
- &key, btrfs_inode_type(inode),
- index);
if (ret == 0) {
- if (add_backref) {
- ret = btrfs_insert_inode_ref(trans, root,
- name, name_len,
- inode->i_ino,
- parent_inode->i_ino,
- index);
- }
+ ret = btrfs_insert_dir_item(trans, root, name, name_len,
+ parent_inode->i_ino, &key,
+ btrfs_inode_type(inode), index);
+ BUG_ON(ret);
+
btrfs_i_size_write(parent_inode, parent_inode->i_size +
name_len * 2);
parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
@@ -3875,18 +4133,16 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
err = btrfs_add_nondir(trans, dentry, inode, 1, index);
- if (err)
- drop_inode = 1;
-
- btrfs_update_inode_block_group(trans, dir);
- err = btrfs_update_inode(trans, root, inode);
-
- if (err)
+ if (err) {
drop_inode = 1;
+ } else {
+ btrfs_update_inode_block_group(trans, dir);
+ err = btrfs_update_inode(trans, root, inode);
+ BUG_ON(err);
+ btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
+ }
nr = trans->blocks_used;
-
- btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
btrfs_end_transaction_throttle(trans, root);
fail:
if (drop_inode) {
@@ -4064,11 +4320,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
int compressed;
again:
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
em->bdev = root->fs_info->fs_devices->latest_bdev;
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
if (em) {
if (em->start > start || em->start + em->len <= start)
@@ -4215,6 +4471,11 @@ again:
map = kmap(page);
read_extent_buffer(leaf, map + pg_offset, ptr,
copy_size);
+ if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+ memset(map + pg_offset + copy_size, 0,
+ PAGE_CACHE_SIZE - pg_offset -
+ copy_size);
+ }
kunmap(page);
}
flush_dcache_page(page);
@@ -4259,7 +4520,7 @@ insert:
}
err = 0;
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
@@ -4299,7 +4560,7 @@ insert:
err = 0;
}
}
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
out:
if (path)
btrfs_free_path(path);
@@ -4398,13 +4659,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+
+ /*
+ * we have the page locked, so new writeback can't start,
+ * and the dirty bit won't be cleared while we are here.
+ *
+ * Wait for IO on this page so that we can safely clear
+ * the PagePrivate2 bit and do ordered accounting
+ */
wait_on_page_writeback(page);
+
tree = &BTRFS_I(page->mapping->host)->io_tree;
if (offset) {
btrfs_releasepage(page, GFP_NOFS);
return;
}
-
lock_extent(tree, page_start, page_end, GFP_NOFS);
ordered = btrfs_lookup_ordered_extent(page->mapping->host,
page_offset(page));
@@ -4415,16 +4684,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
*/
clear_extent_bit(tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_LOCKED, 1, 0, GFP_NOFS);
- btrfs_finish_ordered_io(page->mapping->host,
- page_start, page_end);
+ EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+ /*
+ * whoever cleared the private bit is responsible
+ * for the finish_ordered_io
+ */
+ if (TestClearPagePrivate2(page)) {
+ btrfs_finish_ordered_io(page->mapping->host,
+ page_start, page_end);
+ }
btrfs_put_ordered_extent(ordered);
lock_extent(tree, page_start, page_end, GFP_NOFS);
}
clear_extent_bit(tree, page_start, page_end,
- EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_ORDERED,
- 1, 1, GFP_NOFS);
+ EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
+ 1, 1, NULL, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
ClearPageChecked(page);
@@ -4521,11 +4795,14 @@ again:
}
ClearPageChecked(page);
set_page_dirty(page);
+ SetPageUptodate(page);
BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
out_unlock:
+ if (!ret)
+ return VM_FAULT_LOCKED;
unlock_page(page);
out:
return ret;
@@ -4594,11 +4871,11 @@ out:
* create a new subvolume directory/inode (helper for the ioctl).
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *new_root, struct dentry *dentry,
+ struct btrfs_root *new_root,
u64 new_dirid, u64 alloc_hint)
{
struct inode *inode;
- int error;
+ int err;
u64 index = 0;
inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
@@ -4611,11 +4888,10 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
inode->i_nlink = 1;
btrfs_i_size_write(inode, 0);
- error = btrfs_update_inode(trans, new_root, inode);
- if (error)
- return error;
+ err = btrfs_update_inode(trans, new_root, inode);
+ BUG_ON(err);
- d_instantiate(dentry, inode);
+ iput(inode);
return 0;
}
@@ -4693,6 +4969,16 @@ void btrfs_destroy_inode(struct inode *inode)
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
+void btrfs_drop_inode(struct inode *inode)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
+ generic_delete_inode(inode);
+ else
+ generic_drop_inode(inode);
+}
+
static void init_once(void *foo)
{
struct btrfs_inode *ei = (struct btrfs_inode *) foo;
@@ -4761,31 +5047,32 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(old_dir)->root;
+ struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
struct timespec ctime = CURRENT_TIME;
u64 index = 0;
+ u64 root_objectid;
int ret;
- /* we're not allowed to rename between subvolumes */
- if (BTRFS_I(old_inode)->root->root_key.objectid !=
- BTRFS_I(new_dir)->root->root_key.objectid)
+ if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+ return -EPERM;
+
+ /* we only allow rename subvolume link between subvolumes */
+ if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
return -EXDEV;
- if (S_ISDIR(old_inode->i_mode) && new_inode &&
- new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
+ if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
+ (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
return -ENOTEMPTY;
- }
- /* to rename a snapshot or subvolume, we need to juggle the
- * backrefs. This isn't coded yet
- */
- if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
- return -EXDEV;
+ if (S_ISDIR(old_inode->i_mode) && new_inode &&
+ new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
+ return -ENOTEMPTY;
ret = btrfs_check_metadata_free_space(root);
if (ret)
- goto out_unlock;
+ return ret;
/*
* we're using rename to replace one file with another.
@@ -4796,8 +5083,40 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(old_inode->i_mapping);
+ /* close the racy window with snapshot create/destroy ioctl */
+ if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ down_read(&root->fs_info->subvol_sem);
+
trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, new_dir);
+
+ if (dest != root)
+ btrfs_record_root_in_trans(trans, dest);
+ ret = btrfs_set_inode_index(new_dir, &index);
+ if (ret)
+ goto out_fail;
+
+ if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ /* force full log commit if subvolume involved. */
+ root->fs_info->last_trans_log_full_commit = trans->transid;
+ } else {
+ ret = btrfs_insert_inode_ref(trans, dest,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len,
+ old_inode->i_ino,
+ new_dir->i_ino, index);
+ if (ret)
+ goto out_fail;
+ /*
+ * this is an ugly little race, but the rename is required
+ * to make sure that if we crash, the inode is either at the
+ * old name or the new one. Pinning the log transaction lets
+ * us make sure we don't allow a log commit to come in after
+ * we unlink the name but before we add the new name back in.
+ */
+ btrfs_pin_log_trans(root);
+ }
/*
* make sure the inode gets flushed if it is replacing
* something.
@@ -4807,18 +5126,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
btrfs_add_ordered_operation(trans, root, old_inode);
}
- /*
- * this is an ugly little race, but the rename is required to make
- * sure that if we crash, the inode is either at the old name
- * or the new one. pinning the log transaction lets us make sure
- * we don't allow a log commit to come in after we unlink the
- * name but before we add the new name back in.
- */
- btrfs_pin_log_trans(root);
-
- btrfs_set_trans_block_group(trans, new_dir);
-
- btrfs_inc_nlink(old_dentry->d_inode);
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
old_inode->i_ctime = ctime;
@@ -4826,47 +5133,58 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
- ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
- old_dentry->d_name.name,
- old_dentry->d_name.len);
- if (ret)
- goto out_fail;
+ if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+ root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
+ ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
+ old_dentry->d_name.name,
+ old_dentry->d_name.len);
+ } else {
+ btrfs_inc_nlink(old_dentry->d_inode);
+ ret = btrfs_unlink_inode(trans, root, old_dir,
+ old_dentry->d_inode,
+ old_dentry->d_name.name,
+ old_dentry->d_name.len);
+ }
+ BUG_ON(ret);
if (new_inode) {
new_inode->i_ctime = CURRENT_TIME;
- ret = btrfs_unlink_inode(trans, root, new_dir,
- new_dentry->d_inode,
- new_dentry->d_name.name,
- new_dentry->d_name.len);
- if (ret)
- goto out_fail;
+ if (unlikely(new_inode->i_ino ==
+ BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+ root_objectid = BTRFS_I(new_inode)->location.objectid;
+ ret = btrfs_unlink_subvol(trans, dest, new_dir,
+ root_objectid,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len);
+ BUG_ON(new_inode->i_nlink == 0);
+ } else {
+ ret = btrfs_unlink_inode(trans, dest, new_dir,
+ new_dentry->d_inode,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len);
+ }
+ BUG_ON(ret);
if (new_inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, new_dentry->d_inode);
- if (ret)
- goto out_fail;
+ BUG_ON(ret);
}
-
}
- ret = btrfs_set_inode_index(new_dir, &index);
- if (ret)
- goto out_fail;
- ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
- old_inode, new_dentry->d_name.name,
- new_dentry->d_name.len, 1, index);
- if (ret)
- goto out_fail;
+ ret = btrfs_add_link(trans, new_dir, old_inode,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len, 0, index);
+ BUG_ON(ret);
- btrfs_log_new_name(trans, old_inode, old_dir,
- new_dentry->d_parent);
+ if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ btrfs_log_new_name(trans, old_inode, old_dir,
+ new_dentry->d_parent);
+ btrfs_end_log_trans(root);
+ }
out_fail:
-
- /* this btrfs_end_log_trans just allows the current
- * log-sub transaction to complete
- */
- btrfs_end_log_trans(root);
btrfs_end_transaction_throttle(trans, root);
-out_unlock:
+
+ if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+ up_read(&root->fs_info->subvol_sem);
return ret;
}
@@ -5058,6 +5376,8 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
BUG_ON(ret);
+ btrfs_drop_extent_cache(inode, cur_offset,
+ cur_offset + ins.offset - 1, 0);
num_bytes -= ins.offset;
cur_offset += ins.offset;
alloc_hint = ins.objectid + ins.offset;
@@ -5201,7 +5521,7 @@ static int btrfs_permission(struct inode *inode, int mask)
return generic_permission(inode, mask, btrfs_check_acl);
}
-static struct inode_operations btrfs_dir_inode_operations = {
+static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
.create = btrfs_create,
@@ -5219,10 +5539,11 @@ static struct inode_operations btrfs_dir_inode_operations = {
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
};
-static struct inode_operations btrfs_dir_ro_inode_operations = {
+static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
};
+
static struct file_operations btrfs_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
@@ -5259,7 +5580,7 @@ static struct extent_io_ops btrfs_extent_io_ops = {
*
* For now we're avoiding this by dropping bmap.
*/
-static struct address_space_operations btrfs_aops = {
+static const struct address_space_operations btrfs_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
@@ -5269,16 +5590,17 @@ static struct address_space_operations btrfs_aops = {
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
.set_page_dirty = btrfs_set_page_dirty,
+ .error_remove_page = generic_error_remove_page,
};
-static struct address_space_operations btrfs_symlink_aops = {
+static const struct address_space_operations btrfs_symlink_aops = {
.readpage = btrfs_readpage,
.writepage = btrfs_writepage,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
};
-static struct inode_operations btrfs_file_inode_operations = {
+static const struct inode_operations btrfs_file_inode_operations = {
.truncate = btrfs_truncate,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
@@ -5290,7 +5612,7 @@ static struct inode_operations btrfs_file_inode_operations = {
.fallocate = btrfs_fallocate,
.fiemap = btrfs_fiemap,
};
-static struct inode_operations btrfs_special_inode_operations = {
+static const struct inode_operations btrfs_special_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
@@ -5299,7 +5621,7 @@ static struct inode_operations btrfs_special_inode_operations = {
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
};
-static struct inode_operations btrfs_symlink_inode_operations = {
+static const struct inode_operations btrfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
@@ -5309,3 +5631,7 @@ static struct inode_operations btrfs_symlink_inode_operations = {
.listxattr = btrfs_listxattr,
.removexattr = btrfs_removexattr,
};
+
+struct dentry_operations btrfs_dentry_operations = {
+ .d_delete = btrfs_dentry_delete,
+};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index bd88f25889f..a8577a7f26a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -230,8 +230,8 @@ static noinline int create_subvol(struct btrfs_root *root,
struct btrfs_root_item root_item;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
- struct btrfs_root *new_root = root;
- struct inode *dir;
+ struct btrfs_root *new_root;
+ struct inode *dir = dentry->d_parent->d_inode;
int ret;
int err;
u64 objectid;
@@ -241,7 +241,7 @@ static noinline int create_subvol(struct btrfs_root *root,
ret = btrfs_check_metadata_free_space(root);
if (ret)
- goto fail_commit;
+ return ret;
trans = btrfs_start_transaction(root, 1);
BUG_ON(!trans);
@@ -304,11 +304,17 @@ static noinline int create_subvol(struct btrfs_root *root,
if (ret)
goto fail;
+ key.offset = (u64)-1;
+ new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
+ BUG_ON(IS_ERR(new_root));
+
+ btrfs_record_root_in_trans(trans, new_root);
+
+ ret = btrfs_create_subvol_root(trans, new_root, new_dirid,
+ BTRFS_I(dir)->block_group);
/*
* insert the directory item
*/
- key.offset = (u64)-1;
- dir = dentry->d_parent->d_inode;
ret = btrfs_set_inode_index(dir, &index);
BUG_ON(ret);
@@ -322,44 +328,18 @@ static noinline int create_subvol(struct btrfs_root *root,
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- /* add the backref first */
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
- objectid, BTRFS_ROOT_BACKREF_KEY,
- root->root_key.objectid,
+ objectid, root->root_key.objectid,
dir->i_ino, index, name, namelen);
BUG_ON(ret);
- /* now add the forward ref */
- ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
- root->root_key.objectid, BTRFS_ROOT_REF_KEY,
- objectid,
- dir->i_ino, index, name, namelen);
-
- BUG_ON(ret);
-
- ret = btrfs_commit_transaction(trans, root);
- if (ret)
- goto fail_commit;
-
- new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
- BUG_ON(!new_root);
-
- trans = btrfs_start_transaction(new_root, 1);
- BUG_ON(!trans);
-
- ret = btrfs_create_subvol_root(trans, new_root, dentry, new_dirid,
- BTRFS_I(dir)->block_group);
- if (ret)
- goto fail;
-
+ d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
nr = trans->blocks_used;
- err = btrfs_commit_transaction(trans, new_root);
+ err = btrfs_commit_transaction(trans, root);
if (err && !ret)
ret = err;
-fail_commit:
- btrfs_btree_balance_dirty(root, nr);
return ret;
}
@@ -420,14 +400,15 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
* sys_mkdirat and vfs_mkdir, but we only do a single component lookup
* inside this filesystem so it's quite a bit simpler.
*/
-static noinline int btrfs_mksubvol(struct path *parent, char *name,
- int mode, int namelen,
+static noinline int btrfs_mksubvol(struct path *parent,
+ char *name, int namelen,
struct btrfs_root *snap_src)
{
+ struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;
int error;
- mutex_lock_nested(&parent->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(name, parent->dentry, namelen);
error = PTR_ERR(dentry);
@@ -438,99 +419,39 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name,
if (dentry->d_inode)
goto out_dput;
- if (!IS_POSIXACL(parent->dentry->d_inode))
- mode &= ~current_umask();
-
error = mnt_want_write(parent->mnt);
if (error)
goto out_dput;
- error = btrfs_may_create(parent->dentry->d_inode, dentry);
+ error = btrfs_may_create(dir, dentry);
if (error)
goto out_drop_write;
- /*
- * Actually perform the low-level subvolume creation after all
- * this VFS fuzz.
- *
- * Eventually we want to pass in an inode under which we create this
- * subvolume, but for now all are under the filesystem root.
- *
- * Also we should pass on the mode eventually to allow creating new
- * subvolume with specific mode bits.
- */
+ down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
+
+ if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
+ goto out_up_read;
+
if (snap_src) {
- struct dentry *dir = dentry->d_parent;
- struct dentry *test = dir->d_parent;
- struct btrfs_path *path = btrfs_alloc_path();
- int ret;
- u64 test_oid;
- u64 parent_oid = BTRFS_I(dir->d_inode)->root->root_key.objectid;
-
- test_oid = snap_src->root_key.objectid;
-
- ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
- path, parent_oid, test_oid);
- if (ret == 0)
- goto create;
- btrfs_release_path(snap_src->fs_info->tree_root, path);
-
- /* we need to make sure we aren't creating a directory loop
- * by taking a snapshot of something that has our current
- * subvol in its directory tree. So, this loops through
- * the dentries and checks the forward refs for each subvolume
- * to see if is references the subvolume where we are
- * placing this new snapshot.
- */
- while (1) {
- if (!test ||
- dir == snap_src->fs_info->sb->s_root ||
- test == snap_src->fs_info->sb->s_root ||
- test->d_inode->i_sb != snap_src->fs_info->sb) {
- break;
- }
- if (S_ISLNK(test->d_inode->i_mode)) {
- printk(KERN_INFO "Btrfs symlink in snapshot "
- "path, failed\n");
- error = -EMLINK;
- btrfs_free_path(path);
- goto out_drop_write;
- }
- test_oid =
- BTRFS_I(test->d_inode)->root->root_key.objectid;
- ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
- path, test_oid, parent_oid);
- if (ret == 0) {
- printk(KERN_INFO "Btrfs snapshot creation "
- "failed, looping\n");
- error = -EMLINK;
- btrfs_free_path(path);
- goto out_drop_write;
- }
- btrfs_release_path(snap_src->fs_info->tree_root, path);
- test = test->d_parent;
- }
-create:
- btrfs_free_path(path);
- error = create_snapshot(snap_src, dentry, name, namelen);
+ error = create_snapshot(snap_src, dentry,
+ name, namelen);
} else {
- error = create_subvol(BTRFS_I(parent->dentry->d_inode)->root,
- dentry, name, namelen);
+ error = create_subvol(BTRFS_I(dir)->root, dentry,
+ name, namelen);
}
- if (error)
- goto out_drop_write;
-
- fsnotify_mkdir(parent->dentry->d_inode, dentry);
+ if (!error)
+ fsnotify_mkdir(dir, dentry);
+out_up_read:
+ up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
out_drop_write:
mnt_drop_write(parent->mnt);
out_dput:
dput(dentry);
out_unlock:
- mutex_unlock(&parent->dentry->d_inode->i_mutex);
+ mutex_unlock(&dir->i_mutex);
return error;
}
-
static int btrfs_defrag_file(struct file *file)
{
struct inode *inode = fdentry(file)->d_inode;
@@ -596,9 +517,8 @@ again:
clear_page_dirty_for_io(page);
btrfs_set_extent_delalloc(inode, page_start, page_end);
-
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
set_page_dirty(page);
+ unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
@@ -609,7 +529,8 @@ out_unlock:
return 0;
}
-static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
+static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
+ void __user *arg)
{
u64 new_size;
u64 old_size;
@@ -718,10 +639,7 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
- struct btrfs_dir_item *di;
- struct btrfs_path *path;
struct file *src_file;
- u64 root_dirid;
int namelen;
int ret = 0;
@@ -739,32 +657,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
goto out;
}
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
- root_dirid = root->fs_info->sb->s_root->d_inode->i_ino,
- di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
- path, root_dirid,
- vol_args->name, namelen, 0);
- btrfs_free_path(path);
-
- if (di && !IS_ERR(di)) {
- ret = -EEXIST;
- goto out;
- }
-
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto out;
- }
-
if (subvol) {
- ret = btrfs_mksubvol(&file->f_path, vol_args->name,
- file->f_path.dentry->d_inode->i_mode,
- namelen, NULL);
+ ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen,
+ NULL);
} else {
struct inode *src_inode;
src_file = fget(vol_args->fd);
@@ -781,17 +676,156 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
fput(src_file);
goto out;
}
- ret = btrfs_mksubvol(&file->f_path, vol_args->name,
- file->f_path.dentry->d_inode->i_mode,
- namelen, BTRFS_I(src_inode)->root);
+ ret = btrfs_mksubvol(&file->f_path, vol_args->name, namelen,
+ BTRFS_I(src_inode)->root);
fput(src_file);
}
-
out:
kfree(vol_args);
return ret;
}
+/*
+ * helper to check if the subvolume references other subvolumes
+ */
+static noinline int may_destroy_subvol(struct btrfs_root *root)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = root->root_key.objectid;
+ key.type = BTRFS_ROOT_REF_KEY;
+ key.offset = (u64)-1;
+
+ ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
+ &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ BUG_ON(ret == 0);
+
+ ret = 0;
+ if (path->slots[0] > 0) {
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid == root->root_key.objectid &&
+ key.type == BTRFS_ROOT_REF_KEY)
+ ret = -ENOTEMPTY;
+ }
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ void __user *arg)
+{
+ struct dentry *parent = fdentry(file);
+ struct dentry *dentry;
+ struct inode *dir = parent->d_inode;
+ struct inode *inode;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_root *dest = NULL;
+ struct btrfs_ioctl_vol_args *vol_args;
+ struct btrfs_trans_handle *trans;
+ int namelen;
+ int ret;
+ int err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vol_args = memdup_user(arg, sizeof(*vol_args));
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
+
+ vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+ namelen = strlen(vol_args->name);
+ if (strchr(vol_args->name, '/') ||
+ strncmp(vol_args->name, "..", namelen) == 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mnt_want_write(file->f_path.mnt);
+ if (err)
+ goto out;
+
+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ dentry = lookup_one_len(vol_args->name, parent, namelen);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out_unlock_dir;
+ }
+
+ if (!dentry->d_inode) {
+ err = -ENOENT;
+ goto out_dput;
+ }
+
+ inode = dentry->d_inode;
+ if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ err = -EINVAL;
+ goto out_dput;
+ }
+
+ dest = BTRFS_I(inode)->root;
+
+ mutex_lock(&inode->i_mutex);
+ err = d_invalidate(dentry);
+ if (err)
+ goto out_unlock;
+
+ down_write(&root->fs_info->subvol_sem);
+
+ err = may_destroy_subvol(dest);
+ if (err)
+ goto out_up_write;
+
+ trans = btrfs_start_transaction(root, 1);
+ ret = btrfs_unlink_subvol(trans, root, dir,
+ dest->root_key.objectid,
+ dentry->d_name.name,
+ dentry->d_name.len);
+ BUG_ON(ret);
+
+ btrfs_record_root_in_trans(trans, dest);
+
+ memset(&dest->root_item.drop_progress, 0,
+ sizeof(dest->root_item.drop_progress));
+ dest->root_item.drop_level = 0;
+ btrfs_set_root_refs(&dest->root_item, 0);
+
+ ret = btrfs_insert_orphan_item(trans,
+ root->fs_info->tree_root,
+ dest->root_key.objectid);
+ BUG_ON(ret);
+
+ ret = btrfs_commit_transaction(trans, root);
+ BUG_ON(ret);
+ inode->i_flags |= S_DEAD;
+out_up_write:
+ up_write(&root->fs_info->subvol_sem);
+out_unlock:
+ mutex_unlock(&inode->i_mutex);
+ if (!err) {
+ btrfs_invalidate_inodes(dest);
+ d_delete(dentry);
+ }
+out_dput:
+ dput(dentry);
+out_unlock_dir:
+ mutex_unlock(&dir->i_mutex);
+ mnt_drop_write(file->f_path.mnt);
+out:
+ kfree(vol_args);
+ return err;
+}
+
static int btrfs_ioctl_defrag(struct file *file)
{
struct inode *inode = fdentry(file)->d_inode;
@@ -865,8 +899,8 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
return ret;
}
-static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
- u64 off, u64 olen, u64 destoff)
+static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ u64 off, u64 olen, u64 destoff)
{
struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -976,7 +1010,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
/* punch hole in destination first */
btrfs_drop_extents(trans, root, inode, off, off + len,
- off + len, 0, &hint_byte);
+ off + len, 0, &hint_byte, 1);
/* clone data */
key.objectid = src->i_ino;
@@ -1071,8 +1105,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
datao += off - key.offset;
datal -= off - key.offset;
}
- if (key.offset + datao + datal + key.offset >
- off + len)
+ if (key.offset + datao + datal > off + len)
datal = off + len - key.offset - datao;
/* disko == 0 means it's a hole */
if (!disko)
@@ -1258,6 +1291,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_snap_create(file, argp, 0);
case BTRFS_IOC_SUBVOL_CREATE:
return btrfs_ioctl_snap_create(file, argp, 1);
+ case BTRFS_IOC_SNAP_DESTROY:
+ return btrfs_ioctl_snap_destroy(file, argp);
case BTRFS_IOC_DEFRAG:
return btrfs_ioctl_defrag(file);
case BTRFS_IOC_RESIZE:
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index b320b103fa1..bc49914475e 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -65,5 +65,6 @@ struct btrfs_ioctl_clone_range_args {
#define BTRFS_IOC_SUBVOL_CREATE _IOW(BTRFS_IOCTL_MAGIC, 14, \
struct btrfs_ioctl_vol_args)
-
+#define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
+ struct btrfs_ioctl_vol_args)
#endif
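
For reference, the new BTRFS_IOC_SNAP_DESTROY ioctl defined above is issued on a file descriptor for the directory that directly contains the subvolume, with the subvolume's name passed in a btrfs_ioctl_vol_args. A minimal userspace sketch follows; it is illustrative only and not part of this patch, and it assumes the usual vol_args layout (an s64 fd field plus a NUL-terminated name) by carrying local copies of the definitions.

	/*
	 * Illustrative sketch only, not part of the patch: drive the new
	 * BTRFS_IOC_SNAP_DESTROY ioctl from userspace.  The constants and
	 * the vol_args layout are copied locally for self-containment.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define BTRFS_IOCTL_MAGIC	0x94
	#define BTRFS_PATH_NAME_MAX	4087

	struct btrfs_ioctl_vol_args {
		long long fd;
		char name[BTRFS_PATH_NAME_MAX + 1];
	};

	#define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
					    struct btrfs_ioctl_vol_args)

	/* delete the subvolume 'name' living directly under 'dirfd' */
	static int snap_destroy(int dirfd, const char *name)
	{
		struct btrfs_ioctl_vol_args args;

		memset(&args, 0, sizeof(args));
		strncpy(args.name, name, BTRFS_PATH_NAME_MAX);
		return ioctl(dirfd, BTRFS_IOC_SNAP_DESTROY, &args);
	}

	int main(int argc, char **argv)
	{
		int fd;

		if (argc != 3) {
			fprintf(stderr, "usage: %s <parent dir> <subvol>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || snap_destroy(fd, argv[2]) < 0) {
			perror("snap destroy");
			return 1;
		}
		close(fd);
		return 0;
	}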
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 7b2f401e604..b5d6d24726b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -159,8 +159,6 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
*
* len is the length of the extent
*
- * This also sets the EXTENT_ORDERED bit on the range in the inode.
- *
* The tree is given a single reference on the ordered extent that was
* inserted.
*/
@@ -181,6 +179,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
entry->start = start;
entry->len = len;
entry->disk_len = disk_len;
+ entry->bytes_left = len;
entry->inode = inode;
if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
set_bit(type, &entry->flags);
@@ -195,9 +194,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
&entry->rb_node);
BUG_ON(node);
- set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
- entry_end(entry) - 1, GFP_NOFS);
-
spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
&BTRFS_I(inode)->root->fs_info->ordered_extents);
@@ -241,13 +237,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int ret;
tree = &BTRFS_I(inode)->ordered_tree;
mutex_lock(&tree->mutex);
- clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
- GFP_NOFS);
node = tree_search(tree, file_offset);
if (!node) {
ret = 1;
@@ -260,11 +253,16 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
goto out;
}
- ret = test_range_bit(io_tree, entry->file_offset,
- entry->file_offset + entry->len - 1,
- EXTENT_ORDERED, 0);
- if (ret == 0)
+ if (io_size > entry->bytes_left) {
+ printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
+ (unsigned long long)entry->bytes_left,
+ (unsigned long long)io_size);
+ }
+ entry->bytes_left -= io_size;
+ if (entry->bytes_left == 0)
ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+ else
+ ret = 1;
out:
mutex_unlock(&tree->mutex);
return ret == 0;
@@ -476,6 +474,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
u64 orig_end;
u64 wait_end;
struct btrfs_ordered_extent *ordered;
+ int found;
if (start + len < start) {
orig_end = INT_LIMIT(loff_t);
@@ -502,6 +501,7 @@ again:
orig_end >> PAGE_CACHE_SHIFT);
end = orig_end;
+ found = 0;
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, end);
if (!ordered)
@@ -514,6 +514,7 @@ again:
btrfs_put_ordered_extent(ordered);
break;
}
+ found++;
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
btrfs_put_ordered_extent(ordered);
@@ -521,8 +522,8 @@ again:
break;
end--;
}
- if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
- EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
+ if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
+ EXTENT_DELALLOC, 0, NULL)) {
schedule_timeout(1);
goto again;
}
@@ -613,7 +614,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
*/
if (test_range_bit(io_tree, disk_i_size,
ordered->file_offset + ordered->len - 1,
- EXTENT_DELALLOC, 0)) {
+ EXTENT_DELALLOC, 0, NULL)) {
goto out;
}
/*
@@ -664,7 +665,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
*/
if (i_size_test > entry_end(ordered) &&
!test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
- EXTENT_DELALLOC, 0)) {
+ EXTENT_DELALLOC, 0, NULL)) {
new_i_size = min_t(u64, i_size_test, i_size_read(inode));
}
BTRFS_I(inode)->disk_i_size = new_i_size;
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 3d31c8827b0..993a7ea45c7 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -85,6 +85,9 @@ struct btrfs_ordered_extent {
/* extent length on disk */
u64 disk_len;
+ /* number of bytes that still need writing */
+ u64 bytes_left;
+
/* flags (described above) */
unsigned long flags;
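
The new bytes_left field replaces the old EXTENT_ORDERED bit test: instead of asking the io_tree whether any ordered range remains, btrfs_dec_test_ordered_pending() now decrements a per-extent byte counter and declares the extent done when it reaches zero. A stand-alone model of that accounting, purely illustrative and not taken from the tree:

	/*
	 * Illustrative sketch only: a userspace model of the bytes_left
	 * accounting added above.  An ordered extent is complete once every
	 * written byte has been accounted for.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct ordered_extent_model {
		unsigned long long len;		/* total extent length */
		unsigned long long bytes_left;	/* bytes still awaiting IO */
	};

	/* returns true once the whole extent has completed IO */
	static bool dec_test_pending(struct ordered_extent_model *oe,
				     unsigned long long io_size)
	{
		assert(io_size <= oe->bytes_left);	/* "bad ordered accounting" */
		oe->bytes_left -= io_size;
		return oe->bytes_left == 0;
	}

	int main(void)
	{
		struct ordered_extent_model oe = { .len = 16384, .bytes_left = 16384 };

		printf("first 4k done:  %d\n", dec_test_pending(&oe, 4096));	/* 0 */
		printf("remainder done: %d\n", dec_test_pending(&oe, 12288));	/* 1 */
		return 0;
	}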
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index 3c0d52af4f8..79cba5fbc28 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -65,3 +65,23 @@ out:
btrfs_free_path(path);
return ret;
}
+
+int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int ret;
+
+ key.objectid = BTRFS_ORPHAN_OBJECTID;
+ key.type = BTRFS_ORPHAN_ITEM_KEY;
+ key.offset = offset;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+
+ btrfs_free_path(path);
+ return ret;
+}
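
btrfs_find_orphan_item() passes the btrfs_search_slot() result straight through, so callers read it as: 0, the orphan item exists; positive, it does not; negative, an allocation or I/O error. A hypothetical caller, shown only to document that convention (no such helper is added by this patch):

	static int root_has_orphan_item(struct btrfs_root *tree_root, u64 root_id)
	{
		int ret = btrfs_find_orphan_item(tree_root, root_id);

		if (ret < 0)		/* e.g. -ENOMEM from btrfs_alloc_path() */
			return ret;
		return ret == 0;	/* 1 if the orphan item was found */
	}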
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c04f7f21260..361ad323faa 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -121,6 +121,15 @@ struct inodevec {
int nr;
};
+#define MAX_EXTENTS 128
+
+struct file_extent_cluster {
+ u64 start;
+ u64 end;
+ u64 boundary[MAX_EXTENTS];
+ unsigned int nr;
+};
+
struct reloc_control {
/* block group to relocate */
struct btrfs_block_group_cache *block_group;
@@ -2180,7 +2189,7 @@ static int tree_block_processed(u64 bytenr, u32 blocksize,
struct reloc_control *rc)
{
if (test_range_bit(&rc->processed_blocks, bytenr,
- bytenr + blocksize - 1, EXTENT_DIRTY, 1))
+ bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
return 1;
return 0;
}
@@ -2529,56 +2538,94 @@ out:
}
static noinline_for_stack
-int relocate_inode_pages(struct inode *inode, u64 start, u64 len)
+int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
+ u64 block_start)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *em;
+ int ret = 0;
+
+ em = alloc_extent_map(GFP_NOFS);
+ if (!em)
+ return -ENOMEM;
+
+ em->start = start;
+ em->len = end + 1 - start;
+ em->block_len = em->len;
+ em->block_start = block_start;
+ em->bdev = root->fs_info->fs_devices->latest_bdev;
+ set_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+ lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ while (1) {
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+ if (ret != -EEXIST) {
+ free_extent_map(em);
+ break;
+ }
+ btrfs_drop_extent_cache(inode, start, end, 0);
+ }
+ unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+ return ret;
+}
+
+static int relocate_file_extent_cluster(struct inode *inode,
+ struct file_extent_cluster *cluster)
{
u64 page_start;
u64 page_end;
- unsigned long i;
- unsigned long first_index;
+ u64 offset = BTRFS_I(inode)->index_cnt;
+ unsigned long index;
unsigned long last_index;
- unsigned int total_read = 0;
- unsigned int total_dirty = 0;
+ unsigned int dirty_page = 0;
struct page *page;
struct file_ra_state *ra;
- struct btrfs_ordered_extent *ordered;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ int nr = 0;
int ret = 0;
+ if (!cluster->nr)
+ return 0;
+
ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
+ index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
+ last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
+
mutex_lock(&inode->i_mutex);
- first_index = start >> PAGE_CACHE_SHIFT;
- last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
- /* make sure the dirty trick played by the caller work */
- while (1) {
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- first_index, last_index);
- if (ret != -EBUSY)
- break;
- schedule_timeout(HZ/10);
- }
+ i_size_write(inode, cluster->end + 1 - offset);
+ ret = setup_extent_mapping(inode, cluster->start - offset,
+ cluster->end - offset, cluster->start);
if (ret)
goto out_unlock;
file_ra_state_init(ra, inode->i_mapping);
- for (i = first_index ; i <= last_index; i++) {
- if (total_read % ra->ra_pages == 0) {
- btrfs_force_ra(inode->i_mapping, ra, NULL, i,
- min(last_index, ra->ra_pages + i - 1));
- }
- total_read++;
-again:
- if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
- BUG_ON(1);
- page = grab_cache_page(inode->i_mapping, i);
+ WARN_ON(cluster->start != cluster->boundary[0]);
+ while (index <= last_index) {
+ page = find_lock_page(inode->i_mapping, index);
if (!page) {
- ret = -ENOMEM;
- goto out_unlock;
+ page_cache_sync_readahead(inode->i_mapping,
+ ra, NULL, index,
+ last_index + 1 - index);
+ page = grab_cache_page(inode->i_mapping, index);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+
+ if (PageReadahead(page)) {
+ page_cache_async_readahead(inode->i_mapping,
+ ra, NULL, page, index,
+ last_index + 1 - index);
}
+
if (!PageUptodate(page)) {
btrfs_readpage(NULL, page);
lock_page(page);
@@ -2589,75 +2636,79 @@ again:
goto out_unlock;
}
}
- wait_on_page_writeback(page);
page_start = (u64)page->index << PAGE_CACHE_SHIFT;
page_end = page_start + PAGE_CACHE_SIZE - 1;
- lock_extent(io_tree, page_start, page_end, GFP_NOFS);
-
- ordered = btrfs_lookup_ordered_extent(inode, page_start);
- if (ordered) {
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
- unlock_page(page);
- page_cache_release(page);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- goto again;
- }
+
+ lock_extent(&BTRFS_I(inode)->io_tree,
+ page_start, page_end, GFP_NOFS);
+
set_page_extent_mapped(page);
- if (i == first_index)
- set_extent_bits(io_tree, page_start, page_end,
+ if (nr < cluster->nr &&
+ page_start + offset == cluster->boundary[nr]) {
+ set_extent_bits(&BTRFS_I(inode)->io_tree,
+ page_start, page_end,
EXTENT_BOUNDARY, GFP_NOFS);
+ nr++;
+ }
btrfs_set_extent_delalloc(inode, page_start, page_end);
set_page_dirty(page);
- total_dirty++;
+ dirty_page++;
- unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+ unlock_extent(&BTRFS_I(inode)->io_tree,
+ page_start, page_end, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
+
+ index++;
+ if (nr < cluster->nr &&
+ page_end + 1 + offset == cluster->boundary[nr]) {
+ balance_dirty_pages_ratelimited_nr(inode->i_mapping,
+ dirty_page);
+ dirty_page = 0;
+ }
+ }
+ if (dirty_page) {
+ balance_dirty_pages_ratelimited_nr(inode->i_mapping,
+ dirty_page);
}
+ WARN_ON(nr != cluster->nr);
out_unlock:
mutex_unlock(&inode->i_mutex);
kfree(ra);
- balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
return ret;
}
static noinline_for_stack
-int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key)
+int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
+ struct file_extent_cluster *cluster)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- struct extent_map *em;
- u64 start = extent_key->objectid - BTRFS_I(inode)->index_cnt;
- u64 end = start + extent_key->offset - 1;
-
- em = alloc_extent_map(GFP_NOFS);
- em->start = start;
- em->len = extent_key->offset;
- em->block_len = extent_key->offset;
- em->block_start = extent_key->objectid;
- em->bdev = root->fs_info->fs_devices->latest_bdev;
- set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ int ret;
- /* setup extent map to cheat btrfs_readpage */
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
- while (1) {
- int ret;
- spin_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
- if (ret != -EEXIST) {
- free_extent_map(em);
- break;
- }
- btrfs_drop_extent_cache(inode, start, end, 0);
+ if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
+ ret = relocate_file_extent_cluster(inode, cluster);
+ if (ret)
+ return ret;
+ cluster->nr = 0;
}
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
- return relocate_inode_pages(inode, start, extent_key->offset);
+ if (!cluster->nr)
+ cluster->start = extent_key->objectid;
+ else
+ BUG_ON(cluster->nr >= MAX_EXTENTS);
+ cluster->end = extent_key->objectid + extent_key->offset - 1;
+ cluster->boundary[cluster->nr] = extent_key->objectid;
+ cluster->nr++;
+
+ if (cluster->nr >= MAX_EXTENTS) {
+ ret = relocate_file_extent_cluster(inode, cluster);
+ if (ret)
+ return ret;
+ cluster->nr = 0;
+ }
+ return 0;
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
@@ -3203,10 +3254,12 @@ static int check_extent_flags(u64 flags)
return 0;
}
+
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
+ struct file_extent_cluster *cluster;
struct btrfs_trans_handle *trans = NULL;
struct btrfs_path *path;
struct btrfs_extent_item *ei;
@@ -3216,10 +3269,17 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
int ret;
int err = 0;
+ cluster = kzalloc(sizeof(*cluster), GFP_NOFS);
+ if (!cluster)
+ return -ENOMEM;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+ rc->extents_found = 0;
+ rc->extents_skipped = 0;
+
rc->search_start = rc->block_group->key.objectid;
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
GFP_NOFS);
@@ -3306,14 +3366,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
nr = trans->blocks_used;
- btrfs_end_transaction_throttle(trans, rc->extent_root);
+ btrfs_end_transaction(trans, rc->extent_root);
trans = NULL;
btrfs_btree_balance_dirty(rc->extent_root, nr);
if (rc->stage == MOVE_DATA_EXTENTS &&
(flags & BTRFS_EXTENT_FLAG_DATA)) {
rc->found_file_extent = 1;
- ret = relocate_data_extent(rc->data_inode, &key);
+ ret = relocate_data_extent(rc->data_inode,
+ &key, cluster);
if (ret < 0) {
err = ret;
break;
@@ -3328,6 +3389,14 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
btrfs_btree_balance_dirty(rc->extent_root, nr);
}
+ if (!err) {
+ ret = relocate_file_extent_cluster(rc->data_inode, cluster);
+ if (ret < 0)
+ err = ret;
+ }
+
+ kfree(cluster);
+
rc->create_reloc_root = 0;
smp_mb();
@@ -3348,8 +3417,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 objectid, u64 size)
+ struct btrfs_root *root, u64 objectid)
{
struct btrfs_path *path;
struct btrfs_inode_item *item;
@@ -3368,7 +3436,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
btrfs_set_inode_generation(leaf, item, 1);
- btrfs_set_inode_size(leaf, item, size);
+ btrfs_set_inode_size(leaf, item, 0);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
btrfs_mark_buffer_dirty(leaf);
@@ -3404,12 +3472,7 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
if (err)
goto out;
- err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
- BUG_ON(err);
-
- err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
- group->key.offset, 0, group->key.offset,
- 0, 0, 0);
+ err = __insert_orphan_inode(trans, root, objectid);
BUG_ON(err);
key.objectid = objectid;
@@ -3475,14 +3538,15 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
btrfs_wait_ordered_extents(fs_info->tree_root, 0);
while (1) {
- mutex_lock(&fs_info->cleaner_mutex);
- btrfs_clean_old_snapshots(fs_info->tree_root);
- mutex_unlock(&fs_info->cleaner_mutex);
-
rc->extents_found = 0;
rc->extents_skipped = 0;
+ mutex_lock(&fs_info->cleaner_mutex);
+
+ btrfs_clean_old_snapshots(fs_info->tree_root);
ret = relocate_block_group(rc);
+
+ mutex_unlock(&fs_info->cleaner_mutex);
if (ret < 0) {
err = ret;
break;
@@ -3514,10 +3578,10 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
}
}
- filemap_fdatawrite_range(fs_info->btree_inode->i_mapping,
- rc->block_group->key.objectid,
- rc->block_group->key.objectid +
- rc->block_group->key.offset - 1);
+ filemap_write_and_wait_range(fs_info->btree_inode->i_mapping,
+ rc->block_group->key.objectid,
+ rc->block_group->key.objectid +
+ rc->block_group->key.offset - 1);
WARN_ON(rc->block_group->pinned > 0);
WARN_ON(rc->block_group->reserved > 0);
@@ -3530,6 +3594,26 @@ out:
return err;
}
+static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
+{
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ trans = btrfs_start_transaction(root->fs_info->tree_root, 1);
+
+ memset(&root->root_item.drop_progress, 0,
+ sizeof(root->root_item.drop_progress));
+ root->root_item.drop_level = 0;
+ btrfs_set_root_refs(&root->root_item, 0);
+ ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ &root->root_key, &root->root_item);
+ BUG_ON(ret);
+
+ ret = btrfs_end_transaction(trans, root->fs_info->tree_root);
+ BUG_ON(ret);
+ return 0;
+}
+
/*
* recover relocation interrupted by system crash.
*
@@ -3589,8 +3673,12 @@ int btrfs_recover_relocation(struct btrfs_root *root)
fs_root = read_fs_root(root->fs_info,
reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
- err = PTR_ERR(fs_root);
- goto out;
+ ret = PTR_ERR(fs_root);
+ if (ret != -ENOENT) {
+ err = ret;
+ goto out;
+ }
+ mark_garbage_root(reloc_root);
}
}
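
The reworked relocate_data_extent() no longer copies one extent at a time; it batches extents that are contiguous on disk into a file_extent_cluster (up to MAX_EXTENTS boundaries) and relocates each cluster in a single pass through relocate_file_extent_cluster(). A stand-alone model of that batching policy, illustrative only and not taken from the tree:

	/*
	 * Illustrative sketch only: the clustering policy used by the new
	 * relocate_data_extent(), modelled as a userspace program.
	 */
	#include <stdio.h>

	#define MAX_EXTENTS 128

	struct cluster {
		unsigned long long start;
		unsigned long long end;
		unsigned long long boundary[MAX_EXTENTS];
		unsigned int nr;
	};

	static void flush_cluster(struct cluster *c)
	{
		if (!c->nr)
			return;
		printf("relocate cluster [%llu, %llu], %u extents\n",
		       c->start, c->end, c->nr);
		c->nr = 0;
	}

	/* objectid is the extent's disk start, length its disk length */
	static void add_extent(struct cluster *c, unsigned long long objectid,
			       unsigned long long length)
	{
		if (c->nr > 0 && objectid != c->end + 1)
			flush_cluster(c);		/* not contiguous: flush */
		if (!c->nr)
			c->start = objectid;
		c->end = objectid + length - 1;
		c->boundary[c->nr++] = objectid;
		if (c->nr >= MAX_EXTENTS)
			flush_cluster(c);		/* cluster full */
	}

	int main(void)
	{
		struct cluster c = { .nr = 0 };

		add_extent(&c, 4096, 4096);
		add_extent(&c, 8192, 8192);	/* contiguous: same cluster */
		add_extent(&c, 65536, 4096);	/* gap: flushes the first cluster */
		flush_cluster(&c);		/* final flush, as relocate_block_group() does */
		return 0;
	}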
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 0ddc6d61c55..9351428f30e 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -94,17 +94,23 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
goto out;
BUG_ON(ret == 0);
+ if (path->slots[0] == 0) {
+ ret = 1;
+ goto out;
+ }
l = path->nodes[0];
- BUG_ON(path->slots[0] == 0);
slot = path->slots[0] - 1;
btrfs_item_key_to_cpu(l, &found_key, slot);
- if (found_key.objectid != objectid) {
+ if (found_key.objectid != objectid ||
+ found_key.type != BTRFS_ROOT_ITEM_KEY) {
ret = 1;
goto out;
}
- read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
- sizeof(*item));
- memcpy(key, &found_key, sizeof(found_key));
+ if (item)
+ read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
+ sizeof(*item));
+ if (key)
+ memcpy(key, &found_key, sizeof(found_key));
ret = 0;
out:
btrfs_free_path(path);
@@ -249,6 +255,59 @@ err:
return ret;
}
+int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int err = 0;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = BTRFS_ORPHAN_OBJECTID;
+ key.type = BTRFS_ORPHAN_ITEM_KEY;
+ key.offset = 0;
+
+ while (1) {
+ ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+ if (ret < 0) {
+ err = ret;
+ break;
+ }
+
+ leaf = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(tree_root, path);
+ if (ret < 0)
+ err = ret;
+ if (ret != 0)
+ break;
+ leaf = path->nodes[0];
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ btrfs_release_path(tree_root, path);
+
+ if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
+ key.type != BTRFS_ORPHAN_ITEM_KEY)
+ break;
+
+ ret = btrfs_find_dead_roots(tree_root, key.offset);
+ if (ret) {
+ err = ret;
+ break;
+ }
+
+ key.offset++;
+ }
+
+ btrfs_free_path(path);
+ return err;
+}
+
/* drop the root item for 'key' from 'root' */
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_key *key)
@@ -278,31 +337,57 @@ out:
return ret;
}
-#if 0 /* this will get used when snapshot deletion is implemented */
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
- u64 root_id, u8 type, u64 ref_id)
+ u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
+ const char *name, int name_len)
+
{
+ struct btrfs_path *path;
+ struct btrfs_root_ref *ref;
+ struct extent_buffer *leaf;
struct btrfs_key key;
+ unsigned long ptr;
+ int err = 0;
int ret;
- struct btrfs_path *path;
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
key.objectid = root_id;
- key.type = type;
+ key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = ref_id;
-
+again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- BUG_ON(ret);
-
- ret = btrfs_del_item(trans, tree_root, path);
- BUG_ON(ret);
+ BUG_ON(ret < 0);
+ if (ret == 0) {
+ leaf = path->nodes[0];
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_root_ref);
+
+ WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
+ WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
+ ptr = (unsigned long)(ref + 1);
+ WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
+ *sequence = btrfs_root_ref_sequence(leaf, ref);
+
+ ret = btrfs_del_item(trans, tree_root, path);
+ BUG_ON(ret);
+ } else
+ err = -ENOENT;
+
+ if (key.type == BTRFS_ROOT_BACKREF_KEY) {
+ btrfs_release_path(tree_root, path);
+ key.objectid = ref_id;
+ key.type = BTRFS_ROOT_REF_KEY;
+ key.offset = root_id;
+ goto again;
+ }
btrfs_free_path(path);
- return ret;
+ return err;
}
-#endif
int btrfs_find_root_ref(struct btrfs_root *tree_root,
struct btrfs_path *path,
@@ -319,7 +404,6 @@ int btrfs_find_root_ref(struct btrfs_root *tree_root,
return ret;
}
-
/*
* add a btrfs_root_ref item. type is either BTRFS_ROOT_REF_KEY
* or BTRFS_ROOT_BACKREF_KEY.
@@ -335,8 +419,7 @@ int btrfs_find_root_ref(struct btrfs_root *tree_root,
*/
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
- u64 root_id, u8 type, u64 ref_id,
- u64 dirid, u64 sequence,
+ u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
const char *name, int name_len)
{
struct btrfs_key key;
@@ -346,13 +429,14 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
unsigned long ptr;
-
path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
key.objectid = root_id;
- key.type = type;
+ key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = ref_id;
-
+again:
ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
sizeof(*ref) + name_len);
BUG_ON(ret);
@@ -366,6 +450,14 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, name, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
+ if (key.type == BTRFS_ROOT_BACKREF_KEY) {
+ btrfs_release_path(tree_root, path);
+ key.objectid = ref_id;
+ key.type = BTRFS_ROOT_REF_KEY;
+ key.offset = root_id;
+ goto again;
+ }
+
btrfs_free_path(path);
- return ret;
+ return 0;
}
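
With this change a single btrfs_add_root_ref() or btrfs_del_root_ref() call covers both items: the backref keyed under the parent root and the forward ref keyed under the child root, inserted or deleted by swapping the key and looping. A small model of the key pairing, illustrative only (the numeric key-type values are quoted from ctree.h and may be treated as an assumption here):

	/*
	 * Illustrative sketch only: the two keys now handled inside one
	 * btrfs_add_root_ref() call instead of two separate calls.
	 */
	#include <stdio.h>

	#define BTRFS_ROOT_BACKREF_KEY	144
	#define BTRFS_ROOT_REF_KEY	156

	struct key {
		unsigned long long objectid;
		int type;
		unsigned long long offset;
	};

	static void add_root_ref_keys(unsigned long long root_id,
				      unsigned long long ref_id)
	{
		struct key k = { root_id, BTRFS_ROOT_BACKREF_KEY, ref_id };

		for (;;) {
			printf("insert (%llu %d %llu)\n",
			       k.objectid, k.type, k.offset);
			if (k.type != BTRFS_ROOT_BACKREF_KEY)
				break;
			/* second pass: the forward ref, objectid/offset swapped */
			k = (struct key){ ref_id, BTRFS_ROOT_REF_KEY, root_id };
		}
	}

	int main(void)
	{
		add_root_ref_keys(5, 256);	/* e.g. fs tree -> new subvolume */
		return 0;
	}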
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6d6d06cb6df..67035385444 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -51,7 +51,7 @@
#include "export.h"
#include "compression.h"
-static struct super_operations btrfs_super_ops;
+static const struct super_operations btrfs_super_ops;
static void btrfs_put_super(struct super_block *sb)
{
@@ -675,7 +675,8 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}
-static struct super_operations btrfs_super_ops = {
+static const struct super_operations btrfs_super_ops = {
+ .drop_inode = btrfs_drop_inode,
.delete_inode = btrfs_delete_inode,
.put_super = btrfs_put_super,
.sync_fs = btrfs_sync_fs,
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index cdbb5022da5..88f866f85e7 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -104,7 +104,6 @@ static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
{
if (root->ref_cows && root->last_trans < trans->transid) {
WARN_ON(root == root->fs_info->extent_root);
- WARN_ON(root->root_item.refs == 0);
WARN_ON(root->commit_root != root->node);
radix_tree_tag_set(&root->fs_info->fs_roots_radix,
@@ -720,7 +719,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
key.objectid = objectid;
- key.offset = 0;
+ /* record when the snapshot was created in key.offset */
+ key.offset = trans->transid;
btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
old = btrfs_lock_root_node(root);
@@ -778,24 +778,14 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
ret = btrfs_update_inode(trans, parent_root, parent_inode);
BUG_ON(ret);
- /* add the backref first */
ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
pending->root_key.objectid,
- BTRFS_ROOT_BACKREF_KEY,
parent_root->root_key.objectid,
parent_inode->i_ino, index, pending->name,
namelen);
BUG_ON(ret);
- /* now add the forward ref */
- ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
- parent_root->root_key.objectid,
- BTRFS_ROOT_REF_KEY,
- pending->root_key.objectid,
- parent_inode->i_ino, index, pending->name,
- namelen);
-
inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
d_instantiate(pending->dentry, inode);
fail:
@@ -874,7 +864,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
unsigned long timeout = 1;
struct btrfs_transaction *cur_trans;
struct btrfs_transaction *prev_trans = NULL;
- struct extent_io_tree *pinned_copy;
DEFINE_WAIT(wait);
int ret;
int should_grow = 0;
@@ -915,13 +904,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return 0;
}
- pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
- if (!pinned_copy)
- return -ENOMEM;
-
- extent_io_tree_init(pinned_copy,
- root->fs_info->btree_inode->i_mapping, GFP_NOFS);
-
trans->transaction->in_commit = 1;
trans->transaction->blocked = 1;
if (cur_trans->list.prev != &root->fs_info->trans_list) {
@@ -1019,6 +1001,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
ret = commit_cowonly_roots(trans, root);
BUG_ON(ret);
+ btrfs_prepare_extent_commit(trans, root);
+
cur_trans = root->fs_info->running_transaction;
spin_lock(&root->fs_info->new_trans_lock);
root->fs_info->running_transaction = NULL;
@@ -1042,8 +1026,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
sizeof(root->fs_info->super_copy));
- btrfs_copy_pinned(root, pinned_copy);
-
trans->transaction->blocked = 0;
wake_up(&root->fs_info->transaction_wait);
@@ -1059,8 +1041,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
mutex_unlock(&root->fs_info->tree_log_mutex);
- btrfs_finish_extent_commit(trans, root, pinned_copy);
- kfree(pinned_copy);
+ btrfs_finish_extent_commit(trans, root);
/* do the directory inserts of any pending snapshot creations */
finish_pending_snapshots(trans, root->fs_info);
@@ -1096,8 +1077,13 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
while (!list_empty(&list)) {
root = list_entry(list.next, struct btrfs_root, root_list);
- list_del_init(&root->root_list);
- btrfs_drop_snapshot(root, 0);
+ list_del(&root->root_list);
+
+ if (btrfs_header_backref_rev(root->node) <
+ BTRFS_MIXED_BACKREF_REV)
+ btrfs_drop_snapshot(root, 0);
+ else
+ btrfs_drop_snapshot(root, 1);
}
return 0;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d91b0de7c50..7827841b55c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -263,8 +263,8 @@ static int process_one_buffer(struct btrfs_root *log,
struct walk_control *wc, u64 gen)
{
if (wc->pin)
- btrfs_update_pinned_extents(log->fs_info->extent_root,
- eb->start, eb->len, 1);
+ btrfs_pin_extent(log->fs_info->extent_root,
+ eb->start, eb->len, 0);
if (btrfs_buffer_uptodate(eb, gen)) {
if (wc->write)
@@ -534,7 +534,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
ret = btrfs_drop_extents(trans, root, inode,
- start, extent_end, extent_end, start, &alloc_hint);
+ start, extent_end, extent_end, start, &alloc_hint, 1);
BUG_ON(ret);
if (found_type == BTRFS_FILE_EXTENT_REG ||
@@ -2605,7 +2605,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
extent);
cs = btrfs_file_extent_offset(src, extent);
cl = btrfs_file_extent_num_bytes(src,
- extent);;
+ extent);
if (btrfs_file_extent_compression(src,
extent)) {
cs = 0;
@@ -2841,7 +2841,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
break;
- if (parent == sb->s_root)
+ if (IS_ROOT(parent))
break;
parent = parent->d_parent;
@@ -2880,6 +2880,12 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_no_trans;
}
+ if (root != BTRFS_I(inode)->root ||
+ btrfs_root_refs(&root->root_item) == 0) {
+ ret = 1;
+ goto end_no_trans;
+ }
+
ret = check_parent_dirs_for_sync(trans, inode, parent,
sb, last_committed);
if (ret)
@@ -2907,12 +2913,15 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
break;
inode = parent->d_inode;
+ if (root != BTRFS_I(inode)->root)
+ break;
+
if (BTRFS_I(inode)->generation >
root->fs_info->last_trans_committed) {
ret = btrfs_log_inode(trans, root, inode, inode_only);
BUG_ON(ret);
}
- if (parent == sb->s_root)
+ if (IS_ROOT(parent))
break;
parent = parent->d_parent;
@@ -2951,7 +2960,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
struct btrfs_key tmp_key;
struct btrfs_root *log;
struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
- u64 highest_inode;
struct walk_control wc = {
.process_func = process_one_buffer,
.stage = 0,
@@ -3010,11 +3018,6 @@ again:
path);
BUG_ON(ret);
}
- ret = btrfs_find_highest_inode(wc.replay_dest, &highest_inode);
- if (ret == 0) {
- wc.replay_dest->highest_inode = highest_inode;
- wc.replay_dest->last_inode_alloc = highest_inode;
- }
key.offset = found_key.offset - 1;
wc.replay_dest->log_root = NULL;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5cf405b0828..23e7d36ff32 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -276,7 +276,7 @@ loop_lock:
* is now congested. Back off and let other work structs
* run instead
*/
- if (pending && bdi_write_congested(bdi) && batch_run > 32 &&
+ if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
fs_info->fs_devices->open_devices > 1) {
struct io_context *ioc;
@@ -719,10 +719,9 @@ error:
* called very infrequently and that a given device has a small number
* of extents
*/
-static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device,
- u64 num_bytes, u64 *start,
- u64 *max_avail)
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device, u64 num_bytes,
+ u64 *start, u64 *max_avail)
{
struct btrfs_key key;
struct btrfs_root *root = device->dev_root;
@@ -1736,6 +1735,10 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
extent_root = root->fs_info->extent_root;
em_tree = &root->fs_info->mapping_tree.map_tree;
+ ret = btrfs_can_relocate(extent_root, chunk_offset);
+ if (ret)
+ return -ENOSPC;
+
/* step one, relocate all the extents inside this chunk */
ret = btrfs_relocate_block_group(extent_root, chunk_offset);
BUG_ON(ret);
@@ -1749,9 +1752,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
* step two, delete the device extents and the
* chunk tree entries
*/
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, 1);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
BUG_ON(em->start > chunk_offset ||
em->start + em->len < chunk_offset);
@@ -1780,9 +1783,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
BUG_ON(ret);
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
remove_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
kfree(map);
em->bdev = NULL;
@@ -1807,12 +1810,15 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
struct btrfs_key found_key;
u64 chunk_tree = chunk_root->root_key.objectid;
u64 chunk_type;
+ bool retried = false;
+ int failed = 0;
int ret;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+again:
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -1842,7 +1848,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
found_key.objectid,
found_key.offset);
- BUG_ON(ret);
+ if (ret == -ENOSPC)
+ failed++;
+ else if (ret)
+ BUG();
}
if (found_key.offset == 0)
@@ -1850,6 +1859,14 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
key.offset = found_key.offset - 1;
}
ret = 0;
+ if (failed && !retried) {
+ failed = 0;
+ retried = true;
+ goto again;
+ } else if (failed && retried) {
+ WARN_ON(1);
+ ret = -ENOSPC;
+ }
error:
btrfs_free_path(path);
return ret;
@@ -1894,6 +1911,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
continue;
ret = btrfs_shrink_device(device, old_size - size_to_free);
+ if (ret == -ENOSPC)
+ break;
BUG_ON(ret);
trans = btrfs_start_transaction(dev_root, 1);
@@ -1938,9 +1957,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
chunk = btrfs_item_ptr(path->nodes[0],
path->slots[0],
struct btrfs_chunk);
- key.offset = found_key.offset;
/* chunk zero is special */
- if (key.offset == 0)
+ if (found_key.offset == 0)
break;
btrfs_release_path(chunk_root, path);
@@ -1948,7 +1966,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
chunk_root->root_key.objectid,
found_key.objectid,
found_key.offset);
- BUG_ON(ret);
+ BUG_ON(ret && ret != -ENOSPC);
+ key.offset = found_key.offset - 1;
}
ret = 0;
error:
@@ -1974,10 +1993,13 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
u64 chunk_offset;
int ret;
int slot;
+ int failed = 0;
+ bool retried = false;
struct extent_buffer *l;
struct btrfs_key key;
struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
u64 old_total = btrfs_super_total_bytes(super_copy);
+ u64 old_size = device->total_bytes;
u64 diff = device->total_bytes - new_size;
if (new_size >= device->total_bytes)
@@ -1987,12 +2009,6 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
if (!path)
return -ENOMEM;
- trans = btrfs_start_transaction(root, 1);
- if (!trans) {
- ret = -ENOMEM;
- goto done;
- }
-
path->reada = 2;
lock_chunks(root);
@@ -2001,8 +2017,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
if (device->writeable)
device->fs_devices->total_rw_bytes -= diff;
unlock_chunks(root);
- btrfs_end_transaction(trans, root);
+again:
key.objectid = device->devid;
key.offset = (u64)-1;
key.type = BTRFS_DEV_EXTENT_KEY;
@@ -2017,6 +2033,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
goto done;
if (ret) {
ret = 0;
+ btrfs_release_path(root, path);
break;
}
@@ -2024,14 +2041,18 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
slot = path->slots[0];
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
- if (key.objectid != device->devid)
+ if (key.objectid != device->devid) {
+ btrfs_release_path(root, path);
break;
+ }
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
length = btrfs_dev_extent_length(l, dev_extent);
- if (key.offset + length <= new_size)
+ if (key.offset + length <= new_size) {
+ btrfs_release_path(root, path);
break;
+ }
chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -2040,8 +2061,26 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
chunk_offset);
- if (ret)
+ if (ret && ret != -ENOSPC)
goto done;
+ if (ret == -ENOSPC)
+ failed++;
+ key.offset -= 1;
+ }
+
+ if (failed && !retried) {
+ failed = 0;
+ retried = true;
+ goto again;
+ } else if (failed && retried) {
+ ret = -ENOSPC;
+ lock_chunks(root);
+
+ device->total_bytes = old_size;
+ if (device->writeable)
+ device->fs_devices->total_rw_bytes += diff;
+ unlock_chunks(root);
+ goto done;
}
/* Shrinking succeeded, else we would be at "done". */
@@ -2294,9 +2333,9 @@ again:
em->block_len = em->len;
em_tree = &extent_root->fs_info->mapping_tree.map_tree;
- spin_lock(&em_tree->lock);
+ write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
- spin_unlock(&em_tree->lock);
+ write_unlock(&em_tree->lock);
BUG_ON(ret);
free_extent_map(em);
@@ -2491,9 +2530,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
int readonly = 0;
int i;
- spin_lock(&map_tree->map_tree.lock);
+ read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
- spin_unlock(&map_tree->map_tree.lock);
+ read_unlock(&map_tree->map_tree.lock);
if (!em)
return 1;
@@ -2518,11 +2557,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
struct extent_map *em;
while (1) {
- spin_lock(&tree->map_tree.lock);
+ write_lock(&tree->map_tree.lock);
em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
if (em)
remove_extent_mapping(&tree->map_tree, em);
- spin_unlock(&tree->map_tree.lock);
+ write_unlock(&tree->map_tree.lock);
if (!em)
break;
kfree(em->bdev);
@@ -2540,9 +2579,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
struct extent_map_tree *em_tree = &map_tree->map_tree;
int ret;
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, len);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical);
@@ -2604,9 +2643,9 @@ again:
atomic_set(&multi->error, 0);
}
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
if (!em && unplug_page)
return 0;
@@ -2763,9 +2802,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 stripe_nr;
int i, j, nr = 0;
- spin_lock(&em_tree->lock);
+ read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_start, 1);
- spin_unlock(&em_tree->lock);
+ read_unlock(&em_tree->lock);
BUG_ON(!em || em->start != chunk_start);
map = (struct map_lookup *)em->bdev;
@@ -3053,9 +3092,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
- spin_lock(&map_tree->map_tree.lock);
+ read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
- spin_unlock(&map_tree->map_tree.lock);
+ read_unlock(&map_tree->map_tree.lock);
/* already mapped? */
if (em && em->start <= logical && em->start + em->len > logical) {
@@ -3114,9 +3153,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
map->stripes[i].dev->in_fs_metadata = 1;
}
- spin_lock(&map_tree->map_tree.lock);
+ write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em);
- spin_unlock(&map_tree->map_tree.lock);
+ write_unlock(&map_tree->map_tree.lock);
BUG_ON(ret);
free_extent_map(em);
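
Both btrfs_shrink_device() and btrfs_relocate_sys_chunks() now tolerate -ENOSPC from btrfs_relocate_chunk(): a chunk may fail to move simply because the space it would move into is still occupied by another chunk, so the whole pass is retried exactly once before the operation is rolled back. A stand-alone model of that retry pattern, illustrative only and not taken from the tree:

	/*
	 * Illustrative sketch only: the retry-once-on-ENOSPC loop used by the
	 * shrink and sys-chunk relocation paths above.
	 */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CHUNKS 4

	static bool moved[NR_CHUNKS];

	/* toy model: a chunk fits only after some other chunk has freed space,
	 * except the last one, which always fits */
	static int relocate_chunk(int i)
	{
		bool any_moved = false;
		int j;

		for (j = 0; j < NR_CHUNKS; j++)
			if (moved[j])
				any_moved = true;
		if (i != NR_CHUNKS - 1 && !any_moved)
			return -ENOSPC;
		moved[i] = true;
		return 0;
	}

	int main(void)
	{
		bool retried = false;
		int failed, i;

	again:
		failed = 0;
		for (i = 0; i < NR_CHUNKS; i++) {
			if (moved[i])
				continue;
			if (relocate_chunk(i) == -ENOSPC)
				failed++;
		}
		if (failed && !retried) {
			failed = 0;
			retried = true;
			goto again;	/* second pass, exactly once */
		}
		printf(failed ? "still ENOSPC after retry: roll back\n"
			      : "all chunks relocated\n");
		return 0;
	}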
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5139a833f72..31b0fabdd2e 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -181,4 +181,7 @@ int btrfs_balance(struct btrfs_root *dev_root);
void btrfs_unlock_volumes(void);
void btrfs_lock_volumes(void);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device, u64 num_bytes,
+ u64 *start, u64 *max_avail);
#endif
diff --git a/fs/buffer.c b/fs/buffer.c
index 90a98865b0c..6fa530256bf 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
bh->b_end_io = handler;
bh->b_private = private;
}
+EXPORT_SYMBOL(init_buffer);
static int sync_buffer(void *word)
{
@@ -80,6 +81,7 @@ void unlock_buffer(struct buffer_head *bh)
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
}
+EXPORT_SYMBOL(unlock_buffer);
/*
* Block until a buffer comes unlocked. This doesn't stop it
@@ -90,6 +92,7 @@ void __wait_on_buffer(struct buffer_head * bh)
{
wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
+EXPORT_SYMBOL(__wait_on_buffer);
static void
__clear_page_buffers(struct page *page)
@@ -144,6 +147,7 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
__end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
}
+EXPORT_SYMBOL(end_buffer_read_sync);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
@@ -164,6 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
unlock_buffer(bh);
put_bh(bh);
}
+EXPORT_SYMBOL(end_buffer_write_sync);
/*
* Various filesystems appear to want __find_get_block to be non-blocking.
@@ -272,9 +277,10 @@ void invalidate_bdev(struct block_device *bdev)
invalidate_bh_lrus();
invalidate_mapping_pages(mapping, 0, -1);
}
+EXPORT_SYMBOL(invalidate_bdev);
/*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
*/
static void free_more_memory(void)
{
@@ -410,6 +416,7 @@ still_busy:
local_irq_restore(flags);
return;
}
+EXPORT_SYMBOL(end_buffer_async_write);
/*
* If a page's buffers are under async readin (end_buffer_async_read
@@ -438,8 +445,8 @@ static void mark_buffer_async_read(struct buffer_head *bh)
set_buffer_async_read(bh);
}
-void mark_buffer_async_write_endio(struct buffer_head *bh,
- bh_end_io_t *handler)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+ bh_end_io_t *handler)
{
bh->b_end_io = handler;
set_buffer_async_write(bh);
@@ -553,7 +560,7 @@ repeat:
return err;
}
-void do_thaw_all(struct work_struct *work)
+static void do_thaw_all(struct work_struct *work)
{
struct super_block *sb;
char b[BDEVNAME_SIZE];
@@ -1172,6 +1179,7 @@ void mark_buffer_dirty(struct buffer_head *bh)
}
}
}
+EXPORT_SYMBOL(mark_buffer_dirty);
/*
* Decrement a buffer_head's reference count. If all buffers against a page
@@ -1188,6 +1196,7 @@ void __brelse(struct buffer_head * buf)
}
WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
+EXPORT_SYMBOL(__brelse);
/*
* bforget() is like brelse(), except it discards any
@@ -1206,6 +1215,7 @@ void __bforget(struct buffer_head *bh)
}
__brelse(bh);
}
+EXPORT_SYMBOL(__bforget);
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
@@ -1699,9 +1709,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
- * activity, but those code paths have their own higher-level
- * throttling.
+ * potentially cause a busy-wait loop from writeback threads
+ * and kswapd activity, but those code paths have their own
+ * higher-level throttling.
*/
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
lock_buffer(bh);
@@ -2218,6 +2228,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
}
return 0;
}
+EXPORT_SYMBOL(block_read_full_page);
/* utility function for filesystems that need to do work on expanding
* truncates. Uses filesystem pagecache writes to allow the filesystem to
@@ -2228,16 +2239,10 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
- unsigned long limit;
int err;
- err = -EFBIG;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && size > (loff_t)limit) {
- send_sig(SIGXFSZ, current, 0);
- goto out;
- }
- if (size > inode->i_sb->s_maxbytes)
+ err = inode_newsize_ok(inode, size);
+ if (err)
goto out;
err = pagecache_write_begin(NULL, mapping, size, 0,
@@ -2252,6 +2257,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
out:
return err;
}
+EXPORT_SYMBOL(generic_cont_expand_simple);
static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
@@ -2352,6 +2358,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
out:
return err;
}
+EXPORT_SYMBOL(cont_write_begin);
int block_prepare_write(struct page *page, unsigned from, unsigned to,
get_block_t *get_block)
@@ -2362,6 +2369,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
ClearPageUptodate(page);
return err;
}
+EXPORT_SYMBOL(block_prepare_write);
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
@@ -2369,6 +2377,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
__block_commit_write(inode,page,from,to);
return 0;
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
@@ -2426,6 +2435,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
out:
return ret;
}
+EXPORT_SYMBOL(block_page_mkwrite);
/*
* nobh_write_begin()'s prereads are special: the buffer_heads are freed
@@ -2849,6 +2859,7 @@ unlock:
out:
return err;
}
+EXPORT_SYMBOL(block_truncate_page);
/*
* The generic ->writepage function for buffer-backed address_spaces
@@ -2890,6 +2901,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
return __block_write_full_page(inode, page, get_block, wbc, handler);
}
+EXPORT_SYMBOL(block_write_full_page_endio);
/*
* The generic ->writepage function for buffer-backed address_spaces
@@ -2900,7 +2912,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
return block_write_full_page_endio(page, get_block, wbc,
end_buffer_async_write);
}
-
+EXPORT_SYMBOL(block_write_full_page);
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
@@ -2913,6 +2925,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
+EXPORT_SYMBOL(generic_block_bmap);
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
@@ -2982,6 +2995,7 @@ int submit_bh(int rw, struct buffer_head * bh)
bio_put(bio);
return ret;
}
+EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
@@ -3043,6 +3057,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
unlock_buffer(bh);
}
}
+EXPORT_SYMBOL(ll_rw_block);
/*
* For a data-integrity writeout, we need to wait upon any in-progress I/O
@@ -3071,6 +3086,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
}
return ret;
}
+EXPORT_SYMBOL(sync_dirty_buffer);
/*
* try_to_free_buffers() checks if all the buffers on this particular page
@@ -3185,13 +3201,14 @@ void block_sync_page(struct page *page)
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
}
+EXPORT_SYMBOL(block_sync_page);
/*
* There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here.
*
* Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
*/
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
@@ -3361,29 +3378,3 @@ void __init buffer_init(void)
max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
hotcpu_notifier(buffer_cpu_notify, 0);
}
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_page_mkwrite);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(block_write_full_page_endio);
-EXPORT_SYMBOL(cont_write_begin);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(end_buffer_async_write);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_cont_expand_simple);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);
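
The fs/buffer.c hunks above move each EXPORT_SYMBOL() annotation so it sits directly after the function it exports and drop the stale export block at the end of the file; the bdflush comment is likewise updated to name the `flush-X' writeback threads that replaced pdflush. A minimal sketch of the placement convention, using a hypothetical helper rather than code from the patch:

    #include <linux/module.h>
    #include <linux/errno.h>

    /* Hypothetical helper: the point is only that the export macro lives
     * immediately below the definition, so it cannot go stale in a list
     * at the bottom of the file when the function moves or disappears. */
    int example_sync_helper(int value)
    {
            return value ? 0 : -EINVAL;
    }
    EXPORT_SYMBOL(example_sync_helper);
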
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 3cbc57f932d..d6db933df2b 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -264,7 +264,6 @@ int __register_chrdev(unsigned int major, unsigned int baseminor,
{
struct char_device_struct *cd;
struct cdev *cdev;
- char *s;
int err = -ENOMEM;
cd = __register_chrdev_region(major, baseminor, count, name);
@@ -278,8 +277,6 @@ int __register_chrdev(unsigned int major, unsigned int baseminor,
cdev->owner = fops->owner;
cdev->ops = fops;
kobject_set_name(&cdev->kobj, "%s", name);
- for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
- *s = '!';
err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
if (err)
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 6994a0f54f0..80f35259680 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,6 +2,7 @@ config CIFS
tristate "CIFS support (advanced network filesystem, SMBFS successor)"
depends on INET
select NLS
+ select SLOW_WORK
help
This is the client VFS module for the Common Internet File System
(CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 606912d8f2a..fea9e898c4b 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -142,7 +142,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
if (rc != 0) {
cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
- __func__, *devname, rc));;
+ __func__, *devname, rc));
goto compose_mount_options_err;
}
/* md_len = strlen(...) + 12 for 'sep+prefixpath='
@@ -385,7 +385,7 @@ out_err:
goto out;
}
-struct inode_operations cifs_dfs_referral_inode_operations = {
+const struct inode_operations cifs_dfs_referral_inode_operations = {
.follow_link = cifs_dfs_follow_mountpoint,
};
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3610e9958b4..9a5e4f5f312 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -50,7 +50,7 @@
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
-static struct quotactl_ops cifs_quotactl_ops;
+static const struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */
int cifsFYI = 0;
@@ -64,9 +64,6 @@ unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct *oplockThread; /* remove sparse warning */
-struct task_struct *oplockThread = NULL;
-/* extern struct task_struct * dnotifyThread; remove sparse warning */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
@@ -185,8 +182,7 @@ out_mount_failed:
cifs_sb->mountdata = NULL;
}
#endif
- if (cifs_sb->local_nls)
- unload_nls(cifs_sb->local_nls);
+ unload_nls(cifs_sb->local_nls);
kfree(cifs_sb);
}
return rc;
@@ -517,7 +513,7 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
return rc;
}
-static struct quotactl_ops cifs_quotactl_ops = {
+static const struct quotactl_ops cifs_quotactl_ops = {
.set_xquota = cifs_xquota_set,
.get_xquota = cifs_xquota_get,
.set_xstate = cifs_xstate_set,
@@ -973,89 +969,12 @@ cifs_destroy_mids(void)
kmem_cache_destroy(cifs_oplock_cachep);
}
-static int cifs_oplock_thread(void *dummyarg)
-{
- struct oplock_q_entry *oplock_item;
- struct cifsTconInfo *pTcon;
- struct inode *inode;
- __u16 netfid;
- int rc, waitrc = 0;
-
- set_freezable();
- do {
- if (try_to_freeze())
- continue;
-
- spin_lock(&cifs_oplock_lock);
- if (list_empty(&cifs_oplock_list)) {
- spin_unlock(&cifs_oplock_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(39*HZ);
- } else {
- oplock_item = list_entry(cifs_oplock_list.next,
- struct oplock_q_entry, qhead);
- cFYI(1, ("found oplock item to write out"));
- pTcon = oplock_item->tcon;
- inode = oplock_item->pinode;
- netfid = oplock_item->netfid;
- spin_unlock(&cifs_oplock_lock);
- DeleteOplockQEntry(oplock_item);
- /* can not grab inode sem here since it would
- deadlock when oplock received on delete
- since vfs_unlink holds the i_mutex across
- the call */
- /* mutex_lock(&inode->i_mutex);*/
- if (S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if (CIFS_I(inode)->clientCanCacheAll == 0)
- break_lease(inode, FMODE_READ);
- else if (CIFS_I(inode)->clientCanCacheRead == 0)
- break_lease(inode, FMODE_WRITE);
-#endif
- rc = filemap_fdatawrite(inode->i_mapping);
- if (CIFS_I(inode)->clientCanCacheRead == 0) {
- waitrc = filemap_fdatawait(
- inode->i_mapping);
- invalidate_remote_inode(inode);
- }
- if (rc == 0)
- rc = waitrc;
- } else
- rc = 0;
- /* mutex_unlock(&inode->i_mutex);*/
- if (rc)
- CIFS_I(inode)->write_behind_rc = rc;
- cFYI(1, ("Oplock flush inode %p rc %d",
- inode, rc));
-
- /* releasing stale oplock after recent reconnect
- of smb session using a now incorrect file
- handle is not a data integrity issue but do
- not bother sending an oplock release if session
- to server still is disconnected since oplock
- already released by the server in that case */
- if (!pTcon->need_reconnect) {
- rc = CIFSSMBLock(0, pTcon, netfid,
- 0 /* len */ , 0 /* offset */, 0,
- 0, LOCKING_ANDX_OPLOCK_RELEASE,
- false /* wait flag */);
- cFYI(1, ("Oplock release rc = %d", rc));
- }
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1); /* yield in case q were corrupt */
- }
- } while (!kthread_should_stop());
-
- return 0;
-}
-
static int __init
init_cifs(void)
{
int rc = 0;
cifs_proc_init();
INIT_LIST_HEAD(&cifs_tcp_ses_list);
- INIT_LIST_HEAD(&cifs_oplock_list);
#ifdef CONFIG_CIFS_EXPERIMENTAL
INIT_LIST_HEAD(&GlobalDnotifyReqList);
INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
@@ -1084,7 +1003,6 @@ init_cifs(void)
rwlock_init(&GlobalSMBSeslock);
rwlock_init(&cifs_tcp_ses_lock);
spin_lock_init(&GlobalMid_Lock);
- spin_lock_init(&cifs_oplock_lock);
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
@@ -1119,16 +1037,13 @@ init_cifs(void)
if (rc)
goto out_unregister_key_type;
#endif
- oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
- if (IS_ERR(oplockThread)) {
- rc = PTR_ERR(oplockThread);
- cERROR(1, ("error %d create oplock thread", rc));
- goto out_unregister_dfs_key_type;
- }
+ rc = slow_work_register_user();
+ if (rc)
+ goto out_unregister_resolver_key;
return 0;
- out_unregister_dfs_key_type:
+ out_unregister_resolver_key:
#ifdef CONFIG_CIFS_DFS_UPCALL
unregister_key_type(&key_type_dns_resolver);
out_unregister_key_type:
@@ -1165,7 +1080,6 @@ exit_cifs(void)
cifs_destroy_inodecache();
cifs_destroy_mids();
cifs_destroy_request_bufs();
- kthread_stop(oplockThread);
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 094325e3f71..ac2b24c192f 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -67,7 +67,7 @@ extern int cifs_setattr(struct dentry *, struct iattr *);
extern const struct inode_operations cifs_file_inode_ops;
extern const struct inode_operations cifs_symlink_inode_ops;
-extern struct inode_operations cifs_dfs_referral_inode_operations;
+extern const struct inode_operations cifs_dfs_referral_inode_operations;
/* Functions related to files and directories */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6cfc81a3270..5d0fde18039 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -18,6 +18,7 @@
*/
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/slow-work.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
/*
@@ -346,14 +347,16 @@ struct cifsFileInfo {
/* lock scope id (0 if none) */
struct file *pfile; /* needed for writepage */
struct inode *pInode; /* needed for oplock break */
+ struct vfsmount *mnt;
struct mutex lock_mutex;
struct list_head llist; /* list of byte range locks we have. */
bool closePend:1; /* file is marked to close */
bool invalidHandle:1; /* file closed via session abend */
- bool messageMode:1; /* for pipes: message vs byte mode */
+ bool oplock_break_cancelled:1;
atomic_t count; /* reference count */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
+ struct slow_work oplock_break; /* slow_work job for oplock breaks */
};
/* Take a reference on the file private data */
@@ -365,8 +368,10 @@ static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
/* Release a reference on the file private data */
static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
- if (atomic_dec_and_test(&cifs_file->count))
+ if (atomic_dec_and_test(&cifs_file->count)) {
+ iput(cifs_file->pInode);
kfree(cifs_file);
+ }
}
/*
@@ -382,7 +387,6 @@ struct cifsInodeInfo {
unsigned long time; /* jiffies of last update/check of inode */
bool clientCanCacheRead:1; /* read oplock */
bool clientCanCacheAll:1; /* read and writebehind oplock */
- bool oplockPending:1;
bool delete_pending:1; /* DELETE_ON_CLOSE is set */
u64 server_eof; /* current file size on server */
u64 uniqueid; /* server inode number */
@@ -585,9 +589,9 @@ require use of the stronger protocol */
#define CIFSSEC_MUST_LANMAN 0x10010
#define CIFSSEC_MUST_PLNTXT 0x20020
#ifdef CONFIG_CIFS_UPCALL
-#define CIFSSEC_MASK 0xAF0AF /* allows weak security but also krb5 */
+#define CIFSSEC_MASK 0xBF0BF /* allows weak security but also krb5 */
#else
-#define CIFSSEC_MASK 0xA70A7 /* current flags supported if weak */
+#define CIFSSEC_MASK 0xB70B7 /* current flags supported if weak */
#endif /* UPCALL */
#else /* do not allow weak pw hash */
#ifdef CONFIG_CIFS_UPCALL
@@ -669,12 +673,6 @@ GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
*/
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
-/* Global list of oplocks */
-GLOBAL_EXTERN struct list_head cifs_oplock_list;
-
-/* Protects the cifs_oplock_list */
-GLOBAL_EXTERN spinlock_t cifs_oplock_lock;
-
/* Outstanding dir notify requests */
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
/* DirNotify response queue */
@@ -725,3 +723,4 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
+extern const struct slow_work_ops cifs_oplock_break_ops;
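
With the cifsglob.h changes above, a cifsFileInfo now pins its inode for its whole lifetime: the creation path takes a reference with igrab() and cifsFileInfo_put() drops it with iput() once the count reaches zero, which is what lets the embedded slow_work oplock item dereference the inode safely. A minimal sketch of that create/put pairing, with hypothetical structure and function names:

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <asm/atomic.h>

    struct example_fileinfo {
            struct inode *pinode;
            atomic_t count;
    };

    static struct example_fileinfo *example_fileinfo_new(struct inode *inode)
    {
            struct example_fileinfo *fi = kzalloc(sizeof(*fi), GFP_KERNEL);

            if (!fi)
                    return NULL;
            fi->pinode = igrab(inode);      /* pin the inode for the object's lifetime */
            atomic_set(&fi->count, 1);
            return fi;
    }

    static void example_fileinfo_put(struct example_fileinfo *fi)
    {
            if (atomic_dec_and_test(&fi->count)) {
                    iput(fi->pinode);       /* drop the reference taken at creation */
                    kfree(fi);
            }
    }
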
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index da8fbf56599..6928c24d1d4 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -86,18 +86,17 @@ extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
const int stage,
const struct nls_table *nls_cp);
extern __u16 GetNextMid(struct TCP_Server_Info *server);
-extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
- struct cifsTconInfo *);
-extern void DeleteOplockQEntry(struct oplock_q_entry *);
-extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
int offset);
+extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
+ __u16 fileHandle, struct file *file,
+ struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct super_block *sb, int mode, int oflags,
- int *poplock, __u16 *pnetfid, int xid);
+ struct vfsmount *mnt, int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
FILE_UNIX_BASIC_INFO *info,
struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 301e307e127..941441d3e38 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -94,6 +94,7 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
open_file->invalidHandle = true;
+ open_file->oplock_break_cancelled = true;
}
write_unlock(&GlobalSMBSeslock);
/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d49682433c2..43003e0bef1 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1670,7 +1670,6 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
CIFSSMBTDis(xid, tcon);
_FreeXid(xid);
- DeleteTconOplockQEntries(tcon);
tconInfoFree(tcon);
cifs_put_smb_ses(ses);
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a6424cfc012..627a60a6c1b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -24,6 +24,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/namei.h>
+#include <linux/mount.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
@@ -129,44 +130,45 @@ cifs_bp_rename_retry:
return full_path;
}
-static void
-cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
- struct cifsTconInfo *tcon, bool write_only)
+struct cifsFileInfo *
+cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
+ struct file *file, struct vfsmount *mnt, unsigned int oflags)
{
int oplock = 0;
struct cifsFileInfo *pCifsFile;
struct cifsInodeInfo *pCifsInode;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
-
if (pCifsFile == NULL)
- return;
+ return pCifsFile;
if (oplockEnabled)
oplock = REQ_OPLOCK;
pCifsFile->netfid = fileHandle;
pCifsFile->pid = current->tgid;
- pCifsFile->pInode = newinode;
+ pCifsFile->pInode = igrab(newinode);
+ pCifsFile->mnt = mnt;
+ pCifsFile->pfile = file;
pCifsFile->invalidHandle = false;
pCifsFile->closePend = false;
mutex_init(&pCifsFile->fh_mutex);
mutex_init(&pCifsFile->lock_mutex);
INIT_LIST_HEAD(&pCifsFile->llist);
atomic_set(&pCifsFile->count, 1);
+ slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);
- /* set the following in open now
- pCifsFile->pfile = file; */
write_lock(&GlobalSMBSeslock);
- list_add(&pCifsFile->tlist, &tcon->openFileList);
+ list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
pCifsInode = CIFS_I(newinode);
if (pCifsInode) {
/* if readable file instance put first in list*/
- if (write_only)
+ if (oflags & FMODE_READ)
+ list_add(&pCifsFile->flist, &pCifsInode->openFileList);
+ else
list_add_tail(&pCifsFile->flist,
&pCifsInode->openFileList);
- else
- list_add(&pCifsFile->flist, &pCifsInode->openFileList);
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
@@ -176,18 +178,18 @@ cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
pCifsInode->clientCanCacheRead = true;
}
write_unlock(&GlobalSMBSeslock);
+
+ return pCifsFile;
}
int cifs_posix_open(char *full_path, struct inode **pinode,
- struct super_block *sb, int mode, int oflags,
- int *poplock, __u16 *pnetfid, int xid)
+ struct vfsmount *mnt, int mode, int oflags,
+ __u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
- __u32 oplock;
- bool write_only = false;
FILE_UNIX_BASIC_INFO *presp_data;
__u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
struct cifs_fattr fattr;
cFYI(1, ("posix open %s", full_path));
@@ -223,12 +225,9 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
if (oflags & O_DIRECT)
posix_flags |= SMB_O_DIRECT;
- if (!(oflags & FMODE_READ))
- write_only = true;
-
mode &= ~current_umask();
rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
- pnetfid, presp_data, &oplock, full_path,
+ pnetfid, presp_data, poplock, full_path,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc)
@@ -244,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
/* get new inode and set it up */
if (*pinode == NULL) {
- *pinode = cifs_iget(sb, &fattr);
+ *pinode = cifs_iget(mnt->mnt_sb, &fattr);
if (!*pinode) {
rc = -ENOMEM;
goto posix_open_ret;
@@ -253,7 +252,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
cifs_fattr_to_inode(*pinode, &fattr);
}
- cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
+ cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
posix_open_ret:
kfree(presp_data);
@@ -280,7 +279,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
int rc = -ENOENT;
int xid;
int create_options = CREATE_NOT_DIR;
- int oplock = 0;
+ __u32 oplock = 0;
int oflags;
bool posix_create = false;
/*
@@ -298,7 +297,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
int disposition = FILE_OVERWRITE_IF;
- bool write_only = false;
xid = GetXid();
@@ -323,7 +321,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- rc = cifs_posix_open(full_path, &newinode, inode->i_sb,
+ rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
@@ -351,11 +349,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
desiredAccess = 0;
if (oflags & FMODE_READ)
desiredAccess |= GENERIC_READ; /* is this too little? */
- if (oflags & FMODE_WRITE) {
+ if (oflags & FMODE_WRITE)
desiredAccess |= GENERIC_WRITE;
- if (!(oflags & FMODE_READ))
- write_only = true;
- }
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
disposition = FILE_CREATE;
@@ -470,8 +465,8 @@ cifs_create_set_dentry:
/* mknod case - do not leave file open */
CIFSSMBClose(xid, tcon, fileHandle);
} else if (!(posix_create) && (newinode)) {
- cifs_fill_fileinfo(newinode, fileHandle,
- cifs_sb->tcon, write_only);
+ cifs_new_fileinfo(newinode, fileHandle, NULL,
+ nd->path.mnt, oflags);
}
cifs_create_out:
kfree(buf);
@@ -611,7 +606,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
{
int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
- int oplock = 0;
+ __u32 oplock = 0;
__u16 fileHandle = 0;
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
@@ -683,8 +678,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
- rc = cifs_posix_open(full_path, &newInode,
- parent_dir_inode->i_sb,
+ rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
&fileHandle, xid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa7beac8b80..429337eb7af 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -30,6 +30,7 @@
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
+#include <linux/mount.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
@@ -39,27 +40,6 @@
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
-static inline struct cifsFileInfo *cifs_init_private(
- struct cifsFileInfo *private_data, struct inode *inode,
- struct file *file, __u16 netfid)
-{
- memset(private_data, 0, sizeof(struct cifsFileInfo));
- private_data->netfid = netfid;
- private_data->pid = current->tgid;
- mutex_init(&private_data->fh_mutex);
- mutex_init(&private_data->lock_mutex);
- INIT_LIST_HEAD(&private_data->llist);
- private_data->pfile = file; /* needed for writepage */
- private_data->pInode = inode;
- private_data->invalidHandle = false;
- private_data->closePend = false;
- /* Initialize reference count to one. The private data is
- freed on the release of the last reference */
- atomic_set(&private_data->count, 1);
-
- return private_data;
-}
-
static inline int cifs_convert_flags(unsigned int flags)
{
if ((flags & O_ACCMODE) == O_RDONLY)
@@ -123,9 +103,11 @@ static inline int cifs_get_disposition(unsigned int flags)
}
/* all arguments to this function must be checked for validity in caller */
-static inline int cifs_posix_open_inode_helper(struct inode *inode,
- struct file *file, struct cifsInodeInfo *pCifsInode,
- struct cifsFileInfo *pCifsFile, int oplock, u16 netfid)
+static inline int
+cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
+ struct cifsInodeInfo *pCifsInode,
+ struct cifsFileInfo *pCifsFile, __u32 oplock,
+ u16 netfid)
{
write_lock(&GlobalSMBSeslock);
@@ -219,17 +201,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
struct timespec temp;
int rc;
- /* want handles we can use to read with first
- in the list so we do not have to walk the
- list to search for one in write_begin */
- if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
- list_add_tail(&pCifsFile->flist,
- &pCifsInode->openFileList);
- } else {
- list_add(&pCifsFile->flist,
- &pCifsInode->openFileList);
- }
- write_unlock(&GlobalSMBSeslock);
if (pCifsInode->clientCanCacheRead) {
/* we have the inode open somewhere else
no need to discard cache data */
@@ -279,7 +250,8 @@ client_can_cache:
int cifs_open(struct inode *inode, struct file *file)
{
int rc = -EACCES;
- int xid, oplock;
+ int xid;
+ __u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
struct cifsFileInfo *pCifsFile;
@@ -324,7 +296,7 @@ int cifs_open(struct inode *inode, struct file *file)
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, &inode, inode->i_sb,
+ rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
if (rc == 0) {
@@ -414,24 +386,17 @@ int cifs_open(struct inode *inode, struct file *file)
cFYI(1, ("cifs_open returned 0x%x", rc));
goto out;
}
- file->private_data =
- kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+
+ pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
+ file->f_flags);
+ file->private_data = pCifsFile;
if (file->private_data == NULL) {
rc = -ENOMEM;
goto out;
}
- pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
- write_lock(&GlobalSMBSeslock);
- list_add(&pCifsFile->tlist, &tcon->openFileList);
- pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
- if (pCifsInode) {
- rc = cifs_open_inode_helper(inode, file, pCifsInode,
- pCifsFile, tcon,
- &oplock, buf, full_path, xid);
- } else {
- write_unlock(&GlobalSMBSeslock);
- }
+ rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
+ &oplock, buf, full_path, xid);
if (oplock & CIFS_CREATE_ACTION) {
/* time to set mode which we can not set earlier due to
@@ -474,7 +439,8 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile)
static int cifs_reopen_file(struct file *file, bool can_flush)
{
int rc = -EACCES;
- int xid, oplock;
+ int xid;
+ __u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
struct cifsFileInfo *pCifsFile;
@@ -543,7 +509,7 @@ reopen_error_exit:
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, NULL, inode->i_sb,
+ rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
if (rc == 0) {
@@ -2308,6 +2274,73 @@ out:
return rc;
}
+static void
+cifs_oplock_break(struct slow_work *work)
+{
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ struct inode *inode = cfile->pInode;
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
+ int rc, waitrc = 0;
+
+ if (inode && S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+ if (cinode->clientCanCacheAll == 0)
+ break_lease(inode, FMODE_READ);
+ else if (cinode->clientCanCacheRead == 0)
+ break_lease(inode, FMODE_WRITE);
+#endif
+ rc = filemap_fdatawrite(inode->i_mapping);
+ if (cinode->clientCanCacheRead == 0) {
+ waitrc = filemap_fdatawait(inode->i_mapping);
+ invalidate_remote_inode(inode);
+ }
+ if (!rc)
+ rc = waitrc;
+ if (rc)
+ cinode->write_behind_rc = rc;
+ cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
+ }
+
+ /*
+ * releasing stale oplock after recent reconnect of smb session using
+ * a now incorrect file handle is not a data integrity issue but do
+ * not bother sending an oplock release if session to server still is
+ * disconnected since oplock already released by the server
+ */
+ if (!cfile->closePend && !cfile->oplock_break_cancelled) {
+ rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
+ LOCKING_ANDX_OPLOCK_RELEASE, false);
+ cFYI(1, ("Oplock release rc = %d", rc));
+ }
+}
+
+static int
+cifs_oplock_break_get(struct slow_work *work)
+{
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ mntget(cfile->mnt);
+ cifsFileInfo_get(cfile);
+ return 0;
+}
+
+static void
+cifs_oplock_break_put(struct slow_work *work)
+{
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ mntput(cfile->mnt);
+ cifsFileInfo_put(cfile);
+}
+
+const struct slow_work_ops cifs_oplock_break_ops = {
+ .get_ref = cifs_oplock_break_get,
+ .put_ref = cifs_oplock_break_put,
+ .execute = cifs_oplock_break,
+};
+
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
.readpages = cifs_readpages,
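
The file.c hunks above turn the oplock break into a slow_work item: get_ref()/put_ref() pin the vfsmount and the file info while the work sits in the queue, and execute() does the writeback, cache invalidation and oplock release that the old cifsoplockd loop used to do. A minimal sketch of the same three-callback shape with hypothetical names (it assumes slow_work_register_user() has already succeeded, as in init_cifs() above):

    #include <linux/slow-work.h>
    #include <linux/kernel.h>

    struct example_item {
            struct slow_work work;
            /* payload fields for the deferred operation go here */
    };

    static int example_get_ref(struct slow_work *work)
    {
            /* take whatever references keep the payload alive while queued */
            return 0;
    }

    static void example_put_ref(struct slow_work *work)
    {
            /* drop the references taken in example_get_ref() */
    }

    static void example_execute(struct slow_work *work)
    {
            struct example_item *item =
                    container_of(work, struct example_item, work);

            /* the long-running part runs here, in slow-work thread context */
            (void)item;
    }

    static const struct slow_work_ops example_ops = {
            .get_ref        = example_get_ref,
            .put_ref        = example_put_ref,
            .execute        = example_execute,
    };

    /* Typical use, mirroring cifs_new_fileinfo() and is_valid_oplock_break():
     *
     *      slow_work_init(&item->work, &example_ops);
     *      ...
     *      if (slow_work_enqueue(&item->work))
     *              handle the enqueue failure;
     */
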
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 1f09c761931..5e2492535da 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1557,57 +1557,24 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
static int cifs_vmtruncate(struct inode *inode, loff_t offset)
{
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
+ loff_t oldsize;
+ int err;
spin_lock(&inode->i_lock);
- if (inode->i_size < offset)
- goto do_expand;
- /*
- * truncation of in-use swapfiles is disallowed - it would cause
- * subsequent swapout to scribble on the now-freed blocks.
- */
- if (IS_SWAPFILE(inode)) {
- spin_unlock(&inode->i_lock);
- goto out_busy;
- }
- i_size_write(inode, offset);
- spin_unlock(&inode->i_lock);
- /*
- * unmap_mapping_range is called twice, first simply for efficiency
- * so that truncate_inode_pages does fewer single-page unmaps. However
- * after this first call, and before truncate_inode_pages finishes,
- * it is possible for private pages to be COWed, which remain after
- * truncate_inode_pages finishes, hence the second unmap_mapping_range
- * call must be made for correctness.
- */
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- goto out_truncate;
-
-do_expand:
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit) {
+ err = inode_newsize_ok(inode, offset);
+ if (err) {
spin_unlock(&inode->i_lock);
- goto out_sig;
- }
- if (offset > inode->i_sb->s_maxbytes) {
- spin_unlock(&inode->i_lock);
- goto out_big;
+ goto out;
}
+
+ oldsize = inode->i_size;
i_size_write(inode, offset);
spin_unlock(&inode->i_lock);
-out_truncate:
+ truncate_pagecache(inode, oldsize, offset);
if (inode->i_op->truncate)
inode->i_op->truncate(inode);
- return 0;
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out_big:
- return -EFBIG;
-out_busy:
- return -ETXTBSY;
+out:
+ return err;
}
static int
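
The inode.c hunk above replaces the open-coded expand/shrink logic in cifs_vmtruncate() with the generic helpers: inode_newsize_ok() performs the size-limit and swapfile checks that used to be spelled out by hand, and truncate_pagecache() takes care of unmapping and dropping the cached pages. A minimal sketch of the resulting shape (the i_lock handling that the real function keeps around the checks is omitted here):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* "example_vmtruncate" is a stand-in for cifs_vmtruncate(), shown only
     * to illustrate the inode_newsize_ok()/truncate_pagecache() pairing. */
    static int example_vmtruncate(struct inode *inode, loff_t offset)
    {
            loff_t oldsize;
            int err;

            err = inode_newsize_ok(inode, offset);
            if (err)
                    return err;

            oldsize = inode->i_size;
            i_size_write(inode, offset);
            truncate_pagecache(inode, oldsize, offset);

            if (inode->i_op->truncate)
                    inode->i_op->truncate(inode);
            return 0;
    }
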
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e079a9190ec..0241b25ac33 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,7 +32,6 @@
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
-extern struct task_struct *oplockThread;
/* The xid serves as a useful identifier for each incoming vfs request,
in a similar way to the mid which is useful to track each sent smb,
@@ -500,6 +499,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
struct cifsTconInfo *tcon;
struct cifsInodeInfo *pCifsInode;
struct cifsFileInfo *netfile;
+ int rc;
cFYI(1, ("Checking for oplock break or dnotify response"));
if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -562,30 +562,40 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
continue;
cifs_stats_inc(&tcon->num_oplock_brks);
- write_lock(&GlobalSMBSeslock);
+ read_lock(&GlobalSMBSeslock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
if (pSMB->Fid != netfile->netfid)
continue;
- write_unlock(&GlobalSMBSeslock);
- read_unlock(&cifs_tcp_ses_lock);
+ /*
+ * don't do anything if file is about to be
+ * closed anyway.
+ */
+ if (netfile->closePend) {
+ read_unlock(&GlobalSMBSeslock);
+ read_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
+
cFYI(1, ("file id match, oplock break"));
pCifsInode = CIFS_I(netfile->pInode);
pCifsInode->clientCanCacheAll = false;
if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead = false;
- pCifsInode->oplockPending = true;
- AllocOplockQEntry(netfile->pInode,
- netfile->netfid, tcon);
- cFYI(1, ("about to wake up oplock thread"));
- if (oplockThread)
- wake_up_process(oplockThread);
-
+ rc = slow_work_enqueue(&netfile->oplock_break);
+ if (rc) {
+ cERROR(1, ("failed to enqueue oplock "
+ "break: %d\n", rc));
+ } else {
+ netfile->oplock_break_cancelled = false;
+ }
+ read_unlock(&GlobalSMBSeslock);
+ read_unlock(&cifs_tcp_ses_lock);
return true;
}
- write_unlock(&GlobalSMBSeslock);
+ read_unlock(&GlobalSMBSeslock);
read_unlock(&cifs_tcp_ses_lock);
cFYI(1, ("No matching file for oplock break"));
return true;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f823a4a208a..1f098ca7163 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -146,7 +146,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
}
}
-void
+static void
cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
struct cifs_sb_info *cifs_sb)
{
@@ -161,7 +161,7 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
cifs_fill_common_info(fattr, cifs_sb);
}
-void
+static void
cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
struct cifs_sb_info *cifs_sb)
{
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1da4ab250ea..07b8e71544e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -103,56 +103,6 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
mempool_free(midEntry, cifs_mid_poolp);
}
-struct oplock_q_entry *
-AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
-{
- struct oplock_q_entry *temp;
- if ((pinode == NULL) || (tcon == NULL)) {
- cERROR(1, ("Null parms passed to AllocOplockQEntry"));
- return NULL;
- }
- temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
- GFP_KERNEL);
- if (temp == NULL)
- return temp;
- else {
- temp->pinode = pinode;
- temp->tcon = tcon;
- temp->netfid = fid;
- spin_lock(&cifs_oplock_lock);
- list_add_tail(&temp->qhead, &cifs_oplock_list);
- spin_unlock(&cifs_oplock_lock);
- }
- return temp;
-}
-
-void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
-{
- spin_lock(&cifs_oplock_lock);
- /* should we check if list empty first? */
- list_del(&oplockEntry->qhead);
- spin_unlock(&cifs_oplock_lock);
- kmem_cache_free(cifs_oplock_cachep, oplockEntry);
-}
-
-
-void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
-{
- struct oplock_q_entry *temp;
-
- if (tcon == NULL)
- return;
-
- spin_lock(&cifs_oplock_lock);
- list_for_each_entry(temp, &cifs_oplock_list, qhead) {
- if ((temp->tcon) && (temp->tcon == tcon)) {
- list_del(&temp->qhead);
- kmem_cache_free(cifs_oplock_cachep, temp);
- }
- }
- spin_unlock(&cifs_oplock_lock);
-}
-
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h
index 8ccd5ed81d9..d99860a3389 100644
--- a/fs/coda/coda_int.h
+++ b/fs/coda/coda_int.h
@@ -2,6 +2,7 @@
#define _CODA_INT_
struct dentry;
+struct file;
extern struct file_system_type coda_fs_type;
extern unsigned long coda_timeout;
diff --git a/fs/compat.c b/fs/compat.c
index 6d6f98fe64a..d576b552e8e 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -100,13 +100,6 @@ asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, st
get_compat_timespec(&tv[1], &t[1]))
return -EFAULT;
- if ((tv[0].tv_nsec == UTIME_OMIT || tv[0].tv_nsec == UTIME_NOW)
- && tv[0].tv_sec != 0)
- return -EINVAL;
- if ((tv[1].tv_nsec == UTIME_OMIT || tv[1].tv_nsec == UTIME_NOW)
- && tv[1].tv_sec != 0)
- return -EINVAL;
-
if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT)
return 0;
}
@@ -775,13 +768,13 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
char __user * type, unsigned long flags,
void __user * data)
{
- unsigned long type_page;
+ char *kernel_type;
unsigned long data_page;
- unsigned long dev_page;
+ char *kernel_dev;
char *dir_page;
int retval;
- retval = copy_mount_options (type, &type_page);
+ retval = copy_mount_string(type, &kernel_type);
if (retval < 0)
goto out;
@@ -790,38 +783,38 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
if (IS_ERR(dir_page))
goto out1;
- retval = copy_mount_options (dev_name, &dev_page);
+ retval = copy_mount_string(dev_name, &kernel_dev);
if (retval < 0)
goto out2;
- retval = copy_mount_options (data, &data_page);
+ retval = copy_mount_options(data, &data_page);
if (retval < 0)
goto out3;
retval = -EINVAL;
- if (type_page && data_page) {
- if (!strcmp((char *)type_page, SMBFS_NAME)) {
+ if (kernel_type && data_page) {
+ if (!strcmp(kernel_type, SMBFS_NAME)) {
do_smb_super_data_conv((void *)data_page);
- } else if (!strcmp((char *)type_page, NCPFS_NAME)) {
+ } else if (!strcmp(kernel_type, NCPFS_NAME)) {
do_ncp_super_data_conv((void *)data_page);
- } else if (!strcmp((char *)type_page, NFS4_NAME)) {
+ } else if (!strcmp(kernel_type, NFS4_NAME)) {
if (do_nfs4_super_data_conv((void *) data_page))
goto out4;
}
}
- retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
+ retval = do_mount(kernel_dev, dir_page, kernel_type,
flags, (void*)data_page);
out4:
free_page(data_page);
out3:
- free_page(dev_page);
+ kfree(kernel_dev);
out2:
putname(dir_page);
out1:
- free_page(type_page);
+ kfree(kernel_type);
out:
return retval;
}
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 75efb028974..d5f8c96964b 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -18,14 +18,13 @@
#include <linux/mount.h>
#include <linux/tty.h>
#include <linux/mutex.h>
+#include <linux/magic.h>
#include <linux/idr.h>
#include <linux/devpts_fs.h>
#include <linux/parser.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
-#define DEVPTS_SUPER_MAGIC 0x1cd1
-
#define DEVPTS_DEFAULT_MODE 0600
/*
* ptmx is a new node in /dev/pts and will be unused in legacy (single-
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 1d1d2744223..1c8bb8c3a82 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -386,9 +386,9 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr)
return rv;
}
-static struct seq_operations format1_seq_ops;
-static struct seq_operations format2_seq_ops;
-static struct seq_operations format3_seq_ops;
+static const struct seq_operations format1_seq_ops;
+static const struct seq_operations format2_seq_ops;
+static const struct seq_operations format3_seq_ops;
static void *table_seq_start(struct seq_file *seq, loff_t *pos)
{
@@ -534,21 +534,21 @@ static void table_seq_stop(struct seq_file *seq, void *iter_ptr)
}
}
-static struct seq_operations format1_seq_ops = {
+static const struct seq_operations format1_seq_ops = {
.start = table_seq_start,
.next = table_seq_next,
.stop = table_seq_stop,
.show = table_seq_show,
};
-static struct seq_operations format2_seq_ops = {
+static const struct seq_operations format2_seq_ops = {
.start = table_seq_start,
.next = table_seq_next,
.stop = table_seq_stop,
.show = table_seq_show,
};
-static struct seq_operations format3_seq_ops = {
+static const struct seq_operations format3_seq_ops = {
.start = table_seq_start,
.next = table_seq_next,
.stop = table_seq_stop,
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index a2edb791344..31f4b0e6d72 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -63,9 +63,9 @@ static void drop_slab(void)
}
int drop_caches_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ proc_dointvec_minmax(table, write, buffer, length, ppos);
if (write) {
if (sysctl_drop_caches & 1)
drop_pagecache();
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig
index 0c754e64232..8aadb99b763 100644
--- a/fs/ecryptfs/Kconfig
+++ b/fs/ecryptfs/Kconfig
@@ -1,6 +1,8 @@
config ECRYPT_FS
tristate "eCrypt filesystem layer support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && KEYS && CRYPTO && NET
+ depends on EXPERIMENTAL && KEYS && NET
+ select CRYPTO_ECB
+ select CRYPTO_CBC
help
Encrypted filesystem that operates on the VFS layer. See
<file:Documentation/filesystems/ecryptfs.txt> to learn more about
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index b91851f1cda..fbb6e5eed69 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -245,13 +245,11 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
crypto_free_blkcipher(crypt_stat->tfm);
if (crypt_stat->hash_tfm)
crypto_free_hash(crypt_stat->hash_tfm);
- mutex_lock(&crypt_stat->keysig_list_mutex);
list_for_each_entry_safe(key_sig, key_sig_tmp,
&crypt_stat->keysig_list, crypt_stat_list) {
list_del(&key_sig->crypt_stat_list);
kmem_cache_free(ecryptfs_key_sig_cache, key_sig);
}
- mutex_unlock(&crypt_stat->keysig_list_mutex);
memset(crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
}
@@ -511,13 +509,14 @@ int ecryptfs_encrypt_page(struct page *page)
+ extent_offset), crypt_stat);
rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt,
offset, crypt_stat->extent_size);
- if (rc) {
+ if (rc < 0) {
ecryptfs_printk(KERN_ERR, "Error attempting "
"to write lower page; rc = [%d]"
"\n", rc);
goto out;
}
}
+ rc = 0;
out:
if (enc_extent_page) {
kunmap(enc_extent_page);
@@ -633,7 +632,7 @@ int ecryptfs_decrypt_page(struct page *page)
rc = ecryptfs_read_lower(enc_extent_virt, offset,
crypt_stat->extent_size,
ecryptfs_inode);
- if (rc) {
+ if (rc < 0) {
ecryptfs_printk(KERN_ERR, "Error attempting "
"to read lower page; rc = [%d]"
"\n", rc);
@@ -797,6 +796,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
kfree(full_alg_name);
if (IS_ERR(crypt_stat->tfm)) {
rc = PTR_ERR(crypt_stat->tfm);
+ crypt_stat->tfm = NULL;
ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): "
"Error initializing cipher [%s]\n",
crypt_stat->cipher);
@@ -925,7 +925,9 @@ static int ecryptfs_copy_mount_wide_sigs_to_inode_sigs(
struct ecryptfs_global_auth_tok *global_auth_tok;
int rc = 0;
+ mutex_lock(&crypt_stat->keysig_list_mutex);
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
+
list_for_each_entry(global_auth_tok,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
@@ -934,13 +936,13 @@ static int ecryptfs_copy_mount_wide_sigs_to_inode_sigs(
rc = ecryptfs_add_keysig(crypt_stat, global_auth_tok->sig);
if (rc) {
printk(KERN_ERR "Error adding keysig; rc = [%d]\n", rc);
- mutex_unlock(
- &mount_crypt_stat->global_auth_tok_list_mutex);
goto out;
}
}
- mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
+
out:
+ mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
+ mutex_unlock(&crypt_stat->keysig_list_mutex);
return rc;
}
@@ -1212,14 +1214,15 @@ int ecryptfs_read_and_validate_header_region(char *data,
crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
rc = ecryptfs_read_lower(data, 0, crypt_stat->extent_size,
ecryptfs_inode);
- if (rc) {
+ if (rc < 0) {
printk(KERN_ERR "%s: Error reading header region; rc = [%d]\n",
__func__, rc);
goto out;
}
if (!contains_ecryptfs_marker(data + ECRYPTFS_FILE_SIZE_BYTES)) {
rc = -EINVAL;
- }
+ } else
+ rc = 0;
out:
return rc;
}
@@ -1314,10 +1317,11 @@ ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry,
rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
0, virt_len);
- if (rc)
+ if (rc < 0)
printk(KERN_ERR "%s: Error attempting to write header "
- "information to lower file; rc = [%d]\n", __func__,
- rc);
+ "information to lower file; rc = [%d]\n", __func__, rc);
+ else
+ rc = 0;
return rc;
}
@@ -1597,7 +1601,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
}
rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size,
ecryptfs_inode);
- if (!rc)
+ if (rc >= 0)
rc = ecryptfs_read_headers_virt(page_virt, crypt_stat,
ecryptfs_dentry,
ECRYPTFS_VALIDATE_HEADER_SIZE);
@@ -1702,7 +1706,7 @@ ecryptfs_encrypt_filename(struct ecryptfs_filename *filename,
} else {
printk(KERN_ERR "%s: No support for requested filename "
"encryption method in this release\n", __func__);
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
goto out;
}
out:
@@ -1763,7 +1767,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
if (IS_ERR(*key_tfm)) {
rc = PTR_ERR(*key_tfm);
printk(KERN_ERR "Unable to allocate crypto cipher with name "
- "[%s]; rc = [%d]\n", cipher_name, rc);
+ "[%s]; rc = [%d]\n", full_alg_name, rc);
goto out;
}
crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
@@ -1776,7 +1780,8 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size);
if (rc) {
printk(KERN_ERR "Error attempting to set key of size [%zd] for "
- "cipher [%s]; rc = [%d]\n", *key_size, cipher_name, rc);
+ "cipher [%s]; rc = [%d]\n", *key_size, full_alg_name,
+ rc);
rc = -EINVAL;
goto out;
}
@@ -2166,7 +2171,7 @@ int ecryptfs_encrypt_and_encode_filename(
(*encoded_name)[(*encoded_name_size)] = '\0';
(*encoded_name_size)++;
} else {
- rc = -ENOTSUPP;
+ rc = -EOPNOTSUPP;
}
if (rc) {
printk(KERN_ERR "%s: Error attempting to encode "
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 00b30a2d546..542f625312f 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -582,7 +582,7 @@ extern const struct inode_operations ecryptfs_dir_iops;
extern const struct inode_operations ecryptfs_symlink_iops;
extern const struct super_operations ecryptfs_sops;
extern const struct dentry_operations ecryptfs_dops;
-extern struct address_space_operations ecryptfs_aops;
+extern const struct address_space_operations ecryptfs_aops;
extern int ecryptfs_verbosity;
extern unsigned int ecryptfs_message_buf_len;
extern signed long ecryptfs_message_wait_timeout;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 2f0945d6329..056fed62d0d 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -476,6 +476,7 @@ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
struct dentry *lower_dir_dentry;
+ dget(lower_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
rc = vfs_unlink(lower_dir_inode, lower_dentry);
if (rc) {
@@ -489,6 +490,7 @@ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
d_drop(dentry);
out_unlock:
unlock_dir(lower_dir_dentry);
+ dput(lower_dentry);
return rc;
}
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 259525c9abb..a0a7847567e 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -416,7 +416,9 @@ ecryptfs_find_global_auth_tok_for_sig(
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX) == 0) {
- (*global_auth_tok) = walker;
+ rc = key_validate(walker->global_auth_tok_key);
+ if (!rc)
+ (*global_auth_tok) = walker;
goto out;
}
}
@@ -612,7 +614,12 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
}
/* TODO: Support other key modules than passphrase for
* filename encryption */
- BUG_ON(s->auth_tok->token_type != ECRYPTFS_PASSWORD);
+ if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
+ rc = -EOPNOTSUPP;
+ printk(KERN_INFO "%s: Filename encryption only supports "
+ "password tokens\n", __func__);
+ goto out_free_unlock;
+ }
sg_init_one(
&s->hash_sg,
(u8 *)s->auth_tok->token.password.session_key_encryption_key,
@@ -910,7 +917,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
}
/* TODO: Support other key modules than passphrase for
* filename encryption */
- BUG_ON(s->auth_tok->token_type != ECRYPTFS_PASSWORD);
+ if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
+ rc = -EOPNOTSUPP;
+ printk(KERN_INFO "%s: Filename encryption only supports "
+ "password tokens\n", __func__);
+ goto out_free_unlock;
+ }
rc = crypto_blkcipher_setkey(
s->desc.tfm,
s->auth_tok->token.password.session_key_encryption_key,
@@ -1316,8 +1328,10 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
rc = -EINVAL;
goto out_free;
}
- ecryptfs_cipher_code_to_string(crypt_stat->cipher,
- (u16)data[(*packet_size)]);
+ rc = ecryptfs_cipher_code_to_string(crypt_stat->cipher,
+ (u16)data[(*packet_size)]);
+ if (rc)
+ goto out_free;
/* A little extra work to differentiate among the AES key
* sizes; see RFC2440 */
switch(data[(*packet_size)++]) {
@@ -1328,7 +1342,9 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
crypt_stat->key_size =
(*new_auth_tok)->session_key.encrypted_key_size;
}
- ecryptfs_init_crypt_ctx(crypt_stat);
+ rc = ecryptfs_init_crypt_ctx(crypt_stat);
+ if (rc)
+ goto out_free;
if (unlikely(data[(*packet_size)++] != 0x03)) {
printk(KERN_WARNING "Only S2K ID 3 is currently supported\n");
rc = -ENOSYS;
@@ -2366,21 +2382,18 @@ struct kmem_cache *ecryptfs_key_sig_cache;
int ecryptfs_add_keysig(struct ecryptfs_crypt_stat *crypt_stat, char *sig)
{
struct ecryptfs_key_sig *new_key_sig;
- int rc = 0;
new_key_sig = kmem_cache_alloc(ecryptfs_key_sig_cache, GFP_KERNEL);
if (!new_key_sig) {
- rc = -ENOMEM;
printk(KERN_ERR
"Error allocating from ecryptfs_key_sig_cache\n");
- goto out;
+ return -ENOMEM;
}
memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX);
- mutex_lock(&crypt_stat->keysig_list_mutex);
+ /* Caller must hold keysig_list_mutex */
list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list);
- mutex_unlock(&crypt_stat->keysig_list_mutex);
-out:
- return rc;
+
+ return 0;
}
struct kmem_cache *ecryptfs_global_auth_tok_cache;
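
The keystore.c hunks above move the keysig_list_mutex out of ecryptfs_add_keysig() and into its callers (the crypto.c hunk earlier now takes keysig_list_mutex before global_auth_tok_list_mutex and releases both on a single exit path), and they replace the BUG_ON()s for non-password tokens with -EOPNOTSUPP returns. A minimal sketch of the resulting caller-holds-the-lock contract, with hypothetical names:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct example_sig {
            struct list_head list;
            char sig[16];
    };

    /* Caller must hold the mutex that protects @sigs; the helper itself
     * takes no locks, mirroring the new ecryptfs_add_keysig() contract.
     * @sig is assumed to point at least sizeof(item->sig) bytes. */
    static int example_add_sig(struct list_head *sigs, const char *sig)
    {
            struct example_sig *item = kmalloc(sizeof(*item), GFP_KERNEL);

            if (!item)
                    return -ENOMEM;
            memcpy(item->sig, sig, sizeof(item->sig));
            list_add(&item->list, sigs);
            return 0;
    }

    /* A caller brackets the whole walk with its own locking, taking the
     * list's mutex before any nested lock and dropping both on one out path. */
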
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index c6d7a4d748a..e14cf7e588d 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -136,6 +136,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
const struct cred *cred)
{
struct ecryptfs_open_req *req;
+ int flags = O_LARGEFILE;
int rc = 0;
/* Corresponding dput() and mntput() are done when the
@@ -143,10 +144,14 @@ int ecryptfs_privileged_open(struct file **lower_file,
* destroyed. */
dget(lower_dentry);
mntget(lower_mnt);
- (*lower_file) = dentry_open(lower_dentry, lower_mnt,
- (O_RDWR | O_LARGEFILE), cred);
+ flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
+ (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
if (!IS_ERR(*lower_file))
goto out;
+ if (flags & O_RDONLY) {
+ rc = PTR_ERR((*lower_file));
+ goto out;
+ }
req = kmem_cache_alloc(ecryptfs_open_req_cache, GFP_KERNEL);
if (!req) {
rc = -ENOMEM;
@@ -180,21 +185,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
__func__);
goto out_unlock;
}
- if (IS_ERR(*req->lower_file)) {
+ if (IS_ERR(*req->lower_file))
rc = PTR_ERR(*req->lower_file);
- dget(lower_dentry);
- mntget(lower_mnt);
- (*lower_file) = dentry_open(lower_dentry, lower_mnt,
- (O_RDONLY | O_LARGEFILE), cred);
- if (IS_ERR(*lower_file)) {
- rc = PTR_ERR(*req->lower_file);
- (*lower_file) = NULL;
- printk(KERN_WARNING "%s: Error attempting privileged "
- "open of lower file with either RW or RO "
- "perms; rc = [%d]. Giving up.\n",
- __func__, rc);
- }
- }
out_unlock:
mutex_unlock(&req->mux);
out_free:
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 9f0aa9883c2..101fe4c7b1e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -129,11 +129,10 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
rc = ecryptfs_privileged_open(&inode_info->lower_file,
lower_dentry, lower_mnt, cred);
- if (rc || IS_ERR(inode_info->lower_file)) {
+ if (rc) {
printk(KERN_ERR "Error opening lower persistent file "
"for lower_dentry [0x%p] and lower_mnt [0x%p]; "
"rc = [%d]\n", lower_dentry, lower_mnt, rc);
- rc = PTR_ERR(inode_info->lower_file);
inode_info->lower_file = NULL;
}
}
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 5c6bab9786e..df4ce99d059 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -396,9 +396,11 @@ static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode)
rc = ecryptfs_write_lower(ecryptfs_inode, file_size_virt, 0,
sizeof(u64));
kfree(file_size_virt);
- if (rc)
+ if (rc < 0)
printk(KERN_ERR "%s: Error writing file size to header; "
"rc = [%d]\n", __func__, rc);
+ else
+ rc = 0;
out:
return rc;
}
@@ -545,7 +547,7 @@ static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
return rc;
}
-struct address_space_operations ecryptfs_aops = {
+const struct address_space_operations ecryptfs_aops = {
.writepage = ecryptfs_writepage,
.readpage = ecryptfs_readpage,
.write_begin = ecryptfs_write_begin,
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index a137c6ea2fe..0cc4fafd655 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -34,15 +34,14 @@
*
* Write data to the lower file.
*
- * Returns zero on success; non-zero on error
+ * Returns bytes written on success; less than zero on error
*/
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size)
{
struct ecryptfs_inode_info *inode_info;
- ssize_t octets_written;
mm_segment_t fs_save;
- int rc = 0;
+ ssize_t rc;
inode_info = ecryptfs_inode_to_private(ecryptfs_inode);
mutex_lock(&inode_info->lower_file_mutex);
@@ -50,14 +49,9 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
inode_info->lower_file->f_pos = offset;
fs_save = get_fs();
set_fs(get_ds());
- octets_written = vfs_write(inode_info->lower_file, data, size,
- &inode_info->lower_file->f_pos);
+ rc = vfs_write(inode_info->lower_file, data, size,
+ &inode_info->lower_file->f_pos);
set_fs(fs_save);
- if (octets_written < 0) {
- printk(KERN_ERR "%s: octets_written = [%td]; "
- "expected [%td]\n", __func__, octets_written, size);
- rc = -EINVAL;
- }
mutex_unlock(&inode_info->lower_file_mutex);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
@@ -91,6 +85,8 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
+ offset_in_page);
virt = kmap(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
+ if (rc > 0)
+ rc = 0;
kunmap(page_for_lower);
return rc;
}
@@ -229,30 +225,24 @@ out:
* Read @size bytes of data at byte offset @offset from the lower
* inode into memory location @data.
*
- * Returns zero on success; non-zero on error
+ * Returns bytes read on success; 0 on EOF; less than zero on error
*/
int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
struct inode *ecryptfs_inode)
{
struct ecryptfs_inode_info *inode_info =
ecryptfs_inode_to_private(ecryptfs_inode);
- ssize_t octets_read;
mm_segment_t fs_save;
- int rc = 0;
+ ssize_t rc;
mutex_lock(&inode_info->lower_file_mutex);
BUG_ON(!inode_info->lower_file);
inode_info->lower_file->f_pos = offset;
fs_save = get_fs();
set_fs(get_ds());
- octets_read = vfs_read(inode_info->lower_file, data, size,
- &inode_info->lower_file->f_pos);
+ rc = vfs_read(inode_info->lower_file, data, size,
+ &inode_info->lower_file->f_pos);
set_fs(fs_save);
- if (octets_read < 0) {
- printk(KERN_ERR "%s: octets_read = [%td]; "
- "expected [%td]\n", __func__, octets_read, size);
- rc = -EINVAL;
- }
mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
@@ -284,6 +274,8 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
virt = kmap(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
+ if (rc > 0)
+ rc = 0;
kunmap(page_for_ecryptfs);
flush_dcache_page(page_for_ecryptfs);
return rc;
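
After the read_write.c hunks above, ecryptfs_write_lower() and ecryptfs_read_lower() simply return what vfs_write()/vfs_read() returned, that is a byte count on success or a negative errno, instead of collapsing everything to 0 or -EINVAL. Callers that only care about success therefore fold a positive count back to 0, as the page-segment wrappers in the hunk do. A minimal sketch of that caller-side convention (the wrapper name is hypothetical, and it assumes the ecryptfs_write_lower() declaration from ecryptfs_kernel.h is visible):

    /* Hypothetical wrapper: treat a positive byte count as success and
     * pass negative errors through unchanged. */
    static int example_write_blob(struct inode *ecryptfs_inode, char *virt,
                                  loff_t offset, size_t size)
    {
            int rc;

            rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
            if (rc > 0)
                    rc = 0;         /* caller only needs success/failure */
            return rc;              /* 0 on success, negative errno on error */
    }
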
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 12d649602d3..b15a43a80ab 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -77,7 +77,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
- mutex_lock(&inode_info->lower_file_mutex);
if (inode_info->lower_file) {
struct dentry *lower_dentry =
inode_info->lower_file->f_dentry;
@@ -89,7 +88,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
d_drop(lower_dentry);
}
}
- mutex_unlock(&inode_info->lower_file_mutex);
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
}
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 31d12de83a2..8b47e4200e6 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -68,11 +68,16 @@ int eventfd_signal(struct eventfd_ctx *ctx, int n)
}
EXPORT_SYMBOL_GPL(eventfd_signal);
+static void eventfd_free_ctx(struct eventfd_ctx *ctx)
+{
+ kfree(ctx);
+}
+
static void eventfd_free(struct kref *kref)
{
struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
- kfree(ctx);
+ eventfd_free_ctx(ctx);
}
/**
@@ -298,9 +303,23 @@ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
-SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
+/**
+ * eventfd_file_create - Creates an eventfd file pointer.
+ * @count: Initial eventfd counter value.
+ * @flags: Flags for the eventfd file.
+ *
+ * This function creates an eventfd file pointer, w/out installing it into
+ * the fd table. This is useful when the eventfd file is used during the
+ * initialization of data structures that require extra setup after the eventfd
+ * creation. So the eventfd creation is split into the file pointer creation
+ * phase, and the file descriptor installation phase.
+ * In this way races with userspace closing the newly installed file descriptor
+ * can be avoided.
+ * Returns an eventfd file pointer, or a proper error pointer.
+ */
+struct file *eventfd_file_create(unsigned int count, int flags)
{
- int fd;
+ struct file *file;
struct eventfd_ctx *ctx;
/* Check the EFD_* constants for consistency. */
@@ -308,26 +327,48 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
if (flags & ~EFD_FLAGS_SET)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
- /*
- * When we call this, the initialization must be complete, since
- * anon_inode_getfd() will install the fd.
- */
- fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
- flags & EFD_SHARED_FCNTL_FLAGS);
- if (fd < 0)
- kfree(ctx);
+ file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
+ flags & EFD_SHARED_FCNTL_FLAGS);
+ if (IS_ERR(file))
+ eventfd_free_ctx(ctx);
+
+ return file;
+}
+
+SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
+{
+ int fd, error;
+ struct file *file;
+
+ error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
+ if (error < 0)
+ return error;
+ fd = error;
+
+ file = eventfd_file_create(count, flags);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto err_put_unused_fd;
+ }
+ fd_install(fd, file);
+
return fd;
+
+err_put_unused_fd:
+ put_unused_fd(fd);
+
+ return error;
}
SYSCALL_DEFINE1(eventfd, unsigned int, count)
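
The eventfd.c hunks above split creation into two phases: eventfd_file_create() only builds the file pointer, and the file descriptor is installed separately with fd_install(), so an in-kernel user can finish wiring the eventfd into its own data structures before userspace can see, and possibly close, the descriptor. A minimal sketch of a caller following the same reserve/create/setup/install order (the setup step and the function name are hypothetical, and the sketch assumes the new eventfd_file_create() declaration is visible to the caller):

    #include <linux/file.h>
    #include <linux/eventfd.h>
    #include <linux/err.h>

    static int example_create_eventfd(unsigned int count, int flags)
    {
            struct file *file;
            int fd;

            fd = get_unused_fd_flags(flags & (EFD_CLOEXEC | EFD_NONBLOCK));
            if (fd < 0)
                    return fd;

            file = eventfd_file_create(count, flags);
            if (IS_ERR(file)) {
                    put_unused_fd(fd);
                    return PTR_ERR(file);
            }

            /* ... hook the eventfd up to private state here, while the
             * descriptor is still invisible to userspace ... */

            fd_install(fd, file);   /* the descriptor becomes visible only now */
            return fd;
    }
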
diff --git a/fs/exec.c b/fs/exec.c
index 172ceb6edde..d49be6bc179 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -33,7 +33,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
@@ -55,6 +55,7 @@
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
+#include <linux/pipe_fs_i.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -63,6 +64,7 @@
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
+unsigned int core_pipe_limit;
int suid_dumpable = 0;
/* The maximal length of core_pattern is also specified in sysctl.c */
@@ -845,6 +847,9 @@ static int de_thread(struct task_struct *tsk)
sig->notify_count = 0;
no_thread_group:
+ if (current->mm)
+ setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
+
exit_itimers(sig);
flush_itimer_signals();
@@ -923,7 +928,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
task_lock(tsk);
strlcpy(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
- perf_counter_comm(tsk);
+ perf_event_comm(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
@@ -997,7 +1002,7 @@ int flush_old_exec(struct linux_binprm * bprm)
* security domain:
*/
if (!get_dumpable(current->mm))
- perf_counter_exit_task(current);
+ perf_event_exit_task(current);
/* An exec changes our domain. We are no longer part of the thread
group */
@@ -1354,6 +1359,8 @@ int do_execve(char * filename,
if (retval < 0)
goto out;
+ current->stack_start = current->mm->start_stack;
+
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
@@ -1388,18 +1395,16 @@ out_ret:
return retval;
}
-int set_binfmt(struct linux_binfmt *new)
+void set_binfmt(struct linux_binfmt *new)
{
- struct linux_binfmt *old = current->binfmt;
+ struct mm_struct *mm = current->mm;
- if (new) {
- if (!try_module_get(new->module))
- return -1;
- }
- current->binfmt = new;
- if (old)
- module_put(old->module);
- return 0;
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
+
+ mm->binfmt = new;
+ if (new)
+ __module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
@@ -1723,6 +1728,29 @@ int get_dumpable(struct mm_struct *mm)
return (ret >= 2) ? 2 : ret;
}
+static void wait_for_dump_helpers(struct file *file)
+{
+ struct pipe_inode_info *pipe;
+
+ pipe = file->f_path.dentry->d_inode->i_pipe;
+
+ pipe_lock(pipe);
+ pipe->readers++;
+ pipe->writers--;
+
+ while ((pipe->readers > 1) && (!signal_pending(current))) {
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ pipe_wait(pipe);
+ }
+
+ pipe->readers--;
+ pipe->writers++;
+ pipe_unlock(pipe);
+
+}
+
+
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
struct core_state core_state;
@@ -1739,11 +1767,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
char **helper_argv = NULL;
int helper_argc = 0;
- char *delimit;
+ int dump_count = 0;
+ static atomic_t core_dump_count = ATOMIC_INIT(0);
audit_core_dumps(signr);
- binfmt = current->binfmt;
+ binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
@@ -1794,54 +1823,63 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
lock_kernel();
ispipe = format_corename(corename, signr);
unlock_kernel();
- /*
- * Don't bother to check the RLIMIT_CORE value if core_pattern points
- * to a pipe. Since we're not writing directly to the filesystem
- * RLIMIT_CORE doesn't really apply, as no actual core file will be
- * created unless the pipe reader choses to write out the core file
- * at which point file size limits and permissions will be imposed
- * as it does with any other process
- */
+
if ((!ispipe) && (core_limit < binfmt->min_coredump))
goto fail_unlock;
if (ispipe) {
+ if (core_limit == 0) {
+ /*
+ * Normally core limits are irrelevant to pipes, since
+ * we're not writing to the file system, but we use
+ * core_limit of 0 here as a special value. Any
+ * non-zero limit gets set to RLIM_INFINITY below, but
+ * a limit of 0 skips the dump. This is a consistent
+ * way to catch recursive crashes. We can still crash
+ * if the core_pattern binary sets RLIM_CORE = !0
+ * but it runs as root, and can do lots of stupid things
+ * Note that we use task_tgid_vnr here to grab the pid
+ * of the process group leader. That way we get the
+ * right pid if a thread in a multi-threaded
+ * core_pattern process dies.
+ */
+ printk(KERN_WARNING
+ "Process %d(%s) has RLIMIT_CORE set to 0\n",
+ task_tgid_vnr(current), current->comm);
+ printk(KERN_WARNING "Aborting core\n");
+ goto fail_unlock;
+ }
+
+ dump_count = atomic_inc_return(&core_dump_count);
+ if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+ printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+ task_tgid_vnr(current), current->comm);
+ printk(KERN_WARNING "Skipping core dump\n");
+ goto fail_dropcount;
+ }
+
helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
if (!helper_argv) {
printk(KERN_WARNING "%s failed to allocate memory\n",
__func__);
- goto fail_unlock;
- }
- /* Terminate the string before the first option */
- delimit = strchr(corename, ' ');
- if (delimit)
- *delimit = '\0';
- delimit = strrchr(helper_argv[0], '/');
- if (delimit)
- delimit++;
- else
- delimit = helper_argv[0];
- if (!strcmp(delimit, current->comm)) {
- printk(KERN_NOTICE "Recursive core dump detected, "
- "aborting\n");
- goto fail_unlock;
+ goto fail_dropcount;
}
core_limit = RLIM_INFINITY;
/* SIGPIPE can happen, but it's just never processed */
- if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
+ if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
&file)) {
printk(KERN_INFO "Core dump to %s pipe failed\n",
corename);
- goto fail_unlock;
+ goto fail_dropcount;
}
} else
file = filp_open(corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
if (IS_ERR(file))
- goto fail_unlock;
+ goto fail_dropcount;
inode = file->f_path.dentry->d_inode;
if (inode->i_nlink > 1)
goto close_fail; /* multiple links - don't dump */
@@ -1870,7 +1908,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
if (retval)
current->signal->group_exit_code |= 0x80;
close_fail:
+ if (ispipe && core_pipe_limit)
+ wait_for_dump_helpers(file);
filp_close(file, NULL);
+fail_dropcount:
+ if (dump_count)
+ atomic_dec(&core_dump_count);
fail_unlock:
if (helper_argv)
argv_free(helper_argv);
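The core_pipe_limit handling added above caps how many crashing processes may have a usermode core collector running at once: every dump increments a shared counter, anything over the limit is skipped, and each exit path drops the count again. A small standalone sketch of the same bounded-concurrency pattern follows, written with C11 atomics rather than the kernel's atomic_t; the names and the limit value are illustrative only.

/* Sketch: limit concurrent dump helpers with an atomic counter. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int dump_count = 0;
static int pipe_limit = 2;		/* 0 would mean "unlimited" */

static bool try_start_dump(void)
{
	int count = atomic_fetch_add(&dump_count, 1) + 1;

	if (pipe_limit && count > pipe_limit) {
		atomic_fetch_sub(&dump_count, 1);	/* over the limit: back out */
		return false;
	}
	return true;
}

static void finish_dump(void)
{
	atomic_fetch_sub(&dump_count, 1);
}

int main(void)
{
	bool started[4];
	int i;

	for (i = 0; i < 4; i++) {
		started[i] = try_start_dump();
		printf("dump %d %s\n", i,
		       started[i] ? "started" : "skipped (over limit)");
	}
	for (i = 0; i < 4; i++)
		if (started[i])
			finish_dump();
	return 0;
}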
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 5ab10c3bbeb..9f500dec3b5 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -214,7 +214,6 @@ int exofs_sync_fs(struct super_block *sb, int wait)
}
lock_super(sb);
- lock_kernel();
sbi = sb->s_fs_info;
fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
fscb->s_numfiles = cpu_to_le32(sbi->s_numfiles);
@@ -245,7 +244,6 @@ int exofs_sync_fs(struct super_block *sb, int wait)
out:
if (or)
osd_end_request(or);
- unlock_kernel();
unlock_super(sb);
kfree(fscb);
return ret;
@@ -268,8 +266,6 @@ static void exofs_put_super(struct super_block *sb)
int num_pend;
struct exofs_sb_info *sbi = sb->s_fs_info;
- lock_kernel();
-
if (sb->s_dirt)
exofs_write_super(sb);
@@ -286,8 +282,6 @@ static void exofs_put_super(struct super_block *sb)
osduld_put_device(sbi->s_dev);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
-
- unlock_kernel();
}
/*
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 1c1638f873a..ade634076d0 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -819,6 +819,7 @@ const struct address_space_operations ext2_aops = {
.writepages = ext2_writepages,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
const struct address_space_operations ext2_aops_xip = {
@@ -837,6 +838,7 @@ const struct address_space_operations ext2_nobh_aops = {
.direct_IO = ext2_direct_IO,
.writepages = ext2_writepages,
.migratepage = buffer_migrate_page,
+ .error_remove_page = generic_error_remove_page,
};
/*
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 23701f289e9..dd7175ce560 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -70,7 +70,7 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
if (PTR_ERR(inode) == -ESTALE) {
ext2_error(dir->i_sb, __func__,
"deleted inode referenced: %lu",
- ino);
+ (unsigned long) ino);
return ERR_PTR(-EIO);
} else {
return ERR_CAST(inode);
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index b72b8588422..c18fbf3e406 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -20,7 +20,7 @@ __inode_direct_access(struct inode *inode, sector_t block,
void **kaddr, unsigned long *pfn)
{
struct block_device *bdev = inode->i_sb->s_bdev;
- struct block_device_operations *ops = bdev->bd_disk->fops;
+ const struct block_device_operations *ops = bdev->bd_disk->fops;
sector_t sector;
sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index cd098a7b77f..acf1b142332 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1830,6 +1830,7 @@ static const struct address_space_operations ext3_ordered_aops = {
.direct_IO = ext3_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext3_writeback_aops = {
@@ -1845,6 +1846,7 @@ static const struct address_space_operations ext3_writeback_aops = {
.direct_IO = ext3_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext3_journalled_aops = {
@@ -1859,6 +1861,7 @@ static const struct address_space_operations ext3_journalled_aops = {
.invalidatepage = ext3_invalidatepage,
.releasepage = ext3_releasepage,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
void ext3_set_aops(struct inode *inode)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index a8d80a7f110..72743d36050 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -720,7 +720,7 @@ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
-static struct dquot_operations ext3_quota_operations = {
+static const struct dquot_operations ext3_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
@@ -737,7 +737,7 @@ static struct dquot_operations ext3_quota_operations = {
.destroy_dquot = dquot_destroy,
};
-static struct quotactl_ops ext3_qctl_operations = {
+static const struct quotactl_ops ext3_qctl_operations = {
.quota_on = ext3_quota_on,
.quota_off = vfs_quota_off,
.quota_sync = vfs_quota_sync,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4abd683b963..064746fad58 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2337,7 +2337,7 @@ static int __mpage_da_writepage(struct page *page,
/*
* Rest of the page in the page_vec
* redirty them and skip them. We will
- * try to to write them again after
+ * try to write them again after
* starting a new transaction
*/
redirty_page_for_writepage(wbc, page);
@@ -3386,6 +3386,7 @@ static const struct address_space_operations ext4_ordered_aops = {
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_writeback_aops = {
@@ -3401,6 +3402,7 @@ static const struct address_space_operations ext4_writeback_aops = {
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_journalled_aops = {
@@ -3415,6 +3417,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_da_aops = {
@@ -3431,6 +3434,7 @@ static const struct address_space_operations ext4_da_aops = {
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
void ext4_set_aops(struct inode *inode)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a6b1ab73472..df539ba2777 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -964,7 +964,7 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
-static struct dquot_operations ext4_quota_operations = {
+static const struct dquot_operations ext4_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
@@ -985,7 +985,7 @@ static struct dquot_operations ext4_quota_operations = {
.destroy_dquot = dquot_destroy,
};
-static struct quotactl_ops ext4_qctl_operations = {
+static const struct quotactl_ops ext4_qctl_operations = {
.quota_on = ext4_quota_on,
.quota_off = vfs_quota_off,
.quota_sync = vfs_quota_sync,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 8970d8c49bb..04629d1302f 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -470,19 +470,11 @@ static void fat_put_super(struct super_block *sb)
iput(sbi->fat_inode);
- if (sbi->nls_disk) {
- unload_nls(sbi->nls_disk);
- sbi->nls_disk = NULL;
- sbi->options.codepage = fat_default_codepage;
- }
- if (sbi->nls_io) {
- unload_nls(sbi->nls_io);
- sbi->nls_io = NULL;
- }
- if (sbi->options.iocharset != fat_default_iocharset) {
+ unload_nls(sbi->nls_disk);
+ unload_nls(sbi->nls_io);
+
+ if (sbi->options.iocharset != fat_default_iocharset)
kfree(sbi->options.iocharset);
- sbi->options.iocharset = fat_default_iocharset;
- }
sb->s_fs_info = NULL;
kfree(sbi);
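The fat hunk above, like the hfs, hfsplus, isofs and jfs hunks later in this diff, collapses the repeated "if (table) unload_nls(table)" checks; the cleanup is safe because unload_nls() itself now tolerates a NULL argument. A tiny standalone sketch of why pushing the NULL check into the teardown helper simplifies every caller; the names here are illustrative, not the kernel's.

/* Sketch: NULL-tolerant teardown helper. */
#include <stdio.h>
#include <stdlib.h>

struct nls_table_demo {
	const char *charset;
};

/* The helper accepts NULL, so callers never need to check. */
static void unload_table(struct nls_table_demo *t)
{
	if (!t)
		return;
	printf("unloading %s\n", t->charset);
	free(t);
}

int main(void)
{
	struct nls_table_demo *loaded = malloc(sizeof(*loaded));
	struct nls_table_demo *never_loaded = NULL;

	if (!loaded)
		return EXIT_FAILURE;
	loaded->charset = "cp437";

	/* Both calls are fine; the NULL case is a no-op. */
	unload_table(loaded);
	unload_table(never_loaded);
	return 0;
}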
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ae413086db9..fc089f2f7f5 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -263,6 +263,79 @@ pid_t f_getown(struct file *filp)
return pid;
}
+static int f_setown_ex(struct file *filp, unsigned long arg)
+{
+ struct f_owner_ex * __user owner_p = (void * __user)arg;
+ struct f_owner_ex owner;
+ struct pid *pid;
+ int type;
+ int ret;
+
+ ret = copy_from_user(&owner, owner_p, sizeof(owner));
+ if (ret)
+ return ret;
+
+ switch (owner.type) {
+ case F_OWNER_TID:
+ type = PIDTYPE_MAX;
+ break;
+
+ case F_OWNER_PID:
+ type = PIDTYPE_PID;
+ break;
+
+ case F_OWNER_GID:
+ type = PIDTYPE_PGID;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ pid = find_vpid(owner.pid);
+ if (owner.pid && !pid)
+ ret = -ESRCH;
+ else
+ ret = __f_setown(filp, pid, type, 1);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int f_getown_ex(struct file *filp, unsigned long arg)
+{
+ struct f_owner_ex * __user owner_p = (void * __user)arg;
+ struct f_owner_ex owner;
+ int ret = 0;
+
+ read_lock(&filp->f_owner.lock);
+ owner.pid = pid_vnr(filp->f_owner.pid);
+ switch (filp->f_owner.pid_type) {
+ case PIDTYPE_MAX:
+ owner.type = F_OWNER_TID;
+ break;
+
+ case PIDTYPE_PID:
+ owner.type = F_OWNER_PID;
+ break;
+
+ case PIDTYPE_PGID:
+ owner.type = F_OWNER_GID;
+ break;
+
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ break;
+ }
+ read_unlock(&filp->f_owner.lock);
+
+ if (!ret)
+ ret = copy_to_user(owner_p, &owner, sizeof(owner));
+ return ret;
+}
+
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
@@ -313,6 +386,12 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
case F_SETOWN:
err = f_setown(filp, arg, 1);
break;
+ case F_GETOWN_EX:
+ err = f_getown_ex(filp, arg);
+ break;
+ case F_SETOWN_EX:
+ err = f_setown_ex(filp, arg);
+ break;
case F_GETSIG:
err = filp->f_owner.signum;
break;
@@ -428,8 +507,7 @@ static inline int sigio_perm(struct task_struct *p,
static void send_sigio_to_task(struct task_struct *p,
struct fown_struct *fown,
- int fd,
- int reason)
+ int fd, int reason, int group)
{
/*
* F_SETSIG can change ->signum lockless in parallel, make
@@ -461,11 +539,11 @@ static void send_sigio_to_task(struct task_struct *p,
else
si.si_band = band_table[reason - POLL_IN];
si.si_fd = fd;
- if (!group_send_sig_info(signum, &si, p))
+ if (!do_send_sig_info(signum, &si, p, group))
break;
/* fall-through: fall back on the old plain SIGIO signal */
case 0:
- group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
+ do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
}
}
@@ -474,16 +552,23 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
struct task_struct *p;
enum pid_type type;
struct pid *pid;
+ int group = 1;
read_lock(&fown->lock);
+
type = fown->pid_type;
+ if (type == PIDTYPE_MAX) {
+ group = 0;
+ type = PIDTYPE_PID;
+ }
+
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
read_lock(&tasklist_lock);
do_each_pid_task(pid, type, p) {
- send_sigio_to_task(p, fown, fd, band);
+ send_sigio_to_task(p, fown, fd, band, group);
} while_each_pid_task(pid, type, p);
read_unlock(&tasklist_lock);
out_unlock_fown:
@@ -491,10 +576,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
}
static void send_sigurg_to_task(struct task_struct *p,
- struct fown_struct *fown)
+ struct fown_struct *fown, int group)
{
if (sigio_perm(p, fown, SIGURG))
- group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
+ do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}
int send_sigurg(struct fown_struct *fown)
@@ -502,10 +587,17 @@ int send_sigurg(struct fown_struct *fown)
struct task_struct *p;
enum pid_type type;
struct pid *pid;
+ int group = 1;
int ret = 0;
read_lock(&fown->lock);
+
type = fown->pid_type;
+ if (type == PIDTYPE_MAX) {
+ group = 0;
+ type = PIDTYPE_PID;
+ }
+
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
@@ -514,7 +606,7 @@ int send_sigurg(struct fown_struct *fown)
read_lock(&tasklist_lock);
do_each_pid_task(pid, type, p) {
- send_sigurg_to_task(p, fown);
+ send_sigurg_to_task(p, fown, group);
} while_each_pid_task(pid, type, p);
read_unlock(&tasklist_lock);
out_unlock_fown:
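F_SETOWN_EX and F_GETOWN_EX, added above, let SIGIO/SIGURG ownership be a single thread (F_OWNER_TID), a whole process, or a process group, instead of the pid/pgrp encoding of plain F_SETOWN; internally a thread owner is carried as PIDTYPE_MAX and delivered with do_send_sig_info() as a non-group signal. Below is a hedged userspace sketch that sets and reads back the owner of a descriptor; depending on your libc, struct f_owner_ex and these constants may require _GNU_SOURCE or sufficiently new headers.

/* Sketch: direct SIGIO ownership of a descriptor via F_SETOWN_EX. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct f_owner_ex owner = {
		.type = F_OWNER_PID,	/* F_OWNER_TID would target one thread */
		.pid  = getpid(),
	};

	if (fcntl(STDIN_FILENO, F_SETOWN_EX, &owner) < 0) {
		perror("F_SETOWN_EX");
		return 1;
	}

	if (fcntl(STDIN_FILENO, F_GETOWN_EX, &owner) < 0) {
		perror("F_GETOWN_EX");
		return 1;
	}
	printf("owner type=%d pid=%d\n", owner.type, (int)owner.pid);
	return 0;
}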
diff --git a/fs/file_table.c b/fs/file_table.c
index 334ce39881f..8eb44042e00 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -74,14 +74,14 @@ EXPORT_SYMBOL_GPL(get_max_files);
* Handle nr_files sysctl
*/
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
-int proc_nr_files(ctl_table *table, int write, struct file *filp,
+int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
- return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ return proc_dointvec(table, write, buffer, lenp, ppos);
}
#else
-int proc_nr_files(ctl_table *table, int write, struct file *filp,
+int proc_nr_files(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8e1e5e19d21..9d5360c4c2a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,8 +41,9 @@ struct wb_writeback_args {
long nr_pages;
struct super_block *sb;
enum writeback_sync_modes sync_mode;
- int for_kupdate;
- int range_cyclic;
+ int for_kupdate:1;
+ int range_cyclic:1;
+ int for_background:1;
};
/*
@@ -249,14 +250,25 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi,
* completion. Caller need not hold sb s_umount semaphore.
*
*/
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+ long nr_pages)
{
struct wb_writeback_args args = {
+ .sb = sb,
.sync_mode = WB_SYNC_NONE,
.nr_pages = nr_pages,
.range_cyclic = 1,
};
+ /*
+ * We treat @nr_pages=0 as the special case to do background writeback,
+ * ie. to sync pages until the background dirty threshold is reached.
+ */
+ if (!nr_pages) {
+ args.nr_pages = LONG_MAX;
+ args.for_background = 1;
+ }
+
bdi_alloc_queue_work(bdi, &args);
}
@@ -310,7 +322,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
* For inodes being constantly redirtied, dirtied_when can get stuck.
* It _appears_ to be in the future, but is actually in distant past.
* This test is necessary to prevent such wrapped-around relative times
- * from permanently stopping the whole pdflush writeback.
+ * from permanently stopping the whole bdi writeback.
*/
ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
@@ -324,13 +336,38 @@ static void move_expired_inodes(struct list_head *delaying_queue,
struct list_head *dispatch_queue,
unsigned long *older_than_this)
{
+ LIST_HEAD(tmp);
+ struct list_head *pos, *node;
+ struct super_block *sb = NULL;
+ struct inode *inode;
+ int do_sb_sort = 0;
+
while (!list_empty(delaying_queue)) {
- struct inode *inode = list_entry(delaying_queue->prev,
- struct inode, i_list);
+ inode = list_entry(delaying_queue->prev, struct inode, i_list);
if (older_than_this &&
inode_dirtied_after(inode, *older_than_this))
break;
- list_move(&inode->i_list, dispatch_queue);
+ if (sb && sb != inode->i_sb)
+ do_sb_sort = 1;
+ sb = inode->i_sb;
+ list_move(&inode->i_list, &tmp);
+ }
+
+ /* just one sb in list, splice to dispatch_queue and we're done */
+ if (!do_sb_sort) {
+ list_splice(&tmp, dispatch_queue);
+ return;
+ }
+
+ /* Move inodes from one superblock together */
+ while (!list_empty(&tmp)) {
+ inode = list_entry(tmp.prev, struct inode, i_list);
+ sb = inode->i_sb;
+ list_for_each_prev_safe(pos, node, &tmp) {
+ inode = list_entry(pos, struct inode, i_list);
+ if (inode->i_sb == sb)
+ list_move(&inode->i_list, dispatch_queue);
+ }
}
}
@@ -439,8 +476,18 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
spin_lock(&inode_lock);
inode->i_state &= ~I_SYNC;
if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
- if (!(inode->i_state & I_DIRTY) &&
- mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
+ /*
+ * More pages get dirtied by a fast dirtier.
+ */
+ goto select_queue;
+ } else if (inode->i_state & I_DIRTY) {
+ /*
+ * At least XFS will redirty the inode during the
+ * writeback (delalloc) and on io completion (isize).
+ */
+ redirty_tail(inode);
+ } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
/*
* We didn't write back all the pages. nfs_writepages()
* sometimes bails out without doing anything. Redirty
@@ -462,6 +509,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* soon as the queue becomes uncongested.
*/
inode->i_state |= I_DIRTY_PAGES;
+select_queue:
if (wbc->nr_to_write <= 0) {
/*
* slice used up: queue for next turn
@@ -484,12 +532,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_state |= I_DIRTY_PAGES;
redirty_tail(inode);
}
- } else if (inode->i_state & I_DIRTY) {
- /*
- * Someone redirtied the inode while were writing back
- * the pages.
- */
- redirty_tail(inode);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
@@ -506,6 +548,17 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
+static void unpin_sb_for_writeback(struct super_block **psb)
+{
+ struct super_block *sb = *psb;
+
+ if (sb) {
+ up_read(&sb->s_umount);
+ put_super(sb);
+ *psb = NULL;
+ }
+}
+
/*
* For WB_SYNC_NONE writeback, the caller does not have the sb pinned
* before calling writeback. So make sure that we do pin it, so it doesn't
@@ -515,11 +568,20 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* 1 if we failed.
*/
static int pin_sb_for_writeback(struct writeback_control *wbc,
- struct inode *inode)
+ struct inode *inode, struct super_block **psb)
{
struct super_block *sb = inode->i_sb;
/*
+ * If this sb is already pinned, nothing more to do. If not and
+ * *psb is non-NULL, unpin the old one first
+ */
+ if (sb == *psb)
+ return 0;
+ else if (*psb)
+ unpin_sb_for_writeback(psb);
+
+ /*
* Caller must already hold the ref for this
*/
if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -532,7 +594,7 @@ static int pin_sb_for_writeback(struct writeback_control *wbc,
if (down_read_trylock(&sb->s_umount)) {
if (sb->s_root) {
spin_unlock(&sb_lock);
- return 0;
+ goto pinned;
}
/*
* umounted, drop rwsem again and fall through to failure
@@ -543,24 +605,15 @@ static int pin_sb_for_writeback(struct writeback_control *wbc,
sb->s_count--;
spin_unlock(&sb_lock);
return 1;
-}
-
-static void unpin_sb_for_writeback(struct writeback_control *wbc,
- struct inode *inode)
-{
- struct super_block *sb = inode->i_sb;
-
- if (wbc->sync_mode == WB_SYNC_ALL)
- return;
-
- up_read(&sb->s_umount);
- put_super(sb);
+pinned:
+ *psb = sb;
+ return 0;
}
static void writeback_inodes_wb(struct bdi_writeback *wb,
struct writeback_control *wbc)
{
- struct super_block *sb = wbc->sb;
+ struct super_block *sb = wbc->sb, *pin_sb = NULL;
const int is_blkdev_sb = sb_is_blkdev_sb(sb);
const unsigned long start = jiffies; /* livelock avoidance */
@@ -619,7 +672,7 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
if (inode_dirtied_after(inode, start))
break;
- if (pin_sb_for_writeback(wbc, inode)) {
+ if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
requeue_io(inode);
continue;
}
@@ -628,7 +681,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
- unpin_sb_for_writeback(wbc, inode);
if (wbc->pages_skipped != pages_skipped) {
/*
* writeback is not making progress due to locked
@@ -648,6 +700,8 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
wbc->more_io = 1;
}
+ unpin_sb_for_writeback(&pin_sb);
+
spin_unlock(&inode_lock);
/* Leave any unwritten inodes on b_io */
}
@@ -706,6 +760,7 @@ static long wb_writeback(struct bdi_writeback *wb,
};
unsigned long oldest_jif;
long wrote = 0;
+ struct inode *inode;
if (wbc.for_kupdate) {
wbc.older_than_this = &oldest_jif;
@@ -719,20 +774,16 @@ static long wb_writeback(struct bdi_writeback *wb,
for (;;) {
/*
- * Don't flush anything for non-integrity writeback where
- * no nr_pages was given
+ * Stop writeback when nr_pages has been consumed
*/
- if (!args->for_kupdate && args->nr_pages <= 0 &&
- args->sync_mode == WB_SYNC_NONE)
+ if (args->nr_pages <= 0)
break;
/*
- * If no specific pages were given and this is just a
- * periodic background writeout and we are below the
- * background dirty threshold, don't do anything
+ * For background writeout, stop when we are below the
+ * background dirty threshold
*/
- if (args->for_kupdate && args->nr_pages <= 0 &&
- !over_bground_thresh())
+ if (args->for_background && !over_bground_thresh())
break;
wbc.more_io = 0;
@@ -744,13 +795,32 @@ static long wb_writeback(struct bdi_writeback *wb,
wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
/*
- * If we ran out of stuff to write, bail unless more_io got set
+ * If we consumed everything, see if we have more
*/
- if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
- if (wbc.more_io && !wbc.for_kupdate)
- continue;
+ if (wbc.nr_to_write <= 0)
+ continue;
+ /*
+ * Didn't write everything and we don't have more IO, bail
+ */
+ if (!wbc.more_io)
break;
+ /*
+ * Did we write something? Try for more
+ */
+ if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
+ continue;
+ /*
+ * Nothing written. Wait for some inode to
+ * become available for writeback. Otherwise
+ * we'll just busyloop.
+ */
+ spin_lock(&inode_lock);
+ if (!list_empty(&wb->b_more_io)) {
+ inode = list_entry(wb->b_more_io.prev,
+ struct inode, i_list);
+ inode_wait_for_writeback(inode);
}
+ spin_unlock(&inode_lock);
}
return wrote;
@@ -1060,9 +1130,6 @@ EXPORT_SYMBOL(__mark_inode_dirty);
* If older_than_this is non-NULL, then only write out inodes which
* had their first dirtying at a time earlier than *older_than_this.
*
- * If we're a pdlfush thread, then implement pdflush collision avoidance
- * against the entire list.
- *
* If `bdi' is non-zero then we're being asked to writeback a specific queue.
* This function assumes that the blockdev superblock's inodes are backed by
* a variety of queues, so all inodes are searched. For other superblocks,
@@ -1141,7 +1208,7 @@ void writeback_inodes_sb(struct super_block *sb)
nr_to_write = nr_dirty + nr_unstable +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
- bdi_writeback_all(sb, nr_to_write);
+ bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);
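Two behaviour changes sit in the writeback hunk above: a request with nr_pages == 0 now means "background writeback until the dirty count drops below the threshold", and move_expired_inodes() groups expired inodes by superblock so one sb is drained before the next, which pairs with the pinned-sb caching added to writeback_inodes_wb(). The following standalone sketch shows just the grouping step, done on a small array of (sb, inode) pairs instead of the kernel's i_list; all names and sizes are illustrative.

/* Sketch: stable grouping of expired inodes by their superblock. */
#include <stdio.h>

struct demo_inode {
	int sb;		/* stand-in for inode->i_sb */
	int ino;
};

/* Emit entries one superblock at a time, preserving order within each
 * superblock, like the second loop of move_expired_inodes(). Demo only:
 * assumes n <= 16. */
static void group_by_sb(const struct demo_inode *in, int n, struct demo_inode *out)
{
	int taken[16] = { 0 };
	int pos = 0, i, j;

	for (i = 0; i < n; i++) {
		if (taken[i])
			continue;
		for (j = i; j < n; j++) {
			if (!taken[j] && in[j].sb == in[i].sb) {
				out[pos++] = in[j];
				taken[j] = 1;
			}
		}
	}
}

int main(void)
{
	struct demo_inode expired[] = {
		{ 1, 10 }, { 2, 20 }, { 1, 11 }, { 3, 30 }, { 2, 21 },
	};
	struct demo_inode queue[5];
	int i;

	group_by_sb(expired, 5, queue);
	for (i = 0; i < 5; i++)
		printf("sb %d ino %d\n", queue[i].sb, queue[i].ino);
	/* Output groups sb 1, then sb 2, then sb 3. */
	return 0;
}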
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index e703654e7f4..992f6c9410b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1276,14 +1276,9 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
return 0;
if (attr->ia_valid & ATTR_SIZE) {
- unsigned long limit;
- if (IS_SWAPFILE(inode))
- return -ETXTBSY;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
+ err = inode_newsize_ok(inode, attr->ia_size);
+ if (err)
+ return err;
is_truncate = true;
}
@@ -1350,8 +1345,7 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
* FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
*/
if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
- if (outarg.attr.size < oldsize)
- fuse_truncate(inode->i_mapping, outarg.attr.size);
+ truncate_pagecache(inode, oldsize, outarg.attr.size);
invalidate_inode_pages2(inode->i_mapping);
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index fc9c79feb5f..01cc462ff45 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -606,8 +606,6 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
u64 attr_valid);
-void fuse_truncate(struct address_space *mapping, loff_t offset);
-
/**
* Initialize the client device
*/
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6da947daabd..1a822ce2b24 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -140,14 +140,6 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
return 0;
}
-void fuse_truncate(struct address_space *mapping, loff_t offset)
-{
- /* See vmtruncate() */
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-}
-
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
u64 attr_valid)
{
@@ -205,8 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
spin_unlock(&fc->lock);
if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
- if (attr->size < oldsize)
- fuse_truncate(inode->i_mapping, attr->size);
+ truncate_pagecache(inode, oldsize, attr->size);
invalidate_inode_pages2(inode->i_mapping);
}
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7ebae9a4ecc..694b5d48f03 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1135,6 +1135,7 @@ static const struct address_space_operations gfs2_writeback_aops = {
.direct_IO = gfs2_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_ordered_aops = {
@@ -1151,6 +1152,7 @@ static const struct address_space_operations gfs2_ordered_aops = {
.direct_IO = gfs2_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_jdata_aops = {
@@ -1166,6 +1168,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
.invalidatepage = gfs2_invalidatepage,
.releasepage = gfs2_releasepage,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
void gfs2_set_aops(struct inode *inode)
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index c3ac1805405..247436c10de 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -12,7 +12,6 @@
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/namei.h>
-#include <linux/utsname.h>
#include <linux/mm.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 28c590b7c9d..8f1cfb02a6c 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -179,7 +179,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
* always aligned to a 64 bit boundary.
*
* The size of the buffer is in bytes, but it is assumed that it is
- * always ok to to read a complete multiple of 64 bits at the end
+ * always ok to read a complete multiple of 64 bits at the end
* of the block in case the end is not aligned to a natural boundary.
*
* Return: the block number (bitmap buffer scope) that was found
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 7b6165f25fb..8bbe03c3f6d 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -344,10 +344,8 @@ void hfs_mdb_put(struct super_block *sb)
brelse(HFS_SB(sb)->mdb_bh);
brelse(HFS_SB(sb)->alt_mdb_bh);
- if (HFS_SB(sb)->nls_io)
- unload_nls(HFS_SB(sb)->nls_io);
- if (HFS_SB(sb)->nls_disk)
- unload_nls(HFS_SB(sb)->nls_disk);
+ unload_nls(HFS_SB(sb)->nls_io);
+ unload_nls(HFS_SB(sb)->nls_disk);
free_pages((unsigned long)HFS_SB(sb)->bitmap, PAGE_SIZE < 8192 ? 1 : 0);
kfree(HFS_SB(sb));
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index c0759fe0855..43022f3d514 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -229,8 +229,7 @@ static void hfsplus_put_super(struct super_block *sb)
iput(HFSPLUS_SB(sb).alloc_file);
iput(HFSPLUS_SB(sb).hidden_dir);
brelse(HFSPLUS_SB(sb).s_vhbh);
- if (HFSPLUS_SB(sb).nls)
- unload_nls(HFSPLUS_SB(sb).nls);
+ unload_nls(HFSPLUS_SB(sb).nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
@@ -464,8 +463,7 @@ out:
cleanup:
hfsplus_put_super(sb);
- if (nls)
- unload_nls(nls);
+ unload_nls(nls);
return err;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a93b885311d..87a1258953b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -31,12 +31,10 @@
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/ima.h>
+#include <linux/magic.h>
#include <asm/uaccess.h>
-/* some random number */
-#define HUGETLBFS_MAGIC 0x958458f6
-
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
@@ -382,36 +380,11 @@ static void hugetlbfs_delete_inode(struct inode *inode)
static void hugetlbfs_forget_inode(struct inode *inode) __releases(inode_lock)
{
- struct super_block *sb = inode->i_sb;
-
- if (!hlist_unhashed(&inode->i_hash)) {
- if (!(inode->i_state & (I_DIRTY|I_SYNC)))
- list_move(&inode->i_list, &inode_unused);
- inodes_stat.nr_unused++;
- if (!sb || (sb->s_flags & MS_ACTIVE)) {
- spin_unlock(&inode_lock);
- return;
- }
- inode->i_state |= I_WILL_FREE;
- spin_unlock(&inode_lock);
- /*
- * write_inode_now is a noop as we set BDI_CAP_NO_WRITEBACK
- * in our backing_dev_info.
- */
- write_inode_now(inode, 1);
- spin_lock(&inode_lock);
- inode->i_state &= ~I_WILL_FREE;
- inodes_stat.nr_unused--;
- hlist_del_init(&inode->i_hash);
+ if (generic_detach_inode(inode)) {
+ truncate_hugepages(inode, 0);
+ clear_inode(inode);
+ destroy_inode(inode);
}
- list_del_init(&inode->i_list);
- list_del_init(&inode->i_sb_list);
- inode->i_state |= I_FREEING;
- inodes_stat.nr_inodes--;
- spin_unlock(&inode_lock);
- truncate_hugepages(inode, 0);
- clear_inode(inode);
- destroy_inode(inode);
}
static void hugetlbfs_drop_inode(struct inode *inode)
@@ -507,6 +480,13 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
+ /*
+ * The policy is initialized here even if we are creating a
+ * private inode because initialization simply creates an
+ * empty rb tree and calls spin_lock_init(); later, when we
+ * call mpol_free_shared_policy(), it will just return because
+ * the rb tree will still be empty.
+ */
mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
@@ -937,7 +917,7 @@ static int can_do_hugetlb_shm(void)
}
struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
- struct user_struct **user)
+ struct user_struct **user, int creat_flags)
{
int error = -ENOMEM;
struct file *file;
@@ -949,7 +929,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
if (!hugetlbfs_vfsmount)
return ERR_PTR(-ENOENT);
- if (!can_do_hugetlb_shm()) {
+ if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
*user = current_user();
if (user_shm_lock(size, *user)) {
WARN_ONCE(1,
diff --git a/fs/inode.c b/fs/inode.c
index b2ba83d2c4e..4d8e3be5597 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
@@ -87,14 +88,18 @@ static struct hlist_head *inode_hashtable __read_mostly;
DEFINE_SPINLOCK(inode_lock);
/*
- * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
+ * iprune_sem provides exclusion between the kswapd or try_to_free_pages
* icache shrinking path, and the umount path. Without this exclusion,
* by the time prune_icache calls iput for the inode whose pages it has
* been invalidating, or by the time it calls clear_inode & destroy_inode
* from its final dispose_list, the struct super_block they refer to
* (for inode->i_sb->s_op) may already have been freed and reused.
+ *
+ * We make this an rwsem because the fastpath is icache shrinking. In
+ * some cases a filesystem may be doing a significant amount of work in
+ * its inode reclaim code, so this should improve parallelism.
*/
-static DEFINE_MUTEX(iprune_mutex);
+static DECLARE_RWSEM(iprune_sem);
/*
* Statistics gathering..
@@ -123,7 +128,7 @@ static void wake_up_inode(struct inode *inode)
int inode_init_always(struct super_block *sb, struct inode *inode)
{
static const struct address_space_operations empty_aops;
- static struct inode_operations empty_iops;
+ static const struct inode_operations empty_iops;
static const struct file_operations empty_fops;
struct address_space *const mapping = &inode->i_data;
@@ -381,7 +386,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
/*
* We can reschedule here without worrying about the list's
* consistency because the per-sb list of inodes must not
- * change during umount anymore, and because iprune_mutex keeps
+ * change during umount anymore, and because iprune_sem keeps
* shrink_icache_memory() away.
*/
cond_resched_lock(&inode_lock);
@@ -420,7 +425,7 @@ int invalidate_inodes(struct super_block *sb)
int busy;
LIST_HEAD(throw_away);
- mutex_lock(&iprune_mutex);
+ down_write(&iprune_sem);
spin_lock(&inode_lock);
inotify_unmount_inodes(&sb->s_inodes);
fsnotify_unmount_inodes(&sb->s_inodes);
@@ -428,7 +433,7 @@ int invalidate_inodes(struct super_block *sb)
spin_unlock(&inode_lock);
dispose_list(&throw_away);
- mutex_unlock(&iprune_mutex);
+ up_write(&iprune_sem);
return busy;
}
@@ -467,7 +472,7 @@ static void prune_icache(int nr_to_scan)
int nr_scanned;
unsigned long reap = 0;
- mutex_lock(&iprune_mutex);
+ down_read(&iprune_sem);
spin_lock(&inode_lock);
for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
struct inode *inode;
@@ -509,7 +514,7 @@ static void prune_icache(int nr_to_scan)
spin_unlock(&inode_lock);
dispose_list(&freeable);
- mutex_unlock(&iprune_mutex);
+ up_read(&iprune_sem);
}
/*
@@ -695,13 +700,15 @@ void unlock_new_inode(struct inode *inode)
}
#endif
/*
- * This is special! We do not need the spinlock
- * when clearing I_LOCK, because we're guaranteed
- * that nobody else tries to do anything about the
- * state of the inode when it is locked, as we
- * just created it (so there can be no old holders
- * that haven't tested I_LOCK).
+ * This is special! We do not need the spinlock when clearing I_LOCK,
+ * because we're guaranteed that nobody else tries to do anything about
+ * the state of the inode when it is locked, as we just created it (so
+ * there can be no old holders that haven't tested I_LOCK).
+ * However we must emit the memory barrier so that other CPUs reliably
+ * see the clearing of I_LOCK after the other inode initialisation has
+ * completed.
*/
+ smp_mb();
WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
inode->i_state &= ~(I_LOCK|I_NEW);
wake_up_inode(inode);
@@ -1234,7 +1241,16 @@ void generic_delete_inode(struct inode *inode)
}
EXPORT_SYMBOL(generic_delete_inode);
-static void generic_forget_inode(struct inode *inode)
+/**
+ * generic_detach_inode - remove inode from inode lists
+ * @inode: inode to remove
+ *
+ * Remove inode from inode lists, write it if it's dirty. This is just an
+ * internal VFS helper exported for hugetlbfs. Do not use!
+ *
+ * Returns 1 if inode should be completely destroyed.
+ */
+int generic_detach_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
@@ -1244,7 +1260,7 @@ static void generic_forget_inode(struct inode *inode)
inodes_stat.nr_unused++;
if (sb->s_flags & MS_ACTIVE) {
spin_unlock(&inode_lock);
- return;
+ return 0;
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_WILL_FREE;
@@ -1262,6 +1278,14 @@ static void generic_forget_inode(struct inode *inode)
inode->i_state |= I_FREEING;
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(generic_detach_inode);
+
+static void generic_forget_inode(struct inode *inode)
+{
+ if (!generic_detach_inode(inode))
+ return;
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
@@ -1392,31 +1416,31 @@ void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
struct timespec now;
- if (mnt_want_write(mnt))
- return;
if (inode->i_flags & S_NOATIME)
- goto out;
+ return;
if (IS_NOATIME(inode))
- goto out;
+ return;
if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
- goto out;
+ return;
if (mnt->mnt_flags & MNT_NOATIME)
- goto out;
+ return;
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
- goto out;
+ return;
now = current_fs_time(inode->i_sb);
if (!relatime_need_update(mnt, inode, now))
- goto out;
+ return;
if (timespec_equal(&inode->i_atime, &now))
- goto out;
+ return;
+
+ if (mnt_want_write(mnt))
+ return;
inode->i_atime = now;
mark_inode_dirty_sync(inode);
-out:
mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);
@@ -1437,34 +1461,37 @@ void file_update_time(struct file *file)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct timespec now;
- int sync_it = 0;
- int err;
+ enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+ /* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return;
- err = mnt_want_write_file(file);
- if (err)
- return;
-
now = current_fs_time(inode->i_sb);
- if (!timespec_equal(&inode->i_mtime, &now)) {
- inode->i_mtime = now;
- sync_it = 1;
- }
+ if (!timespec_equal(&inode->i_mtime, &now))
+ sync_it = S_MTIME;
- if (!timespec_equal(&inode->i_ctime, &now)) {
- inode->i_ctime = now;
- sync_it = 1;
- }
+ if (!timespec_equal(&inode->i_ctime, &now))
+ sync_it |= S_CTIME;
- if (IS_I_VERSION(inode)) {
- inode_inc_iversion(inode);
- sync_it = 1;
- }
+ if (IS_I_VERSION(inode))
+ sync_it |= S_VERSION;
+
+ if (!sync_it)
+ return;
- if (sync_it)
- mark_inode_dirty_sync(inode);
+ /* Finally allowed to write? Takes lock. */
+ if (mnt_want_write_file(file))
+ return;
+
+ /* Only change inode inside the lock region */
+ if (sync_it & S_VERSION)
+ inode_inc_iversion(inode);
+ if (sync_it & S_CTIME)
+ inode->i_ctime = now;
+ if (sync_it & S_MTIME)
+ inode->i_mtime = now;
+ mark_inode_dirty_sync(inode);
mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);
@@ -1592,7 +1619,8 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
else if (S_ISSOCK(mode))
inode->i_fop = &bad_sock_fops;
else
- printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
- mode);
+ printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
+ " inode %s:%lu\n", mode, inode->i_sb->s_id,
+ inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
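The file_update_time() rewrite above first collects, as a flag mask, which timestamps actually need changing, returns early if none do, and only then pays for mnt_want_write_file() and dirtying the inode. A minimal standalone sketch of that "gather flags, exit early, then take the lock" shape follows; the lock helpers and the inode struct are stand-ins, not kernel APIs.

/* Sketch: gather what needs updating before taking the expensive path. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum { UPD_MTIME = 1, UPD_CTIME = 2 };

struct demo_inode {
	time_t mtime, ctime;
};

static bool acquire_write_access(void) { return true; }	/* stand-in lock */
static void release_write_access(void) { }

static void update_times(struct demo_inode *inode, time_t now)
{
	int sync_it = 0;

	if (inode->mtime != now)
		sync_it |= UPD_MTIME;
	if (inode->ctime != now)
		sync_it |= UPD_CTIME;

	if (!sync_it)			/* nothing changed: no lock, no dirtying */
		return;

	if (!acquire_write_access())	/* only now pay for write access */
		return;
	if (sync_it & UPD_CTIME)
		inode->ctime = now;
	if (sync_it & UPD_MTIME)
		inode->mtime = now;
	release_write_access();
}

int main(void)
{
	struct demo_inode ino = { 0, 0 };

	update_times(&ino, time(NULL));
	printf("mtime=%ld ctime=%ld\n", (long)ino.mtime, (long)ino.ctime);
	update_times(&ino, ino.mtime);	/* second call is a no-op */
	return 0;
}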
diff --git a/fs/internal.h b/fs/internal.h
index d55ef562f0b..515175b8b72 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -57,6 +57,7 @@ extern int check_unsafe_exec(struct linux_binprm *);
* namespace.c
*/
extern int copy_mount_options(const void __user *, unsigned long *);
+extern int copy_mount_string(const void __user *, char **);
extern void free_vfsmnt(struct vfsmount *);
extern struct vfsmount *alloc_vfsmnt(const char *);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 5612880fcbe..7b17a14396f 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -162,20 +162,21 @@ EXPORT_SYMBOL(fiemap_check_flags);
static int fiemap_check_ranges(struct super_block *sb,
u64 start, u64 len, u64 *new_len)
{
+ u64 maxbytes = (u64) sb->s_maxbytes;
+
*new_len = len;
if (len == 0)
return -EINVAL;
- if (start > sb->s_maxbytes)
+ if (start > maxbytes)
return -EFBIG;
/*
* Shrink request scope to what the fs can actually handle.
*/
- if ((len > sb->s_maxbytes) ||
- (sb->s_maxbytes - len) < start)
- *new_len = sb->s_maxbytes - start;
+ if (len > maxbytes || (maxbytes - len) < start)
+ *new_len = maxbytes - start;
return 0;
}
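The fiemap_check_ranges() change above reads s_maxbytes once into a u64 and clamps the request so that start + len can never be computed past the filesystem limit, avoiding both sign and wraparound surprises. A short standalone sketch of the same clamp, with asserts on the edge cases; the numbers are made up for illustration.

/* Sketch: clamp a (start, len) range to maxbytes without overflow. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Returns 0 and writes the usable length, or -1 if len is zero or start
 * is already beyond the limit. */
static int check_range(uint64_t maxbytes, uint64_t start, uint64_t len,
		       uint64_t *new_len)
{
	if (len == 0)
		return -1;
	if (start > maxbytes)
		return -1;

	/* Never compute start + len directly: it may wrap around. */
	if (len > maxbytes || maxbytes - len < start)
		*new_len = maxbytes - start;
	else
		*new_len = len;
	return 0;
}

int main(void)
{
	uint64_t out;

	assert(check_range(1000, 100, 200, &out) == 0 && out == 200);
	assert(check_range(1000, 900, 200, &out) == 0 && out == 100);	/* clamped */
	assert(check_range(1000, 100, UINT64_MAX, &out) == 0 && out == 900);
	assert(check_range(1000, 2000, 10, &out) == -1);
	puts("range checks ok");
	return 0;
}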
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 85f96bc651c..6b4dcd4f294 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -46,10 +46,7 @@ static void isofs_put_super(struct super_block *sb)
#ifdef CONFIG_JOLIET
lock_kernel();
- if (sbi->s_nls_iocharset) {
- unload_nls(sbi->s_nls_iocharset);
- sbi->s_nls_iocharset = NULL;
- }
+ unload_nls(sbi->s_nls_iocharset);
unlock_kernel();
#endif
@@ -912,8 +909,7 @@ out_no_root:
printk(KERN_WARNING "%s: get root inode failed\n", __func__);
out_no_inode:
#ifdef CONFIG_JOLIET
- if (sbi->s_nls_iocharset)
- unload_nls(sbi->s_nls_iocharset);
+ unload_nls(sbi->s_nls_iocharset);
#endif
goto out_freesbi;
out_no_read:
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a8a358bc0f2..53b86e16e5f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -768,7 +768,7 @@ static void jbd2_seq_history_stop(struct seq_file *seq, void *v)
{
}
-static struct seq_operations jbd2_seq_history_ops = {
+static const struct seq_operations jbd2_seq_history_ops = {
.start = jbd2_seq_history_start,
.next = jbd2_seq_history_next,
.stop = jbd2_seq_history_stop,
@@ -872,7 +872,7 @@ static void jbd2_seq_info_stop(struct seq_file *seq, void *v)
{
}
-static struct seq_operations jbd2_seq_info_ops = {
+static const struct seq_operations jbd2_seq_info_ops = {
.start = jbd2_seq_info_start,
.next = jbd2_seq_info_next,
.stop = jbd2_seq_info_stop,
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index e9580104b6b..3ff50da9478 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -15,6 +15,7 @@
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/freezer.h>
+#include <linux/kthread.h>
#include "nodelist.h"
@@ -31,7 +32,7 @@ void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
/* This must only ever be called when no GC thread is currently running */
int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
{
- pid_t pid;
+ struct task_struct *tsk;
int ret = 0;
BUG_ON(c->gc_task);
@@ -39,15 +40,16 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
init_completion(&c->gc_thread_start);
init_completion(&c->gc_thread_exit);
- pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
- if (pid < 0) {
- printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid);
+ tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
+ if (IS_ERR(tsk)) {
+ printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk));
complete(&c->gc_thread_exit);
- ret = pid;
+ ret = PTR_ERR(tsk);
} else {
/* Wait for it... */
- D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
+ D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid));
wait_for_completion(&c->gc_thread_start);
+ ret = tsk->pid;
}
return ret;
@@ -71,7 +73,6 @@ static int jffs2_garbage_collect_thread(void *_c)
{
struct jffs2_sb_info *c = _c;
- daemonize("jffs2_gcd_mtd%d", c->mtd->index);
allow_signal(SIGKILL);
allow_signal(SIGSTOP);
allow_signal(SIGCONT);
@@ -107,6 +108,11 @@ static int jffs2_garbage_collect_thread(void *_c)
* the GC thread get there first. */
schedule_timeout_interruptible(msecs_to_jiffies(50));
+ if (kthread_should_stop()) {
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n"));
+ goto die;
+ }
+
/* Put_super will send a SIGKILL and then wait on the sem.
*/
while (signal_pending(current) || freezing(current)) {
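The jffs2 change above moves the garbage-collector thread from the legacy kernel_thread()/daemonize() pair to kthread_run(), which returns a task_struct (or an ERR_PTR) and gives the thread a clean kthread_should_stop() exit path. Below is a bare-bones sketch of that kthread lifecycle in module context; it shows the generic pattern only, not the jffs2 code itself, and the module name and work loop are invented for illustration.

/* Sketch: minimal kthread lifecycle with kthread_run()/kthread_stop(). */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work ... */
		msleep_interruptible(50);
	}
	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);	/* makes kthread_should_stop() return true */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");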
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 9eff2bdae8a..c082868910f 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -39,13 +39,13 @@ int __init jffs2_create_slab_caches(void)
raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
sizeof(struct jffs2_raw_dirent),
- 0, 0, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!raw_dirent_slab)
goto err;
raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
sizeof(struct jffs2_raw_inode),
- 0, 0, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!raw_inode_slab)
goto err;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0035c021395..9a80e8e595d 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -123,7 +123,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
return d_obtain_alias(jffs2_iget(child->d_inode->i_sb, pino));
}
-static struct export_operations jffs2_export_ops = {
+static const struct export_operations jffs2_export_ops = {
.get_parent = jffs2_get_parent,
.fh_to_dentry = jffs2_fh_to_dentry,
.fh_to_parent = jffs2_fh_to_parent,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 37e6dcda8fc..2234c73fc57 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -178,13 +178,11 @@ static void jfs_put_super(struct super_block *sb)
rc = jfs_umount(sb);
if (rc)
jfs_err("jfs_umount failed with return code %d", rc);
- if (sbi->nls_tab)
- unload_nls(sbi->nls_tab);
- sbi->nls_tab = NULL;
+
+ unload_nls(sbi->nls_tab);
truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
iput(sbi->direct_inode);
- sbi->direct_inode = NULL;
kfree(sbi);
@@ -347,8 +345,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
if (nls_map != (void *) -1) {
/* Discard old (if remount) */
- if (sbi->nls_tab)
- unload_nls(sbi->nls_tab);
+ unload_nls(sbi->nls_tab);
sbi->nls_tab = nls_map;
}
return 1;
diff --git a/fs/libfs.c b/fs/libfs.c
index dcec3d3ea64..219576c52d8 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -527,14 +527,18 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
const void *from, size_t available)
{
loff_t pos = *ppos;
+ size_t ret;
+
if (pos < 0)
return -EINVAL;
- if (pos >= available)
+ if (pos >= available || !count)
return 0;
if (count > available - pos)
count = available - pos;
- if (copy_to_user(to, from + pos, count))
+ ret = copy_to_user(to, from + pos, count);
+ if (ret == count)
return -EFAULT;
+ count -= ret;
*ppos = pos + count;
return count;
}
@@ -735,10 +739,11 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
if (copy_from_user(attr->set_buf, buf, size))
goto out;
- ret = len; /* claim we got the whole input */
attr->set_buf[size] = '\0';
val = simple_strtol(attr->set_buf, NULL, 0);
- attr->set(attr->data, val);
+ ret = attr->set(attr->data, val);
+ if (ret == 0)
+ ret = len; /* on success, claim we got the whole input */
out:
mutex_unlock(&attr->mutex);
return ret;
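The simple_read_from_buffer() fix above handles a partial copy_to_user(): since copy_to_user() returns the number of bytes it could not copy, a completely failed copy becomes -EFAULT while a partial one is reported as a short read and advances the file position by what actually made it across. A small userspace analogue follows, where the copy primitive is a stand-in that can be told to stop after a given number of bytes; it is not the kernel function.

/* Sketch: turning "bytes not copied" into a short read or an error. */
#include <stdio.h>
#include <string.h>

#define DEMO_EFAULT (-14)

/* Stand-in for copy_to_user(): returns how many bytes were NOT copied. */
static size_t copy_out(char *dst, const char *src, size_t count, size_t fail_after)
{
	size_t ok = count < fail_after ? count : fail_after;

	memcpy(dst, src, ok);
	return count - ok;
}

static long read_from_buffer(char *to, size_t count, size_t *ppos,
			     const char *from, size_t available, size_t fail_after)
{
	size_t pos = *ppos, not_copied;

	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;

	not_copied = copy_out(to, from + pos, count, fail_after);
	if (not_copied == count)
		return DEMO_EFAULT;	/* nothing made it across */
	count -= not_copied;		/* partial copy: report a short read */
	*ppos = pos + count;
	return (long)count;
}

int main(void)
{
	const char data[] = "hello, world";
	char buf[16];
	size_t pos = 0;

	printf("full:  %ld\n", read_from_buffer(buf, 5, &pos, data, sizeof(data) - 1, 100));
	printf("short: %ld\n", read_from_buffer(buf, 5, &pos, data, sizeof(data) - 1, 3));
	pos = 0;
	printf("fault: %ld\n", read_from_buffer(buf, 5, &pos, data, sizeof(data) - 1, 0));
	return 0;
}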
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 1f3b0fc0d35..fc9032dc886 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -166,7 +166,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
*/
if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
continue;
- if (!nlm_cmp_addr(nlm_addr(block->b_host), addr))
+ if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
continue;
if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
continue;
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 4336adba952..c81249fef11 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -458,7 +458,7 @@ static void nlmclnt_locks_release_private(struct file_lock *fl)
nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}
-static struct file_lock_operations nlmclnt_lock_ops = {
+static const struct file_lock_operations nlmclnt_lock_ops = {
.fl_copy_lock = nlmclnt_locks_copy_lock,
.fl_release_private = nlmclnt_locks_release_private,
};
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 7cb076ac6b4..4600c2037b8 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -111,7 +111,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
*/
chain = &nlm_hosts[nlm_hash_address(ni->sap)];
hlist_for_each_entry(host, pos, chain, h_hash) {
- if (!nlm_cmp_addr(nlm_addr(host), ni->sap))
+ if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
continue;
/* See if we have an NSM handle for this client */
@@ -125,7 +125,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
if (host->h_server != ni->server)
continue;
if (ni->server &&
- !nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
+ !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
continue;
/* Move to head of hash chain. */
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 30c933188dd..f956651d0f6 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -209,7 +209,7 @@ static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
struct nsm_handle *nsm;
list_for_each_entry(nsm, &nsm_handles, sm_link)
- if (nlm_cmp_addr(nsm_addr(nsm), sap))
+ if (rpc_cmp_addr(nsm_addr(nsm), sap))
return nsm;
return NULL;
}
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e577a78d7ba..d1001790fa9 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -705,7 +705,7 @@ static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
}
-struct lock_manager_operations nlmsvc_lock_operations = {
+const struct lock_manager_operations nlmsvc_lock_operations = {
.fl_compare_owner = nlmsvc_same_owner,
.fl_notify = nlmsvc_notify_blocked,
.fl_grant = nlmsvc_grant_deferred,
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 9e4d6aab611..ad478da7ca6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb);
static int
nlmsvc_match_ip(void *datap, struct nlm_host *host)
{
- return nlm_cmp_addr(nlm_srcaddr(host), datap);
+ return rpc_cmp_addr(nlm_srcaddr(host), datap);
}
/**
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 0336f2beacd..b583ab0a4cb 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/utsname.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index e1d52865319..ad9dbbc9145 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/utsname.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
diff --git a/fs/locks.c b/fs/locks.c
index 19ee18a6829..a8794f233bc 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -434,7 +434,7 @@ static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
return fl->fl_file == try->fl_file;
}
-static struct lock_manager_operations lease_manager_ops = {
+static const struct lock_manager_operations lease_manager_ops = {
.fl_break = lease_break_callback,
.fl_release_private = lease_release_private_callback,
.fl_mylease = lease_mylease_callback,
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index d407e7a0b6f..6198731d7fc 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -308,14 +308,18 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
struct inode *inode = (struct inode*)mapping->host;
char *kaddr = page_address(page);
loff_t pos = page_offset(page) + (char*)de - kaddr;
- unsigned len = minix_sb(inode->i_sb)->s_dirsize;
+ struct minix_sb_info *sbi = minix_sb(inode->i_sb);
+ unsigned len = sbi->s_dirsize;
int err;
lock_page(page);
err = __minix_write_begin(NULL, mapping, pos, len,
AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
if (err == 0) {
- de->inode = 0;
+ if (sbi->s_version == MINIX_V3)
+ ((minix3_dirent *) de)->inode = 0;
+ else
+ de->inode = 0;
err = dir_commit_chunk(page, pos, len);
} else {
unlock_page(page);
@@ -440,7 +444,10 @@ void minix_set_link(struct minix_dir_entry *de, struct page *page,
err = __minix_write_begin(NULL, mapping, pos, sbi->s_dirsize,
AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
if (err == 0) {
- de->inode = inode->i_ino;
+ if (sbi->s_version == MINIX_V3)
+ ((minix3_dirent *) de)->inode = inode->i_ino;
+ else
+ de->inode = inode->i_ino;
err = dir_commit_chunk(page, pos, sbi->s_dirsize);
} else {
unlock_page(page);
@@ -470,7 +477,14 @@ ino_t minix_inode_by_name(struct dentry *dentry)
ino_t res = 0;
if (de) {
- res = de->inode;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+ struct minix_sb_info *sbi = minix_sb(inode->i_sb);
+
+ if (sbi->s_version == MINIX_V3)
+ res = ((minix3_dirent *) de)->inode;
+ else
+ res = de->inode;
dir_put_page(page);
}
return res;
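
The three minix hunks above all hinge on the same on-disk difference: a Minix V1/V2 directory entry stores a 16-bit inode number, while a V3 entry stores a 32-bit one, so assigning through the old minix_dir_entry type would only touch part of a V3 entry. A minimal userspace sketch of that distinction (struct layouts here are simplified and hypothetical, not the real on-disk definitions):

#include <stdint.h>
#include <stdio.h>

struct demo_dirent_v12 { uint16_t inode; char name[30]; };  /* V1/V2-style */
struct demo_dirent_v3  { uint32_t inode; char name[60]; };  /* V3-style */

static void set_dirent_inode(void *de, int is_v3, uint32_t ino)
{
        if (is_v3)
                ((struct demo_dirent_v3 *)de)->inode = ino;
        else
                ((struct demo_dirent_v12 *)de)->inode = (uint16_t)ino;
}

int main(void)
{
        struct demo_dirent_v3 d = { 0 };

        set_dirent_inode(&d, 1, 70000);   /* would not fit in 16 bits */
        printf("%u\n", (unsigned)d.inode);
        return 0;
}
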
diff --git a/fs/namespace.c b/fs/namespace.c
index 7230787d18b..bdc3cb4fd22 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1640,7 +1640,7 @@ static int do_new_mount(struct path *path, char *type, int flags,
{
struct vfsmount *mnt;
- if (!type || !memchr(type, 0, PAGE_SIZE))
+ if (!type)
return -EINVAL;
/* we need capabilities... */
@@ -1871,6 +1871,23 @@ int copy_mount_options(const void __user * data, unsigned long *where)
return 0;
}
+int copy_mount_string(const void __user *data, char **where)
+{
+ char *tmp;
+
+ if (!data) {
+ *where = NULL;
+ return 0;
+ }
+
+ tmp = strndup_user(data, PAGE_SIZE);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *where = tmp;
+ return 0;
+}
+
/*
* Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
* be given to the mount() call (ie: read-only, no-dev, no-suid etc).
@@ -1900,8 +1917,6 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
return -EINVAL;
- if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
- return -EINVAL;
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
@@ -2070,40 +2085,42 @@ EXPORT_SYMBOL(create_mnt_ns);
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
- int retval;
+ int ret;
+ char *kernel_type;
+ char *kernel_dir;
+ char *kernel_dev;
unsigned long data_page;
- unsigned long type_page;
- unsigned long dev_page;
- char *dir_page;
- retval = copy_mount_options(type, &type_page);
- if (retval < 0)
- return retval;
+ ret = copy_mount_string(type, &kernel_type);
+ if (ret < 0)
+ goto out_type;
- dir_page = getname(dir_name);
- retval = PTR_ERR(dir_page);
- if (IS_ERR(dir_page))
- goto out1;
+ kernel_dir = getname(dir_name);
+ if (IS_ERR(kernel_dir)) {
+ ret = PTR_ERR(kernel_dir);
+ goto out_dir;
+ }
- retval = copy_mount_options(dev_name, &dev_page);
- if (retval < 0)
- goto out2;
+ ret = copy_mount_string(dev_name, &kernel_dev);
+ if (ret < 0)
+ goto out_dev;
- retval = copy_mount_options(data, &data_page);
- if (retval < 0)
- goto out3;
+ ret = copy_mount_options(data, &data_page);
+ if (ret < 0)
+ goto out_data;
- retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
- flags, (void *)data_page);
- free_page(data_page);
+ ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
+ (void *) data_page);
-out3:
- free_page(dev_page);
-out2:
- putname(dir_page);
-out1:
- free_page(type_page);
- return retval;
+ free_page(data_page);
+out_data:
+ kfree(kernel_dev);
+out_dev:
+ putname(kernel_dir);
+out_dir:
+ kfree(kernel_type);
+out_type:
+ return ret;
}
/*
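
The rewritten sys_mount() above copies the type and device strings with strndup_user() (via the new copy_mount_string()) and unwinds through a cascade of labels, releasing each temporary in reverse order of acquisition on both the success and the failure paths. A standalone userspace sketch of that unwind shape; acquire() is a hypothetical stand-in for copy_mount_string()/getname(), not a kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *acquire(const char *what)          /* stand-in for the copy helpers */
{
        size_t len = strlen(what) + 1;
        char *s = malloc(len);

        if (s)
                memcpy(s, what, len);
        return s;
}

static int do_mount_demo(void)
{
        char *type, *dir, *dev;
        int ret = -1;

        type = acquire("ext4");
        if (!type)
                goto out_type;
        dir = acquire("/mnt");
        if (!dir)
                goto out_dir;
        dev = acquire("/dev/sda1");
        if (!dev)
                goto out_dev;

        printf("mount %s on %s type %s\n", dev, dir, type);
        ret = 0;

        free(dev);              /* success also falls through the labels ... */
out_dev:
        free(dir);              /* ... releasing in reverse acquisition order */
out_dir:
        free(type);
out_type:
        return ret;
}

int main(void)
{
        return do_mount_demo();
}
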
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 9c590722d87..b8b5b30d53f 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1241,7 +1241,7 @@ ncp_date_unix2dos(int unix_date, __le16 *time, __le16 *date)
month = 2;
} else {
nl_day = (year & 3) || day <= 59 ? day : day - 1;
- for (month = 0; month < 12; month++)
+ for (month = 1; month < 12; month++)
if (day_n[month] > nl_day)
break;
}
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index b99ce205b1b..cf98da1be23 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -746,16 +746,8 @@ static void ncp_put_super(struct super_block *sb)
#ifdef CONFIG_NCPFS_NLS
/* unload the NLS charsets */
- if (server->nls_vol)
- {
- unload_nls(server->nls_vol);
- server->nls_vol = NULL;
- }
- if (server->nls_io)
- {
- unload_nls(server->nls_io);
- server->nls_io = NULL;
- }
+ unload_nls(server->nls_vol);
+ unload_nls(server->nls_io);
#endif /* CONFIG_NCPFS_NLS */
if (server->info_filp)
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index fa038df63ac..0d58caf4a6e 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -223,10 +223,8 @@ ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
oldset_io = server->nls_io;
server->nls_io = iocharset;
- if (oldset_cp)
- unload_nls(oldset_cp);
- if (oldset_io)
- unload_nls(oldset_io);
+ unload_nls(oldset_cp);
+ unload_nls(oldset_io);
return 0;
}
@@ -442,7 +440,7 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
if (dentry) {
struct inode* s_inode = dentry->d_inode;
- if (inode) {
+ if (s_inode) {
NCP_FINFO(s_inode)->volNumber = vnum;
NCP_FINFO(s_inode)->dirEntNum = de;
NCP_FINFO(s_inode)->DosDirNum = dosde;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index e5a2dac5f71..76b0aa0f73b 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -222,7 +222,7 @@ static unsigned decode_sessionid(struct xdr_stream *xdr,
p = read_buf(xdr, len);
if (unlikely(p == NULL))
- return htonl(NFS4ERR_RESOURCE);;
+ return htonl(NFS4ERR_RESOURCE);
memcpy(sid->data, p, len);
return 0;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a7ce15d3c24..63976c0ccc2 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -648,8 +648,6 @@ static int nfs_start_lockd(struct nfs_server *server)
.hostname = clp->cl_hostname,
.address = (struct sockaddr *)&clp->cl_addr,
.addrlen = clp->cl_addrlen,
- .protocol = server->flags & NFS_MOUNT_TCP ?
- IPPROTO_TCP : IPPROTO_UDP,
.nfs_version = clp->rpc_ops->version,
.noresvport = server->flags & NFS_MOUNT_NORESVPORT ?
1 : 0,
@@ -660,6 +658,14 @@ static int nfs_start_lockd(struct nfs_server *server)
if (server->flags & NFS_MOUNT_NONLM)
return 0;
+ switch (clp->cl_proto) {
+ default:
+ nlm_init.protocol = IPPROTO_TCP;
+ break;
+ case XPRT_TRANSPORT_UDP:
+ nlm_init.protocol = IPPROTO_UDP;
+ }
+
host = nlmclnt_init(&nlm_init);
if (IS_ERR(host))
return PTR_ERR(host);
@@ -787,7 +793,7 @@ static int nfs_init_server(struct nfs_server *server,
dprintk("--> nfs_init_server()\n");
#ifdef CONFIG_NFS_V3
- if (data->flags & NFS_MOUNT_VER3)
+ if (data->version == 3)
cl_init.rpc_ops = &nfs_v3_clientops;
#endif
@@ -964,6 +970,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
target->acdirmin = source->acdirmin;
target->acdirmax = source->acdirmax;
target->caps = source->caps;
+ target->options = source->options;
}
/*
@@ -1531,7 +1538,7 @@ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos);
static void nfs_server_list_stop(struct seq_file *p, void *v);
static int nfs_server_list_show(struct seq_file *m, void *v);
-static struct seq_operations nfs_server_list_ops = {
+static const struct seq_operations nfs_server_list_ops = {
.start = nfs_server_list_start,
.next = nfs_server_list_next,
.stop = nfs_server_list_stop,
@@ -1552,7 +1559,7 @@ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos);
static void nfs_volume_list_stop(struct seq_file *p, void *v);
static int nfs_volume_list_show(struct seq_file *m, void *v);
-static struct seq_operations nfs_volume_list_ops = {
+static const struct seq_operations nfs_volume_list_ops = {
.start = nfs_volume_list_start,
.next = nfs_volume_list_next,
.stop = nfs_volume_list_stop,
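
The nfs_start_lockd() change above stops deriving the NLM transport from the NFS_MOUNT_TCP flag and instead switches on the client's cl_proto, defaulting to TCP and choosing UDP only when the transport is explicitly UDP. A toy version of that mapping; the enum values are illustrative placeholders, not the kernel's XPRT_TRANSPORT_* or IPPROTO_* definitions:

#include <stdio.h>

enum demo_xprt    { DEMO_XPRT_UDP, DEMO_XPRT_TCP, DEMO_XPRT_RDMA };
enum demo_ipproto { DEMO_IPPROTO_TCP = 6, DEMO_IPPROTO_UDP = 17 };

/* Default to TCP; only an explicitly UDP transport selects UDP for NLM. */
static enum demo_ipproto nlm_proto_for(enum demo_xprt cl_proto)
{
        switch (cl_proto) {
        case DEMO_XPRT_UDP:
                return DEMO_IPPROTO_UDP;
        default:
                return DEMO_IPPROTO_TCP;
        }
}

int main(void)
{
        printf("udp transport -> %d, rdma transport -> %d\n",
               nlm_proto_for(DEMO_XPRT_UDP), nlm_proto_for(DEMO_XPRT_RDMA));
        return 0;
}
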
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5021b75d2d1..86d6b4db109 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -525,6 +525,7 @@ const struct address_space_operations nfs_file_aops = {
.direct_IO = nfs_direct_IO,
.migratepage = nfs_migrate_page,
.launder_page = nfs_launder_page,
+ .error_remove_page = generic_error_remove_page,
};
/*
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 379be678cb7..70fad69eb95 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -58,17 +58,34 @@ void nfs_fscache_release_client_cookie(struct nfs_client *clp)
/*
* Get the cache cookie for an NFS superblock. We have to handle
* uniquification here because the cache doesn't do it for us.
+ *
+ * The default uniquifier is just an empty string, but it may be overridden
+ * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
+ * superblock across an automount point of some nature.
*/
-void nfs_fscache_get_super_cookie(struct super_block *sb,
- struct nfs_parsed_mount_data *data)
+void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
+ struct nfs_clone_mount *mntdata)
{
struct nfs_fscache_key *key, *xkey;
struct nfs_server *nfss = NFS_SB(sb);
struct rb_node **p, *parent;
- const char *uniq = data->fscache_uniq ?: "";
int diff, ulen;
- ulen = strlen(uniq);
+ if (uniq) {
+ ulen = strlen(uniq);
+ } else if (mntdata) {
+ struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
+ if (mnt_s->fscache_key) {
+ uniq = mnt_s->fscache_key->key.uniquifier;
+ ulen = mnt_s->fscache_key->key.uniq_len;
+ }
+ }
+
+ if (!uniq) {
+ uniq = "";
+ ulen = 1;
+ }
+
key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
if (!key)
return;
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 6e809bb0ff0..b9c572d0679 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -74,7 +74,8 @@ extern void nfs_fscache_get_client_cookie(struct nfs_client *);
extern void nfs_fscache_release_client_cookie(struct nfs_client *);
extern void nfs_fscache_get_super_cookie(struct super_block *,
- struct nfs_parsed_mount_data *);
+ const char *,
+ struct nfs_clone_mount *);
extern void nfs_fscache_release_super_cookie(struct super_block *);
extern void nfs_fscache_init_inode_cookie(struct inode *);
@@ -173,7 +174,8 @@ static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
static inline void nfs_fscache_get_super_cookie(
struct super_block *sb,
- struct nfs_parsed_mount_data *data)
+ const char *uniq,
+ struct nfs_clone_mount *mntdata)
{
}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
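
The reworked nfs_fscache_get_super_cookie() above chooses the cache-key uniquifier in a fixed order: an explicit fsc= mount string first, then the parent superblock's key when the cookie is inherited across an automount point, and finally the empty string (which the patch stores with ulen = 1). A simplified userspace sketch of that selection; the types and field names are hypothetical:

#include <stdio.h>
#include <string.h>

struct demo_parent_key {                /* stands in for the parent's fscache key */
        const char *uniquifier;
        size_t uniq_len;
};

static void pick_uniquifier(const char *uniq, const struct demo_parent_key *parent,
                            const char **out, size_t *out_len)
{
        if (uniq) {                             /* 1) explicit "fsc=..." option */
                *out = uniq;
                *out_len = strlen(uniq);
                return;
        }
        if (parent && parent->uniquifier) {     /* 2) inherited from parent sb */
                *out = parent->uniquifier;
                *out_len = parent->uniq_len;
                return;
        }
        *out = "";                              /* 3) default: empty string */
        *out_len = 1;                           /* the patch stores a 1-byte key here */
}

int main(void)
{
        const char *u;
        size_t n;

        pick_uniquifier(NULL, NULL, &u, &n);
        printf("uniq='%s' len=%zu\n", u, n);
        return 0;
}
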
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 060022b4651..faa091865ad 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -458,49 +458,21 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
*/
static int nfs_vmtruncate(struct inode * inode, loff_t offset)
{
- if (i_size_read(inode) < offset) {
- unsigned long limit;
-
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out_big;
- spin_lock(&inode->i_lock);
- i_size_write(inode, offset);
- spin_unlock(&inode->i_lock);
- } else {
- struct address_space *mapping = inode->i_mapping;
+ loff_t oldsize;
+ int err;
- /*
- * truncation of in-use swapfiles is disallowed - it would
- * cause subsequent swapout to scribble on the now-freed
- * blocks.
- */
- if (IS_SWAPFILE(inode))
- return -ETXTBSY;
- spin_lock(&inode->i_lock);
- i_size_write(inode, offset);
- spin_unlock(&inode->i_lock);
+ err = inode_newsize_ok(inode, offset);
+ if (err)
+ goto out;
- /*
- * unmap_mapping_range is called twice, first simply for
- * efficiency so that truncate_inode_pages does fewer
- * single-page unmaps. However after this first call, and
- * before truncate_inode_pages finishes, it is possible for
- * private pages to be COWed, which remain after
- * truncate_inode_pages finishes, hence the second
- * unmap_mapping_range call must be made for correctness.
- */
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- }
- return 0;
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out_big:
- return -EFBIG;
+ spin_lock(&inode->i_lock);
+ oldsize = inode->i_size;
+ i_size_write(inode, offset);
+ spin_unlock(&inode->i_lock);
+
+ truncate_pagecache(inode, oldsize, offset);
+out:
+ return err;
}
/**
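
The new nfs_vmtruncate() above reduces the old grow/shrink branches to three steps: validate the requested size with inode_newsize_ok(), publish the new i_size under the inode spinlock, then let truncate_pagecache() drop the now-stale pages. A userspace toy with the same shape; none of this is kernel code, and the "page cache" is just a byte buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_inode {
        size_t size;            /* i_size analogue */
        unsigned char *cache;   /* stands in for cached pages */
        size_t cache_cap;
};

static int newsize_ok(const struct toy_inode *inode, size_t offset)
{
        return offset <= inode->cache_cap ? 0 : -1;     /* -EFBIG analogue */
}

static int toy_truncate(struct toy_inode *inode, size_t offset)
{
        size_t oldsize;
        int err = newsize_ok(inode, offset);            /* 1) validate */

        if (err)
                return err;
        oldsize = inode->size;
        inode->size = offset;                           /* 2) publish new size */
        if (offset < oldsize)                           /* 3) drop stale data */
                memset(inode->cache + offset, 0, oldsize - offset);
        return 0;
}

int main(void)
{
        struct toy_inode ino = { .size = 8, .cache_cap = 16 };

        ino.cache = calloc(1, ino.cache_cap);
        if (!ino.cache)
                return 1;
        printf("%d\n", toy_truncate(&ino, 4));
        free(ino.cache);
        return 0;
}
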
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index c862c9340f9..5e078b222b4 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -13,7 +13,6 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/utsname.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index ee6a13f0544..3f8881d1a05 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -7,7 +7,6 @@
*/
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 35869a4921f..5fe5492fbd2 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -10,7 +10,6 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/utsname.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index be6544aef41..ed7c269e251 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -36,7 +36,6 @@
*/
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1434080aefe..2ef4fecf398 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -638,7 +638,7 @@ static void nfs4_fl_release_lock(struct file_lock *fl)
nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
-static struct file_lock_operations nfs4_fl_lock_ops = {
+static const struct file_lock_operations nfs4_fl_lock_ops = {
.fl_copy_lock = nfs4_fl_copy_lock,
.fl_release_private = nfs4_fl_release_lock,
};
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index cfc30d362f9..83ad47cbdd8 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -39,7 +39,6 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/utsname.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 7be72d90d49..ef583854d8d 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/in.h>
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f1cc0587cfe..29786d3b932 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -728,6 +728,27 @@ static void nfs_umount_begin(struct super_block *sb)
unlock_kernel();
}
+static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(int flags)
+{
+ struct nfs_parsed_mount_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data) {
+ data->flags = flags;
+ data->rsize = NFS_MAX_FILE_IO_SIZE;
+ data->wsize = NFS_MAX_FILE_IO_SIZE;
+ data->acregmin = NFS_DEF_ACREGMIN;
+ data->acregmax = NFS_DEF_ACREGMAX;
+ data->acdirmin = NFS_DEF_ACDIRMIN;
+ data->acdirmax = NFS_DEF_ACDIRMAX;
+ data->nfs_server.port = NFS_UNSPEC_PORT;
+ data->auth_flavors[0] = RPC_AUTH_UNIX;
+ data->auth_flavor_len = 1;
+ data->minorversion = 0;
+ }
+ return data;
+}
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -1430,10 +1451,13 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
int status;
if (args->mount_server.version == 0) {
- if (args->flags & NFS_MOUNT_VER3)
- args->mount_server.version = NFS_MNT3_VERSION;
- else
- args->mount_server.version = NFS_MNT_VERSION;
+ switch (args->version) {
+ default:
+ args->mount_server.version = NFS_MNT3_VERSION;
+ break;
+ case 2:
+ args->mount_server.version = NFS_MNT_VERSION;
+ }
}
request.version = args->mount_server.version;
@@ -1634,20 +1658,6 @@ static int nfs_validate_mount_data(void *options,
if (data == NULL)
goto out_no_data;
- args->flags = (NFS_MOUNT_VER3 | NFS_MOUNT_TCP);
- args->rsize = NFS_MAX_FILE_IO_SIZE;
- args->wsize = NFS_MAX_FILE_IO_SIZE;
- args->acregmin = NFS_DEF_ACREGMIN;
- args->acregmax = NFS_DEF_ACREGMAX;
- args->acdirmin = NFS_DEF_ACDIRMIN;
- args->acdirmax = NFS_DEF_ACDIRMAX;
- args->mount_server.port = NFS_UNSPEC_PORT;
- args->nfs_server.port = NFS_UNSPEC_PORT;
- args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- args->auth_flavors[0] = RPC_AUTH_UNIX;
- args->auth_flavor_len = 1;
- args->minorversion = 0;
-
switch (data->version) {
case 1:
data->namlen = 0;
@@ -1701,6 +1711,8 @@ static int nfs_validate_mount_data(void *options,
if (!(data->flags & NFS_MOUNT_TCP))
args->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ else
+ args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
/* N.B. caller will free nfs_server.hostname in all cases */
args->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
args->namlen = data->namlen;
@@ -1778,7 +1790,7 @@ static int nfs_validate_mount_data(void *options,
}
#ifndef CONFIG_NFS_V3
- if (args->flags & NFS_MOUNT_VER3)
+ if (args->version == 3)
goto out_v3_not_compiled;
#endif /* !CONFIG_NFS_V3 */
@@ -1936,7 +1948,7 @@ static void nfs_fill_super(struct super_block *sb,
if (data->bsize)
sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
- if (server->flags & NFS_MOUNT_VER3) {
+ if (server->nfs_client->rpc_ops->version == 3) {
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
@@ -1960,7 +1972,7 @@ static void nfs_clone_super(struct super_block *sb,
sb->s_blocksize = old_sb->s_blocksize;
sb->s_maxbytes = old_sb->s_maxbytes;
- if (server->flags & NFS_MOUNT_VER3) {
+ if (server->nfs_client->rpc_ops->version == 3) {
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
@@ -2094,7 +2106,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
};
int error = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = nfs_alloc_parsed_mount_data(NFS_MOUNT_VER3 | NFS_MOUNT_TCP);
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
if (data == NULL || mntfh == NULL)
goto out_free_fh;
@@ -2144,7 +2156,8 @@ static int nfs_get_sb(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
nfs_fill_super(s, data);
- nfs_fscache_get_super_cookie(s, data);
+ nfs_fscache_get_super_cookie(
+ s, data ? data->fscache_uniq : NULL, NULL);
}
mntroot = nfs_get_root(s, mntfh);
@@ -2245,6 +2258,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
if (!s->s_root) {
/* initial superblock/root creation */
nfs_clone_super(s, data->sb);
+ nfs_fscache_get_super_cookie(s, NULL, data);
}
mntroot = nfs_get_root(s, data->fh);
@@ -2362,18 +2376,7 @@ static int nfs4_validate_mount_data(void *options,
if (data == NULL)
goto out_no_data;
- args->rsize = NFS_MAX_FILE_IO_SIZE;
- args->wsize = NFS_MAX_FILE_IO_SIZE;
- args->acregmin = NFS_DEF_ACREGMIN;
- args->acregmax = NFS_DEF_ACREGMAX;
- args->acdirmin = NFS_DEF_ACDIRMIN;
- args->acdirmax = NFS_DEF_ACDIRMAX;
- args->nfs_server.port = NFS_UNSPEC_PORT;
- args->auth_flavors[0] = RPC_AUTH_UNIX;
- args->auth_flavor_len = 1;
args->version = 4;
- args->minorversion = 0;
-
switch (data->version) {
case 1:
if (data->host_addrlen > sizeof(args->nfs_server.address))
@@ -2508,7 +2511,8 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
nfs4_fill_super(s);
- nfs_fscache_get_super_cookie(s, data);
+ nfs_fscache_get_super_cookie(
+ s, data ? data->fscache_uniq : NULL, NULL);
}
mntroot = nfs4_get_root(s, mntfh);
@@ -2656,7 +2660,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
struct nfs_parsed_mount_data *data;
int error = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = nfs_alloc_parsed_mount_data(0);
if (data == NULL)
goto out_free_data;
@@ -2741,6 +2745,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
if (!s->s_root) {
/* initial superblock/root creation */
nfs4_clone_super(s, data->sb);
+ nfs_fscache_get_super_cookie(s, NULL, data);
}
mntroot = nfs4_get_root(s, data->fh);
@@ -2822,6 +2827,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
nfs4_fill_super(s);
+ nfs_fscache_get_super_cookie(s, NULL, data);
}
mntroot = nfs4_get_root(s, &mntfh);
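
nfs_alloc_parsed_mount_data() above moves the default values out of the two validation paths, so each caller passes only the flags that differ between versions (NFS_MOUNT_VER3 | NFS_MOUNT_TCP for the v2/v3 path, 0 for v4). The same allocate-with-defaults shape as a self-contained sketch; the struct and the numbers are made up for illustration:

#include <stdlib.h>

struct demo_mount_data {                /* hypothetical, not the kernel struct */
        int flags;
        int rsize, wsize;
        int acregmin, acregmax, acdirmin, acdirmax;
        int port, auth_flavor, minorversion;
};

static struct demo_mount_data *alloc_parsed_mount_data(int flags)
{
        struct demo_mount_data *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->flags = flags;               /* the only per-caller difference */
        d->rsize = d->wsize = 1 << 20;  /* placeholder defaults */
        d->acregmin = 3;   d->acregmax = 60;
        d->acdirmin = 30;  d->acdirmax = 60;
        d->port = -1;                   /* "unspecified port" */
        d->auth_flavor = 1;             /* AUTH_UNIX analogue */
        d->minorversion = 0;
        return d;
}

int main(void)
{
        struct demo_mount_data *v3 = alloc_parsed_mount_data(0x01 | 0x02);
        struct demo_mount_data *v4 = alloc_parsed_mount_data(0);

        free(v3);
        free(v4);
        return 0;
}
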
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index d9462643155..c1c9e035d4a 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1341,6 +1341,8 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
if (rv)
goto out;
rv = check_nfsd_access(exp, rqstp);
+ if (rv)
+ fh_put(fhp);
out:
exp_put(exp);
return rv;
@@ -1515,7 +1517,7 @@ static int e_show(struct seq_file *m, void *p)
return svc_export_show(m, &svc_export_cache, cp);
}
-struct seq_operations nfs_exports_op = {
+const struct seq_operations nfs_exports_op = {
.start = e_start,
.next = e_next,
.stop = e_stop,
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 01d4ec1c88e..edf926e1062 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -814,17 +814,6 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
return p;
}
-static __be32 *
-encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p,
- struct svc_fh *fhp)
-{
- p = encode_post_op_attr(cd->rqstp, p, fhp);
- *p++ = xdr_one; /* yes, a file handle follows */
- p = encode_fh(p, fhp);
- fh_put(fhp);
- return p;
-}
-
static int
compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
const char *name, int namlen)
@@ -836,29 +825,54 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
dparent = cd->fh.fh_dentry;
exp = cd->fh.fh_export;
- fh_init(fhp, NFS3_FHSIZE);
if (isdotent(name, namlen)) {
if (namlen == 2) {
dchild = dget_parent(dparent);
if (dchild == dparent) {
/* filesystem root - cannot return filehandle for ".." */
dput(dchild);
- return 1;
+ return -ENOENT;
}
} else
dchild = dget(dparent);
} else
dchild = lookup_one_len(name, dparent, namlen);
if (IS_ERR(dchild))
- return 1;
- if (d_mountpoint(dchild) ||
- fh_compose(fhp, exp, dchild, &cd->fh) != 0 ||
- !dchild->d_inode)
- rv = 1;
+ return -ENOENT;
+ rv = -ENOENT;
+ if (d_mountpoint(dchild))
+ goto out;
+ rv = fh_compose(fhp, exp, dchild, &cd->fh);
+ if (rv)
+ goto out;
+ if (!dchild->d_inode)
+ goto out;
+ rv = 0;
+out:
dput(dchild);
return rv;
}
+__be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
+{
+ struct svc_fh fh;
+ int err;
+
+ fh_init(&fh, NFS3_FHSIZE);
+ err = compose_entry_fh(cd, &fh, name, namlen);
+ if (err) {
+ *p++ = 0;
+ *p++ = 0;
+ goto out;
+ }
+ p = encode_post_op_attr(cd->rqstp, p, &fh);
+ *p++ = xdr_one; /* yes, a file handle follows */
+ p = encode_fh(p, &fh);
+out:
+ fh_put(&fh);
+ return p;
+}
+
/*
* Encode a directory entry. This one works for both normal readdir
* and readdirplus.
@@ -929,16 +943,8 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
p = encode_entry_baggage(cd, p, name, namlen, ino);
- /* throw in readdirplus baggage */
- if (plus) {
- struct svc_fh fh;
-
- if (compose_entry_fh(cd, &fh, name, namlen) > 0) {
- *p++ = 0;
- *p++ = 0;
- } else
- p = encode_entryplus_baggage(cd, p, &fh);
- }
+ if (plus)
+ p = encode_entryplus_baggage(cd, p, name, namlen);
num_entry_words = p - cd->buffer;
} else if (cd->rqstp->rq_respages[pn+1] != NULL) {
/* temporarily encode entry into next page, then move back to
@@ -951,17 +957,8 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
p1 = encode_entry_baggage(cd, p1, name, namlen, ino);
- /* throw in readdirplus baggage */
- if (plus) {
- struct svc_fh fh;
-
- if (compose_entry_fh(cd, &fh, name, namlen) > 0) {
- /* zero out the filehandle */
- *p1++ = 0;
- *p1++ = 0;
- } else
- p1 = encode_entryplus_baggage(cd, p1, &fh);
- }
+ if (plus)
+ p = encode_entryplus_baggage(cd, p1, name, namlen);
/* determine entry word length and lengths to go in pages */
num_entry_words = p1 - tmp;
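
With the rewrite above, encode_entryplus_baggage() owns its own svc_fh: it initialises the handle, asks compose_entry_fh() (which now returns -ENOENT-style errors) for the child, and on any failure encodes the two zero words meaning "no post-op attributes" and "no filehandle follows" before releasing the handle. A simplified userspace sketch of that encode-or-zero pattern; the types and return codes are stand-ins:

#include <stdint.h>
#include <stdio.h>

struct demo_fh { int valid; };

static int compose_fh(struct demo_fh *fh, const char *name)
{
        if (name[0] == '.')     /* stand-in for the dotent/mountpoint failures */
                return -1;
        fh->valid = 1;
        return 0;
}

static uint32_t *encode_plus_baggage(uint32_t *p, const char *name)
{
        struct demo_fh fh = { 0 };

        if (compose_fh(&fh, name)) {
                *p++ = 0;       /* no attributes follow */
                *p++ = 0;       /* no filehandle follows */
                return p;
        }
        *p++ = 1;               /* attributes follow (placeholder encoding) */
        *p++ = 1;               /* a filehandle follows (placeholder encoding) */
        return p;
}

int main(void)
{
        uint32_t buf[4];
        uint32_t *end = encode_plus_baggage(buf, "..");

        printf("%ld words emitted\n", (long)(end - buf));
        return 0;
}
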
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 54b8b4140c8..725d02f210e 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -321,7 +321,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
deny = ~pas.group & pas.other;
if (deny) {
ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
- ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
+ ace->flag = eflag;
ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_GROUP;
ace++;
@@ -335,7 +335,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
if (deny) {
ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
- ace->access_mask = mask_from_posix(deny, flags);
+ ace->access_mask = deny_mask_from_posix(deny, flags);
ace->whotype = NFS4_ACL_WHO_NAMED;
ace->who = pa->e_id;
ace++;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3fd23f7acec..24e8d78f8dd 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -43,25 +43,30 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcsock.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/state.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs4.h>
+#include <linux/sunrpc/xprtsock.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
+#define NFS4_STATEID_SIZE 16
/* Index of predefined Linux callback client operations */
enum {
- NFSPROC4_CLNT_CB_NULL = 0,
+ NFSPROC4_CLNT_CB_NULL = 0,
NFSPROC4_CLNT_CB_RECALL,
+ NFSPROC4_CLNT_CB_SEQUENCE,
};
enum nfs_cb_opnum4 {
OP_CB_RECALL = 4,
+ OP_CB_SEQUENCE = 11,
};
#define NFS4_MAXTAGLEN 20
@@ -70,17 +75,29 @@ enum nfs_cb_opnum4 {
#define NFS4_dec_cb_null_sz 0
#define cb_compound_enc_hdr_sz 4
#define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2))
+#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2)
+#define cb_sequence_enc_sz (sessionid_sz + 4 + \
+ 1 /* no referring calls list yet */)
+#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4)
+
#define op_enc_sz 1
#define op_dec_sz 2
#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz (NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
1 + enc_stateid_sz + \
enc_nfs4_fh_sz)
#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + \
op_dec_sz)
+struct nfs4_rpc_args {
+ void *args_op;
+ struct nfsd4_cb_sequence args_seq;
+};
+
/*
* Generic encode routines from fs/nfs/nfs4xdr.c
*/
@@ -137,11 +154,13 @@ xdr_error: \
} while (0)
struct nfs4_cb_compound_hdr {
- int status;
- u32 ident;
+ /* args */
+ u32 ident; /* minorversion 0 only */
u32 nops;
__be32 *nops_p;
u32 minorversion;
+ /* res */
+ int status;
u32 taglen;
char *tag;
};
@@ -238,6 +257,27 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
hdr->nops++;
}
+static void
+encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ if (hdr->minorversion == 0)
+ return;
+
+ RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
+
+ WRITE32(OP_CB_SEQUENCE);
+ WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(args->cbs_clp->cl_cb_seq_nr);
+ WRITE32(0); /* slotid, always 0 */
+ WRITE32(0); /* highest slotid always 0 */
+ WRITE32(0); /* cachethis always 0 */
+ WRITE32(0); /* FIXME: support referring_call_lists */
+ hdr->nops++;
+}
+
static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
@@ -249,15 +289,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
}
static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
+nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_rpc_args *rpc_args)
{
struct xdr_stream xdr;
+ struct nfs4_delegation *args = rpc_args->args_op;
struct nfs4_cb_compound_hdr hdr = {
.ident = args->dl_ident,
+ .minorversion = rpc_args->args_seq.cbs_minorversion,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
+ encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
encode_cb_recall(&xdr, args, &hdr);
encode_cb_nops(&hdr);
return 0;
@@ -299,6 +343,57 @@ decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
return 0;
}
+/*
+ * Our current back channel implementation supports a single backchannel
+ * with a single slot.
+ */
+static int
+decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
+ struct rpc_rqst *rqstp)
+{
+ struct nfs4_sessionid id;
+ int status;
+ u32 dummy;
+ __be32 *p;
+
+ if (res->cbs_minorversion == 0)
+ return 0;
+
+ status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
+ if (status)
+ return status;
+
+ /*
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+ status = -ESERVERFAULT;
+
+ READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+ memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+ p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
+ if (memcmp(id.data, res->cbs_clp->cl_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("%s Invalid session id\n", __func__);
+ goto out;
+ }
+ READ32(dummy);
+ if (dummy != res->cbs_clp->cl_cb_seq_nr) {
+ dprintk("%s Invalid sequence number\n", __func__);
+ goto out;
+ }
+ READ32(dummy); /* slotid must be 0 */
+ if (dummy != 0) {
+ dprintk("%s Invalid slotid\n", __func__);
+ goto out;
+ }
+ /* FIXME: process highest slotid and target highest slotid */
+ status = 0;
+out:
+ return status;
+}
+
+
static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
@@ -306,7 +401,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
}
static int
-nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
+nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfsd4_cb_sequence *seq)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr;
@@ -316,6 +412,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
status = decode_cb_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ if (seq) {
+ status = decode_cb_sequence(&xdr, seq, rqstp);
+ if (status)
+ goto out;
+ }
status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
return status;
@@ -377,16 +478,15 @@ static int max_cb_time(void)
int setup_callback_client(struct nfs4_client *clp)
{
- struct sockaddr_in addr;
struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_timeout timeparms = {
.to_initval = max_cb_time(),
.to_retries = 0,
};
struct rpc_create_args args = {
- .protocol = IPPROTO_TCP,
- .address = (struct sockaddr *)&addr,
- .addrsize = sizeof(addr),
+ .protocol = XPRT_TRANSPORT_TCP,
+ .address = (struct sockaddr *) &cb->cb_addr,
+ .addrsize = cb->cb_addrlen,
.timeout = &timeparms,
.program = &cb_program,
.prognumber = cb->cb_prog,
@@ -399,13 +499,10 @@ int setup_callback_client(struct nfs4_client *clp)
if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
return -EINVAL;
-
- /* Initialize address */
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_port = htons(cb->cb_port);
- addr.sin_addr.s_addr = htonl(cb->cb_addr);
-
+ if (cb->cb_minorversion) {
+ args.bc_xprt = clp->cl_cb_xprt;
+ args.protocol = XPRT_TRANSPORT_BC_TCP;
+ }
/* Create RPC client */
client = rpc_create(&args);
if (IS_ERR(client)) {
@@ -439,42 +536,29 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
.rpc_call_done = nfsd4_cb_probe_done,
};
-static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb)
-{
- struct auth_cred acred = {
- .machine_cred = 1
- };
+static struct rpc_cred *callback_cred;
- /*
- * Note in the gss case this doesn't actually have to wait for a
- * gss upcall (or any calls to the client); this just creates a
- * non-uptodate cred which the rpc state machine will fill in with
- * a refresh_upcall later.
- */
- return rpcauth_lookup_credcache(cb->cb_client->cl_auth, &acred,
- RPCAUTH_LOOKUP_NEW);
+int set_callback_cred(void)
+{
+ callback_cred = rpc_lookup_machine_cred();
+ if (!callback_cred)
+ return -ENOMEM;
+ return 0;
}
+
void do_probe_callback(struct nfs4_client *clp)
{
struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
.rpc_argp = clp,
+ .rpc_cred = callback_cred
};
- struct rpc_cred *cred;
int status;
- cred = lookup_cb_cred(cb);
- if (IS_ERR(cred)) {
- status = PTR_ERR(cred);
- goto out;
- }
- cb->cb_cred = cred;
- msg.rpc_cred = cb->cb_cred;
status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT,
&nfsd4_cb_probe_ops, (void *)clp);
-out:
if (status) {
warn_no_callback_path(clp, status);
put_nfs4_client(clp);
@@ -503,11 +587,95 @@ nfsd4_probe_callback(struct nfs4_client *clp)
do_probe_callback(clp);
}
+/*
+ * There's currently a single callback channel slot.
+ * If the slot is available, then mark it busy. Otherwise, set the
+ * thread for sleeping on the callback RPC wait queue.
+ */
+static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
+ struct rpc_task *task)
+{
+ struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
+ u32 *ptr = (u32 *)clp->cl_sessionid.data;
+ int status = 0;
+
+ dprintk("%s: %u:%u:%u:%u\n", __func__,
+ ptr[0], ptr[1], ptr[2], ptr[3]);
+
+ if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+ rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
+ dprintk("%s slot is busy\n", __func__);
+ status = -EAGAIN;
+ goto out;
+ }
+
+ /*
+ * We'll need the clp during XDR encoding and decoding,
+ * and the sequence during decoding to verify the reply
+ */
+ args->args_seq.cbs_clp = clp;
+ task->tk_msg.rpc_resp = &args->args_seq;
+
+out:
+ dprintk("%s status=%d\n", __func__, status);
+ return status;
+}
+
+/*
+ * TODO: cb_sequence should support referring call lists, cachethis, multiple
+ * slots, and mark callback channel down on communication errors.
+ */
+static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+ struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
+ u32 minorversion = clp->cl_cb_conn.cb_minorversion;
+ int status = 0;
+
+ args->args_seq.cbs_minorversion = minorversion;
+ if (minorversion) {
+ status = nfsd41_cb_setup_sequence(clp, task);
+ if (status) {
+ if (status != -EAGAIN) {
+ /* terminate rpc task */
+ task->tk_status = status;
+ task->tk_action = NULL;
+ }
+ return;
+ }
+ }
+ rpc_call_start(task);
+}
+
+static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ dprintk("%s: minorversion=%d\n", __func__,
+ clp->cl_cb_conn.cb_minorversion);
+
+ if (clp->cl_cb_conn.cb_minorversion) {
+ /* No need for lock, access serialized in nfsd4_cb_prepare */
+ ++clp->cl_cb_seq_nr;
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_wake_up_next(&clp->cl_cb_waitq);
+ dprintk("%s: freed slot, new seqid=%d\n", __func__,
+ clp->cl_cb_seq_nr);
+
+ /* We're done looking into the sequence information */
+ task->tk_msg.rpc_resp = NULL;
+ }
+}
+
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegation *dp = calldata;
struct nfs4_client *clp = dp->dl_client;
+ nfsd4_cb_done(task, calldata);
+
switch (task->tk_status) {
case -EIO:
/* Network partition? */
@@ -520,16 +688,19 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
break;
default:
/* success, or error we can't handle */
- return;
+ goto done;
}
if (dp->dl_retries--) {
rpc_delay(task, 2*HZ);
task->tk_status = 0;
rpc_restart_call(task);
+ return;
} else {
atomic_set(&clp->cl_cb_conn.cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
}
+done:
+ kfree(task->tk_msg.rpc_argp);
}
static void nfsd4_cb_recall_release(void *calldata)
@@ -542,6 +713,7 @@ static void nfsd4_cb_recall_release(void *calldata)
}
static const struct rpc_call_ops nfsd4_cb_recall_ops = {
+ .rpc_call_prepare = nfsd4_cb_prepare,
.rpc_call_done = nfsd4_cb_recall_done,
.rpc_release = nfsd4_cb_recall_release,
};
@@ -554,17 +726,24 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
+ struct nfs4_rpc_args *args;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_argp = dp,
- .rpc_cred = clp->cl_cb_conn.cb_cred
+ .rpc_cred = callback_cred
};
- int status;
+ int status = -ENOMEM;
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (!args)
+ goto out;
+ args->args_op = dp;
+ msg.rpc_argp = args;
dp->dl_retries = 1;
status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
&nfsd4_cb_recall_ops, dp);
+out:
if (status) {
+ kfree(args);
put_nfs4_client(clp);
nfs4_put_delegation(dp);
}
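
The nfsd41_cb_setup_sequence()/nfsd4_cb_done() pair added above serialises all NFSv4.1 backchannel calls through a single slot: a caller that loses the test_and_set_bit() race sleeps on cl_cb_waitq and returns -EAGAIN, and the done path bumps the sequence number, clears the bit and wakes the next waiter. A userspace sketch of that single-slot discipline, with C11 atomics standing in for the kernel bit operations and wait queue:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag cb_slot_busy = ATOMIC_FLAG_INIT;
static unsigned int cb_seq_nr;

static int cb_slot_try_acquire(void)
{
        /* like test_and_set_bit(0, &clp->cl_cb_slot_busy) */
        if (atomic_flag_test_and_set(&cb_slot_busy))
                return -1;      /* busy: the real code sleeps on the wait queue */
        return 0;
}

static void cb_slot_release(void)
{
        /* like ++cl_cb_seq_nr; clear_bit(); rpc_wake_up_next(&cl_cb_waitq) */
        ++cb_seq_nr;
        atomic_flag_clear(&cb_slot_busy);
}

int main(void)
{
        if (cb_slot_try_acquire() == 0) {
                printf("slot acquired, next seq %u\n", cb_seq_nr + 1);
                cb_slot_release();
        }
        return 0;
}
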
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index cdfa86fa147..ba2c199592f 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -38,7 +38,6 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 7c8801769a3..bebc0c2e1b0 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -68,7 +68,6 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
u32 *bmval, u32 *writable)
{
struct dentry *dentry = cstate->current_fh.fh_dentry;
- struct svc_export *exp = cstate->current_fh.fh_export;
/*
* Check about attributes are supported by the NFSv4 server or not.
@@ -80,17 +79,13 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_attrnotsupp;
/*
- * Check FATTR4_WORD0_ACL & FATTR4_WORD0_FS_LOCATIONS can be supported
+ * Check FATTR4_WORD0_ACL can be supported
* in current environment or not.
*/
if (bmval[0] & FATTR4_WORD0_ACL) {
if (!IS_POSIXACL(dentry->d_inode))
return nfserr_attrnotsupp;
}
- if (bmval[0] & FATTR4_WORD0_FS_LOCATIONS) {
- if (exp->ex_fslocs.locations == NULL)
- return nfserr_attrnotsupp;
- }
/*
* According to spec, read-only attributes return ERR_INVAL.
@@ -123,6 +118,35 @@ nfsd4_check_open_attributes(struct svc_rqst *rqstp,
return status;
}
+static int
+is_create_with_attrs(struct nfsd4_open *open)
+{
+ return open->op_create == NFS4_OPEN_CREATE
+ && (open->op_createmode == NFS4_CREATE_UNCHECKED
+ || open->op_createmode == NFS4_CREATE_GUARDED
+ || open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1);
+}
+
+/*
+ * if error occurs when setting the acl, just clear the acl bit
+ * in the returned attr bitmap.
+ */
+static void
+do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ struct nfs4_acl *acl, u32 *bmval)
+{
+ __be32 status;
+
+ status = nfsd4_set_nfs4_acl(rqstp, fhp, acl);
+ if (status)
+ /*
+ * We should probably fail the whole open at this point,
+ * but we've already created the file, so it's too late;
+ * So this seems the least of evils:
+ */
+ bmval[0] &= ~FATTR4_WORD0_ACL;
+}
+
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
@@ -206,6 +230,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
if (status)
goto out;
+ if (is_create_with_attrs(open) && open->op_acl != NULL)
+ do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval);
+
set_change_info(&open->op_cinfo, current_fh);
fh_dup2(current_fh, &resfh);
@@ -536,12 +563,17 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_badtype;
}
- if (!status) {
- fh_unlock(&cstate->current_fh);
- set_change_info(&create->cr_cinfo, &cstate->current_fh);
- fh_dup2(&cstate->current_fh, &resfh);
- }
+ if (status)
+ goto out;
+
+ if (create->cr_acl != NULL)
+ do_set_nfs4_acl(rqstp, &resfh, create->cr_acl,
+ create->cr_bmval);
+ fh_unlock(&cstate->current_fh);
+ set_change_info(&create->cr_cinfo, &cstate->current_fh);
+ fh_dup2(&cstate->current_fh, &resfh);
+out:
fh_put(&resfh);
return status;
}
@@ -947,34 +979,6 @@ static struct nfsd4_operation nfsd4_ops[];
static const char *nfsd4_op_name(unsigned opnum);
/*
- * This is a replay of a compound for which no cache entry pages
- * were used. Encode the sequence operation, and if cachethis is FALSE
- * encode the uncache rep error on the next operation.
- */
-static __be32
-nfsd4_enc_uncached_replay(struct nfsd4_compoundargs *args,
- struct nfsd4_compoundres *resp)
-{
- struct nfsd4_op *op;
-
- dprintk("--> %s resp->opcnt %d ce_cachethis %u \n", __func__,
- resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis);
-
- /* Encode the replayed sequence operation */
- BUG_ON(resp->opcnt != 1);
- op = &args->ops[resp->opcnt - 1];
- nfsd4_encode_operation(resp, op);
-
- /*return nfserr_retry_uncached_rep in next operation. */
- if (resp->cstate.slot->sl_cache_entry.ce_cachethis == 0) {
- op = &args->ops[resp->opcnt++];
- op->status = nfserr_retry_uncached_rep;
- nfsd4_encode_operation(resp, op);
- }
- return op->status;
-}
-
-/*
* Enforce NFSv4.1 COMPOUND ordering rules.
*
* TODO:
@@ -1083,13 +1087,10 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
BUG_ON(op->status == nfs_ok);
encode_op:
- /* Only from SEQUENCE or CREATE_SESSION */
+ /* Only from SEQUENCE */
if (resp->cstate.status == nfserr_replay_cache) {
dprintk("%s NFS4.1 replay from cache\n", __func__);
- if (nfsd4_not_cached(resp))
- status = nfsd4_enc_uncached_replay(args, resp);
- else
- status = op->status;
+ status = op->status;
goto out;
}
if (op->status == nfserr_replay_me) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 980a216a48c..2153f9bdbeb 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -55,6 +55,7 @@
#include <linux/lockd/bind.h>
#include <linux/module.h>
#include <linux/sunrpc/svcauth_gss.h>
+#include <linux/sunrpc/clnt.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -413,36 +414,65 @@ gen_sessionid(struct nfsd4_session *ses)
}
/*
- * Give the client the number of slots it requests bound by
- * NFSD_MAX_SLOTS_PER_SESSION and by sv_drc_max_pages.
+ * The protocol defines ca_maxresponsesize_cached to include the size of
+ * the rpc header, but all we need to cache is the data starting after
+ * the end of the initial SEQUENCE operation--the rest we regenerate
+ * each time. Therefore we can advertise a ca_maxresponsesize_cached
+ * value that is the number of bytes in our cache plus a few additional
+ * bytes. In order to stay on the safe side, and not promise more than
+ * we can cache, those additional bytes must be the minimum possible: 24
+ * bytes of rpc header (xid through accept state, with AUTH_NULL
+ * verifier), 12 for the compound header (with zero-length tag), and 44
+ * for the SEQUENCE op response:
+ */
+#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
+
+/*
+ * Give the client the number of ca_maxresponsesize_cached slots it
+ * requests, of size bounded by NFSD_SLOT_CACHE_SIZE,
+ * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more
+ * than NFSD_MAX_SLOTS_PER_SESSION.
*
- * If we run out of pages (sv_drc_pages_used == sv_drc_max_pages) we
- * should (up to a point) re-negotiate active sessions and reduce their
- * slot usage to make rooom for new connections. For now we just fail the
- * create session.
+ * If we run out of reserved DRC memory we should (up to a point)
+ * re-negotiate active sessions and reduce their slot usage to make
+ * room for new connections. For now we just fail the create session.
*/
-static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan)
+static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan)
{
- int status = 0, np = fchan->maxreqs * NFSD_PAGES_PER_SLOT;
+ int mem, size = fchan->maxresp_cached;
if (fchan->maxreqs < 1)
return nfserr_inval;
- else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
- fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
- spin_lock(&nfsd_serv->sv_lock);
- if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages)
- np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used;
- nfsd_serv->sv_drc_pages_used += np;
- spin_unlock(&nfsd_serv->sv_lock);
+ if (size < NFSD_MIN_HDR_SEQ_SZ)
+ size = NFSD_MIN_HDR_SEQ_SZ;
+ size -= NFSD_MIN_HDR_SEQ_SZ;
+ if (size > NFSD_SLOT_CACHE_SIZE)
+ size = NFSD_SLOT_CACHE_SIZE;
+
+ /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */
+ mem = fchan->maxreqs * size;
+ if (mem > NFSD_MAX_MEM_PER_SESSION) {
+ fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size;
+ if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
+ fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
+ mem = fchan->maxreqs * size;
+ }
- if (np <= 0) {
- status = nfserr_resource;
- fchan->maxreqs = 0;
- } else
- fchan->maxreqs = np / NFSD_PAGES_PER_SLOT;
+ spin_lock(&nfsd_drc_lock);
+ /* bound the total session drc memory usage */
+ if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) {
+ fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size;
+ mem = fchan->maxreqs * size;
+ }
+ nfsd_drc_mem_used += mem;
+ spin_unlock(&nfsd_drc_lock);
- return status;
+ if (fchan->maxreqs == 0)
+ return nfserr_serverfault;
+
+ fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ;
+ return 0;
}
/*
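
set_forechannel_drc_size() above derives the usable per-slot cache size from ca_maxresponsesize_cached minus the fixed 80-byte (24 + 12 + 44) header allowance, then clamps the slot count against NFSD_SLOT_CACHE_SIZE, NFSD_MAX_MEM_PER_SESSION and the global DRC budget. The same arithmetic as a standalone worked example; every limit below is an illustrative number, not the kernel constant:

#include <stdio.h>

#define MIN_HDR_SEQ_SZ          (24 + 12 + 44) /* rpc + compound + SEQUENCE */
#define SLOT_CACHE_SIZE         1024           /* stand-in per-slot cap */
#define MAX_MEM_PER_SESSION     (32 * 1024)    /* stand-in per-session cap */
#define MAX_SLOTS_PER_SESSION   32             /* stand-in slot-count cap */

int main(void)
{
        int maxresp_cached = 2048, maxreqs = 64;
        int drc_max_mem = 64 * 1024, drc_mem_used = 48 * 1024;
        int size, mem;

        size = maxresp_cached;
        if (size < MIN_HDR_SEQ_SZ)
                size = MIN_HDR_SEQ_SZ;
        size -= MIN_HDR_SEQ_SZ;                 /* cacheable bytes per slot */
        if (size > SLOT_CACHE_SIZE)
                size = SLOT_CACHE_SIZE;

        mem = maxreqs * size;
        if (mem > MAX_MEM_PER_SESSION) {        /* per-session bound */
                maxreqs = MAX_MEM_PER_SESSION / size;
                if (maxreqs > MAX_SLOTS_PER_SESSION)
                        maxreqs = MAX_SLOTS_PER_SESSION;
                mem = maxreqs * size;
        }
        if (mem + drc_mem_used > drc_max_mem) { /* global DRC bound */
                maxreqs = (drc_max_mem - drc_mem_used) / size;
                mem = maxreqs * size;
        }

        printf("slot size %d, slots %d, session drc %d bytes\n",
               size, maxreqs, mem);
        return 0;
}
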
@@ -466,36 +496,41 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp,
fchan->maxresp_sz = maxcount;
session_fchan->maxresp_sz = fchan->maxresp_sz;
- /* Set the max response cached size our default which is
- * a multiple of PAGE_SIZE and small */
- session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
- fchan->maxresp_cached = session_fchan->maxresp_cached;
-
/* Use the client's maxops if possible */
if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND)
fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
session_fchan->maxops = fchan->maxops;
- /* try to use the client requested number of slots */
- if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
- fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
-
/* FIXME: Error means no more DRC pages so the server should
* recover pages from existing sessions. For now fail session
* creation.
*/
- status = set_forechannel_maxreqs(fchan);
+ status = set_forechannel_drc_size(fchan);
+ session_fchan->maxresp_cached = fchan->maxresp_cached;
session_fchan->maxreqs = fchan->maxreqs;
+
+ dprintk("%s status %d\n", __func__, status);
return status;
}
+static void
+free_session_slots(struct nfsd4_session *ses)
+{
+ int i;
+
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++)
+ kfree(ses->se_slots[i]);
+}
+
static int
alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
struct nfsd4_create_session *cses)
{
struct nfsd4_session *new, tmp;
- int idx, status = nfserr_resource, slotsize;
+ struct nfsd4_slot *sp;
+ int idx, slotsize, cachesize, i;
+ int status;
memset(&tmp, 0, sizeof(tmp));
@@ -506,14 +541,27 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
if (status)
goto out;
- /* allocate struct nfsd4_session and slot table in one piece */
- slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot);
+ BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot)
+ + sizeof(struct nfsd4_session) > PAGE_SIZE);
+
+ status = nfserr_serverfault;
+ /* allocate struct nfsd4_session and slot table pointers in one piece */
+ slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *);
new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
if (!new)
goto out;
memcpy(new, &tmp, sizeof(*new));
+ /* allocate each struct nfsd4_slot and data cache in one piece */
+ cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+ for (i = 0; i < new->se_fchannel.maxreqs; i++) {
+ sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL);
+ if (!sp)
+ goto out_free;
+ new->se_slots[i] = sp;
+ }
+
new->se_client = clp;
gen_sessionid(new);
idx = hash_sessionid(&new->se_sessionid);
@@ -530,6 +578,10 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
status = nfs_ok;
out:
return status;
+out_free:
+ free_session_slots(new);
+ kfree(new);
+ goto out;
}
/* caller must hold sessionid_lock */
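
alloc_init_session() above now makes one allocation for the session plus its array of slot pointers, then one allocation per slot with the reply cache appended after the struct, and the new out_free label unwinds the per-slot allocations on failure. The same two-level allocation pattern in a standalone sketch with simplified stand-in types:

#include <stdlib.h>

struct demo_slot {
        size_t datalen;
        unsigned char data[];           /* per-slot reply cache follows */
};

struct demo_session {
        int nr_slots;
        struct demo_slot *slots[];      /* pointer table follows the struct */
};

static struct demo_session *alloc_session(int nr_slots, size_t cachesize)
{
        struct demo_session *s;
        int i;

        s = calloc(1, sizeof(*s) + nr_slots * sizeof(struct demo_slot *));
        if (!s)
                return NULL;
        s->nr_slots = nr_slots;
        for (i = 0; i < nr_slots; i++) {
                s->slots[i] = calloc(1, sizeof(struct demo_slot) + cachesize);
                if (!s->slots[i])
                        goto out_free;
        }
        return s;
out_free:
        while (i--)                     /* unwind the slots allocated so far */
                free(s->slots[i]);
        free(s);
        return NULL;
}

int main(void)
{
        struct demo_session *s = alloc_session(4, 1024);

        if (s) {
                for (int i = 0; i < s->nr_slots; i++)
                        free(s->slots[i]);
                free(s);
        }
        return 0;
}
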
@@ -572,19 +624,16 @@ release_session(struct nfsd4_session *ses)
nfsd4_put_session(ses);
}
-static void nfsd4_release_respages(struct page **respages, short resused);
-
void
free_session(struct kref *kref)
{
struct nfsd4_session *ses;
- int i;
ses = container_of(kref, struct nfsd4_session, se_ref);
- for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
- struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry;
- nfsd4_release_respages(e->ce_respages, e->ce_resused);
- }
+ spin_lock(&nfsd_drc_lock);
+ nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE;
+ spin_unlock(&nfsd_drc_lock);
+ free_session_slots(ses);
kfree(ses);
}
@@ -647,18 +696,14 @@ shutdown_callback_client(struct nfs4_client *clp)
clp->cl_cb_conn.cb_client = NULL;
rpc_shutdown_client(clnt);
}
- if (clp->cl_cb_conn.cb_cred) {
- put_rpccred(clp->cl_cb_conn.cb_cred);
- clp->cl_cb_conn.cb_cred = NULL;
- }
}
static inline void
free_client(struct nfs4_client *clp)
{
shutdown_callback_client(clp);
- nfsd4_release_respages(clp->cl_slot.sl_cache_entry.ce_respages,
- clp->cl_slot.sl_cache_entry.ce_resused);
+ if (clp->cl_cb_xprt)
+ svc_xprt_put(clp->cl_cb_xprt);
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
@@ -714,25 +759,6 @@ expire_client(struct nfs4_client *clp)
put_nfs4_client(clp);
}
-static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir)
-{
- struct nfs4_client *clp;
-
- clp = alloc_client(name);
- if (clp == NULL)
- return NULL;
- memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
- atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
- INIT_LIST_HEAD(&clp->cl_idhash);
- INIT_LIST_HEAD(&clp->cl_strhash);
- INIT_LIST_HEAD(&clp->cl_openowners);
- INIT_LIST_HEAD(&clp->cl_delegations);
- INIT_LIST_HEAD(&clp->cl_sessions);
- INIT_LIST_HEAD(&clp->cl_lru);
- return clp;
-}
-
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
memcpy(target->cl_verifier.data, source->data,
@@ -795,6 +821,46 @@ static void gen_confirm(struct nfs4_client *clp)
*p++ = i++;
}
+static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+ struct svc_rqst *rqstp, nfs4_verifier *verf)
+{
+ struct nfs4_client *clp;
+ struct sockaddr *sa = svc_addr(rqstp);
+ char *princ;
+
+ clp = alloc_client(name);
+ if (clp == NULL)
+ return NULL;
+
+ princ = svc_gss_principal(rqstp);
+ if (princ) {
+ clp->cl_principal = kstrdup(princ, GFP_KERNEL);
+ if (clp->cl_principal == NULL) {
+ free_client(clp);
+ return NULL;
+ }
+ }
+
+ memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
+ atomic_set(&clp->cl_count, 1);
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ INIT_LIST_HEAD(&clp->cl_idhash);
+ INIT_LIST_HEAD(&clp->cl_strhash);
+ INIT_LIST_HEAD(&clp->cl_openowners);
+ INIT_LIST_HEAD(&clp->cl_delegations);
+ INIT_LIST_HEAD(&clp->cl_sessions);
+ INIT_LIST_HEAD(&clp->cl_lru);
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ copy_verf(clp, verf);
+ rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
+ clp->cl_flavor = rqstp->rq_flavor;
+ copy_cred(&clp->cl_cred, &rqstp->rq_cred);
+ gen_confirm(clp);
+
+ return clp;
+}
+
static int check_name(struct xdr_netobj name)
{
if (name.len == 0)
@@ -902,93 +968,40 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
return NULL;
}
-/* a helper function for parse_callback */
-static int
-parse_octet(unsigned int *lenp, char **addrp)
-{
- unsigned int len = *lenp;
- char *p = *addrp;
- int n = -1;
- char c;
-
- for (;;) {
- if (!len)
- break;
- len--;
- c = *p++;
- if (c == '.')
- break;
- if ((c < '0') || (c > '9')) {
- n = -1;
- break;
- }
- if (n < 0)
- n = 0;
- n = (n * 10) + (c - '0');
- if (n > 255) {
- n = -1;
- break;
- }
- }
- *lenp = len;
- *addrp = p;
- return n;
-}
-
-/* parse and set the setclientid ipv4 callback address */
-static int
-parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp)
-{
- int temp = 0;
- u32 cbaddr = 0;
- u16 cbport = 0;
- u32 addrlen = addr_len;
- char *addr = addr_val;
- int i, shift;
-
- /* ipaddress */
- shift = 24;
- for(i = 4; i > 0 ; i--) {
- if ((temp = parse_octet(&addrlen, &addr)) < 0) {
- return 0;
- }
- cbaddr |= (temp << shift);
- if (shift > 0)
- shift -= 8;
- }
- *cbaddrp = cbaddr;
-
- /* port */
- shift = 8;
- for(i = 2; i > 0 ; i--) {
- if ((temp = parse_octet(&addrlen, &addr)) < 0) {
- return 0;
- }
- cbport |= (temp << shift);
- if (shift > 0)
- shift -= 8;
- }
- *cbportp = cbport;
- return 1;
-}
-
static void
-gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
+gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
{
struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
-
- /* Currently, we only support tcp for the callback channel */
- if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3))
+ unsigned short expected_family;
+
+ /* Currently, we only support tcp and tcp6 for the callback channel */
+ if (se->se_callback_netid_len == 3 &&
+ !memcmp(se->se_callback_netid_val, "tcp", 3))
+ expected_family = AF_INET;
+ else if (se->se_callback_netid_len == 4 &&
+ !memcmp(se->se_callback_netid_val, "tcp6", 4))
+ expected_family = AF_INET6;
+ else
goto out_err;
- if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
- &cb->cb_addr, &cb->cb_port)))
+ cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
+ se->se_callback_addr_len,
+ (struct sockaddr *) &cb->cb_addr,
+ sizeof(cb->cb_addr));
+
+ if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family)
goto out_err;
+
+ if (cb->cb_addr.ss_family == AF_INET6)
+ ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid;
+
cb->cb_minorversion = 0;
cb->cb_prog = se->se_callback_prog;
cb->cb_ident = se->se_callback_ident;
return;
out_err:
+ cb->cb_addr.ss_family = AF_UNSPEC;
+ cb->cb_addrlen = 0;
dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
"will not receive delegations\n",
clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
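
gen_callback() above replaces the hand-rolled parse_octet()/parse_ipv4() code with rpc_uaddr2sockaddr(), which handles both the tcp and tcp6 netids and the universal address form addr.p1.p2, where the port is p1 * 256 + p2. A userspace illustration of the IPv4 case only; this is not the kernel helper, just the address format it parses:

#include <stdio.h>

static int parse_uaddr4(const char *uaddr, unsigned char ip[4], unsigned *port)
{
        unsigned o[6];
        int i;

        if (sscanf(uaddr, "%u.%u.%u.%u.%u.%u",
                   &o[0], &o[1], &o[2], &o[3], &o[4], &o[5]) != 6)
                return -1;
        for (i = 0; i < 6; i++)
                if (o[i] > 255)
                        return -1;
        for (i = 0; i < 4; i++)
                ip[i] = (unsigned char)o[i];
        *port = o[4] * 256 + o[5];      /* high byte, low byte */
        return 0;
}

int main(void)
{
        unsigned char ip[4];
        unsigned port;

        if (parse_uaddr4("192.0.2.1.3.255", ip, &port) == 0)
                printf("%u.%u.%u.%u port %u\n",
                       ip[0], ip[1], ip[2], ip[3], port);
        return 0;
}
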
@@ -996,175 +1009,87 @@ out_err:
return;
}
-void
-nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
-{
- struct nfsd4_compoundres *resp = rqstp->rq_resp;
-
- resp->cstate.statp = statp;
-}
-
/*
- * Dereference the result pages.
+ * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
*/
-static void
-nfsd4_release_respages(struct page **respages, short resused)
+void
+nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
- int i;
+ struct nfsd4_slot *slot = resp->cstate.slot;
+ unsigned int base;
- dprintk("--> %s\n", __func__);
- for (i = 0; i < resused; i++) {
- if (!respages[i])
- continue;
- put_page(respages[i]);
- respages[i] = NULL;
- }
-}
+ dprintk("--> %s slot %p\n", __func__, slot);
-static void
-nfsd4_copy_pages(struct page **topages, struct page **frompages, short count)
-{
- int i;
+ slot->sl_opcnt = resp->opcnt;
+ slot->sl_status = resp->cstate.status;
- for (i = 0; i < count; i++) {
- topages[i] = frompages[i];
- if (!topages[i])
- continue;
- get_page(topages[i]);
+ if (nfsd4_not_cached(resp)) {
+ slot->sl_datalen = 0;
+ return;
}
+ slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
+ base = (char *)resp->cstate.datap -
+ (char *)resp->xbuf->head[0].iov_base;
+ if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
+ slot->sl_datalen))
+ WARN("%s: sessions DRC could not cache compound\n", __func__);
+ return;
}
/*
- * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous
- * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total
- * length of the XDR response is less than se_fmaxresp_cached
- * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a
- * of the reply (e.g. readdir).
+ * Encode the replay sequence operation from the slot values.
+ * If cachethis is FALSE encode the uncached rep error on the next
+ * operation which sets resp->p and increments resp->opcnt for
+ * nfs4svc_encode_compoundres.
*
- * Store the base and length of the rq_req.head[0] page
- * of the NFSv4.1 data, just past the rpc header.
*/
-void
-nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
+static __be32
+nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
+ struct nfsd4_compoundres *resp)
{
- struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
- struct svc_rqst *rqstp = resp->rqstp;
- struct nfsd4_compoundargs *args = rqstp->rq_argp;
- struct nfsd4_op *op = &args->ops[resp->opcnt];
- struct kvec *resv = &rqstp->rq_res.head[0];
-
- dprintk("--> %s entry %p\n", __func__, entry);
-
- /* Don't cache a failed OP_SEQUENCE. */
- if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status)
- return;
+ struct nfsd4_op *op;
+ struct nfsd4_slot *slot = resp->cstate.slot;
- nfsd4_release_respages(entry->ce_respages, entry->ce_resused);
- entry->ce_opcnt = resp->opcnt;
- entry->ce_status = resp->cstate.status;
+ dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
+ resp->opcnt, resp->cstate.slot->sl_cachethis);
- /*
- * Don't need a page to cache just the sequence operation - the slot
- * does this for us!
- */
+ /* Encode the replayed sequence operation */
+ op = &args->ops[resp->opcnt - 1];
+ nfsd4_encode_operation(resp, op);
- if (nfsd4_not_cached(resp)) {
- entry->ce_resused = 0;
- entry->ce_rpchdrlen = 0;
- dprintk("%s Just cache SEQUENCE. ce_cachethis %d\n", __func__,
- resp->cstate.slot->sl_cache_entry.ce_cachethis);
- return;
- }
- entry->ce_resused = rqstp->rq_resused;
- if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1)
- entry->ce_resused = NFSD_PAGES_PER_SLOT + 1;
- nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages,
- entry->ce_resused);
- entry->ce_datav.iov_base = resp->cstate.statp;
- entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp -
- (char *)page_address(rqstp->rq_respages[0]));
- /* Current request rpc header length*/
- entry->ce_rpchdrlen = (char *)resp->cstate.statp -
- (char *)page_address(rqstp->rq_respages[0]);
-}
-
-/*
- * We keep the rpc header, but take the nfs reply from the replycache.
- */
-static int
-nfsd41_copy_replay_data(struct nfsd4_compoundres *resp,
- struct nfsd4_cache_entry *entry)
-{
- struct svc_rqst *rqstp = resp->rqstp;
- struct kvec *resv = &resp->rqstp->rq_res.head[0];
- int len;
-
- /* Current request rpc header length*/
- len = (char *)resp->cstate.statp -
- (char *)page_address(rqstp->rq_respages[0]);
- if (entry->ce_datav.iov_len + len > PAGE_SIZE) {
- dprintk("%s v41 cached reply too large (%Zd).\n", __func__,
- entry->ce_datav.iov_len);
- return 0;
+ /* Return nfserr_retry_uncached_rep in next operation. */
+ if (args->opcnt > 1 && slot->sl_cachethis == 0) {
+ op = &args->ops[resp->opcnt++];
+ op->status = nfserr_retry_uncached_rep;
+ nfsd4_encode_operation(resp, op);
}
- /* copy the cached reply nfsd data past the current rpc header */
- memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base,
- entry->ce_datav.iov_len);
- resv->iov_len = len + entry->ce_datav.iov_len;
- return 1;
+ return op->status;
}
/*
- * Keep the first page of the replay. Copy the NFSv4.1 data from the first
- * cached page. Replace any futher replay pages from the cache.
+ * The sequence operation is not cached because we can use the slot and
+ * session values.
*/
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
struct nfsd4_sequence *seq)
{
- struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
+ struct nfsd4_slot *slot = resp->cstate.slot;
__be32 status;
- dprintk("--> %s entry %p\n", __func__, entry);
-
- /*
- * If this is just the sequence operation, we did not keep
- * a page in the cache entry because we can just use the
- * slot info stored in struct nfsd4_sequence that was checked
- * against the slot in nfsd4_sequence().
- *
- * This occurs when seq->cachethis is FALSE, or when the client
- * session inactivity timer fires and a solo sequence operation
- * is sent (lease renewal).
- */
- if (seq && nfsd4_not_cached(resp)) {
- seq->maxslots = resp->cstate.session->se_fchannel.maxreqs;
- return nfs_ok;
- }
-
- if (!nfsd41_copy_replay_data(resp, entry)) {
- /*
- * Not enough room to use the replay rpc header, send the
- * cached header. Release all the allocated result pages.
- */
- svc_free_res_pages(resp->rqstp);
- nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages,
- entry->ce_resused);
- } else {
- /* Release all but the first allocated result page */
+ dprintk("--> %s slot %p\n", __func__, slot);
- resp->rqstp->rq_resused--;
- svc_free_res_pages(resp->rqstp);
+ /* Either returns 0 or nfserr_retry_uncached */
+ status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
+ if (status == nfserr_retry_uncached_rep)
+ return status;
- nfsd4_copy_pages(&resp->rqstp->rq_respages[1],
- &entry->ce_respages[1],
- entry->ce_resused - 1);
- }
+ /* The sequence operation has been encoded, cstate->datap set. */
+ memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);
- resp->rqstp->rq_resused = entry->ce_resused;
- resp->opcnt = entry->ce_opcnt;
- resp->cstate.iovlen = entry->ce_datav.iov_len + entry->ce_rpchdrlen;
- status = entry->ce_status;
+ resp->opcnt = slot->sl_opcnt;
+ resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
+ status = slot->sl_status;
return status;
}
@@ -1194,13 +1119,15 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
int status;
unsigned int strhashval;
char dname[HEXDIR_LEN];
+ char addr_str[INET6_ADDRSTRLEN];
nfs4_verifier verf = exid->verifier;
- u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr;
+ struct sockaddr *sa = svc_addr(rqstp);
+ rpc_ntop(sa, addr_str, sizeof(addr_str));
dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
- " ip_addr=%u flags %x, spa_how %d\n",
+ "ip_addr=%s flags %x, spa_how %d\n",
__func__, rqstp, exid, exid->clname.len, exid->clname.data,
- ip_addr, exid->flags, exid->spa_how);
+ addr_str, exid->flags, exid->spa_how);
if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
return nfserr_inval;
@@ -1281,28 +1208,23 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
out_new:
/* Normal case */
- new = create_client(exid->clname, dname);
+ new = create_client(exid->clname, dname, rqstp, &verf);
if (new == NULL) {
- status = nfserr_resource;
+ status = nfserr_serverfault;
goto out;
}
- copy_verf(new, &verf);
- copy_cred(&new->cl_cred, &rqstp->rq_cred);
- new->cl_addr = ip_addr;
gen_clid(new);
- gen_confirm(new);
add_to_unconfirmed(new, strhashval);
out_copy:
exid->clientid.cl_boot = new->cl_clientid.cl_boot;
exid->clientid.cl_id = new->cl_clientid.cl_id;
- new->cl_slot.sl_seqid = 0;
exid->seqid = 1;
nfsd4_set_ex_flags(new, exid);
dprintk("nfsd4_exchange_id seqid %d flags %x\n",
- new->cl_slot.sl_seqid, new->cl_exchange_flags);
+ new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
status = nfs_ok;
out:
@@ -1313,40 +1235,60 @@ error:
}
static int
-check_slot_seqid(u32 seqid, struct nfsd4_slot *slot)
+check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
- dprintk("%s enter. seqid %d slot->sl_seqid %d\n", __func__, seqid,
- slot->sl_seqid);
+ dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
+ slot_seqid);
/* The slot is in use, and no response has been sent. */
- if (slot->sl_inuse) {
- if (seqid == slot->sl_seqid)
+ if (slot_inuse) {
+ if (seqid == slot_seqid)
return nfserr_jukebox;
else
return nfserr_seq_misordered;
}
/* Normal */
- if (likely(seqid == slot->sl_seqid + 1))
+ if (likely(seqid == slot_seqid + 1))
return nfs_ok;
/* Replay */
- if (seqid == slot->sl_seqid)
+ if (seqid == slot_seqid)
return nfserr_replay_cache;
/* Wraparound */
- if (seqid == 1 && (slot->sl_seqid + 1) == 0)
+ if (seqid == 1 && (slot_seqid + 1) == 0)
return nfs_ok;
/* Misordered replay or misordered new request */
return nfserr_seq_misordered;
}
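
check_slot_seqid() now takes plain (seqid, slot_seqid, slot_inuse) values, so the same decision table serves both session slots and the per-client create_session slot. The sketch below restates that state machine as standalone C; the enum names are stand-ins for the nfserr_* codes, and the wraparound case relies on 32-bit unsigned arithmetic exactly as in the function above.

#include <stdio.h>
#include <stdint.h>

enum seq_result {
        SEQ_OK,          /* normal: seqid == slot_seqid + 1             */
        SEQ_REPLAY,      /* retransmission: seqid == slot_seqid         */
        SEQ_IN_PROGRESS, /* slot busy, same seqid (nfserr_jukebox-like) */
        SEQ_MISORDERED,  /* anything else                               */
};

static enum seq_result check_slot_seqid(uint32_t seqid, uint32_t slot_seqid,
                                        int slot_inuse)
{
        if (slot_inuse)
                return seqid == slot_seqid ? SEQ_IN_PROGRESS : SEQ_MISORDERED;
        if (seqid == slot_seqid + 1)            /* normal progression */
                return SEQ_OK;
        if (seqid == slot_seqid)                /* replay of the last request */
                return SEQ_REPLAY;
        if (seqid == 1 && slot_seqid + 1 == 0)  /* 32-bit wraparound */
                return SEQ_OK;
        return SEQ_MISORDERED;
}

int main(void)
{
        printf("%d %d %d\n",
               check_slot_seqid(5, 4, 0),             /* SEQ_OK     */
               check_slot_seqid(4, 4, 0),             /* SEQ_REPLAY */
               check_slot_seqid(1, UINT32_MAX, 0));   /* wraparound -> SEQ_OK */
        return 0;
}
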
+/*
+ * Cache the create session result into the create session single DRC
+ * slot cache by saving the xdr structure. sl_seqid has been set.
+ * Do this for solo or embedded create session operations.
+ */
+static void
+nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
+ struct nfsd4_clid_slot *slot, int nfserr)
+{
+ slot->sl_status = nfserr;
+ memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
+}
+
+static __be32
+nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
+ struct nfsd4_clid_slot *slot)
+{
+ memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
+ return slot->sl_status;
+}
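
With the clid slot, a CREATE_SESSION reply is cached by saving the decoded result structure and its status, and replayed by copying them back, with no XDR page cache involved. A small sketch of that pattern follows; struct cs_result and struct clid_slot are invented types that mirror nfsd4_create_session and nfsd4_clid_slot only loosely.

#include <stdio.h>
#include <string.h>

struct cs_result {              /* stand-in for struct nfsd4_create_session */
        unsigned int session_id;
        unsigned int flags;
};

struct clid_slot {              /* stand-in for struct nfsd4_clid_slot */
        unsigned int sl_seqid;
        int sl_status;
        struct cs_result sl_saved;
};

/* Cache the reply for a possible retransmission of the same seqid. */
static void cache_create_session(struct clid_slot *slot,
                                 const struct cs_result *res, int status)
{
        slot->sl_status = status;
        memcpy(&slot->sl_saved, res, sizeof(*res));
}

/* Replay: hand back the saved result and its original status. */
static int replay_create_session(const struct clid_slot *slot,
                                 struct cs_result *res)
{
        memcpy(res, &slot->sl_saved, sizeof(*res));
        return slot->sl_status;
}

int main(void)
{
        struct clid_slot slot = { .sl_seqid = 1 };
        struct cs_result res = { .session_id = 42, .flags = 0 };
        struct cs_result replayed;

        cache_create_session(&slot, &res, 0);
        printf("replay status %d, session %u\n",
               replay_create_session(&slot, &replayed), replayed.session_id);
        return 0;
}
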
+
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_create_session *cr_ses)
{
- u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr;
- struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
- struct nfsd4_slot *slot = NULL;
+ struct nfsd4_clid_slot *cs_slot = NULL;
int status = 0;
nfs4_lock_state();
@@ -1354,40 +1296,38 @@ nfsd4_create_session(struct svc_rqst *rqstp,
conf = find_confirmed_client(&cr_ses->clientid);
if (conf) {
- slot = &conf->cl_slot;
- status = check_slot_seqid(cr_ses->seqid, slot);
+ cs_slot = &conf->cl_cs_slot;
+ status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status == nfserr_replay_cache) {
dprintk("Got a create_session replay! seqid= %d\n",
- slot->sl_seqid);
- cstate->slot = slot;
- cstate->status = status;
+ cs_slot->sl_seqid);
/* Return the cached reply status */
- status = nfsd4_replay_cache_entry(resp, NULL);
+ status = nfsd4_replay_create_session(cr_ses, cs_slot);
goto out;
- } else if (cr_ses->seqid != conf->cl_slot.sl_seqid + 1) {
+ } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
status = nfserr_seq_misordered;
dprintk("Sequence misordered!\n");
dprintk("Expected seqid= %d but got seqid= %d\n",
- slot->sl_seqid, cr_ses->seqid);
+ cs_slot->sl_seqid, cr_ses->seqid);
goto out;
}
- conf->cl_slot.sl_seqid++;
+ cs_slot->sl_seqid++;
} else if (unconf) {
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
- (ip_addr != unconf->cl_addr)) {
+ !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
status = nfserr_clid_inuse;
goto out;
}
- slot = &unconf->cl_slot;
- status = check_slot_seqid(cr_ses->seqid, slot);
+ cs_slot = &unconf->cl_cs_slot;
+ status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status) {
/* an unconfirmed replay returns misordered */
status = nfserr_seq_misordered;
- goto out;
+ goto out_cache;
}
- slot->sl_seqid++; /* from 0 to 1 */
+ cs_slot->sl_seqid++; /* from 0 to 1 */
move_to_confirmed(unconf);
/*
@@ -1396,6 +1336,19 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cr_ses->flags &= ~SESSION4_PERSIST;
cr_ses->flags &= ~SESSION4_RDMA;
+ if (cr_ses->flags & SESSION4_BACK_CHAN) {
+ unconf->cl_cb_xprt = rqstp->rq_xprt;
+ svc_xprt_get(unconf->cl_cb_xprt);
+ rpc_copy_addr(
+ (struct sockaddr *)&unconf->cl_cb_conn.cb_addr,
+ sa);
+ unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
+ unconf->cl_cb_conn.cb_minorversion =
+ cstate->minorversion;
+ unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
+ unconf->cl_cb_seq_nr = 1;
+ nfsd4_probe_callback(unconf);
+ }
conf = unconf;
} else {
status = nfserr_stale_clientid;
@@ -1408,12 +1361,11 @@ nfsd4_create_session(struct svc_rqst *rqstp,
memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
- cr_ses->seqid = slot->sl_seqid;
+ cr_ses->seqid = cs_slot->sl_seqid;
- slot->sl_inuse = true;
- cstate->slot = slot;
- /* Ensure a page is used for the cache */
- slot->sl_cache_entry.ce_cachethis = 1;
+out_cache:
+ /* cache solo and embedded create sessions under the state lock */
+ nfsd4_cache_create_session(cr_ses, cs_slot, status);
out:
nfs4_unlock_state();
dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1478,18 +1430,23 @@ nfsd4_sequence(struct svc_rqst *rqstp,
if (seq->slotid >= session->se_fchannel.maxreqs)
goto out;
- slot = &session->se_slots[seq->slotid];
+ slot = session->se_slots[seq->slotid];
dprintk("%s: slotid %d\n", __func__, seq->slotid);
- status = check_slot_seqid(seq->seqid, slot);
+ /* We do not negotiate the number of slots yet, so set the
+ * maxslots to the session maxreqs, which is used to encode
+ * sr_highest_slotid and sr_target_slotid to maxslots */
+ seq->maxslots = session->se_fchannel.maxreqs;
+
+ status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
if (status == nfserr_replay_cache) {
cstate->slot = slot;
cstate->session = session;
/* Return the cached reply status and set cstate->status
- * for nfsd4_svc_encode_compoundres processing */
+ * for nfsd4_proc_compound processing */
status = nfsd4_replay_cache_entry(resp, seq);
cstate->status = nfserr_replay_cache;
- goto replay_cache;
+ goto out;
}
if (status)
goto out;
@@ -1497,23 +1454,23 @@ nfsd4_sequence(struct svc_rqst *rqstp,
/* Success! bump slot seqid */
slot->sl_inuse = true;
slot->sl_seqid = seq->seqid;
- slot->sl_cache_entry.ce_cachethis = seq->cachethis;
- /* Always set the cache entry cachethis for solo sequence */
- if (nfsd4_is_solo_sequence(resp))
- slot->sl_cache_entry.ce_cachethis = 1;
+ slot->sl_cachethis = seq->cachethis;
cstate->slot = slot;
cstate->session = session;
-replay_cache:
- /* Renew the clientid on success and on replay.
- * Hold a session reference until done processing the compound:
+ /* Hold a session reference until done processing the compound:
* nfsd4_put_session called only if the cstate slot is set.
*/
- renew_client(session->se_client);
nfsd4_get_session(session);
out:
spin_unlock(&sessionid_lock);
+ /* Renew the clientid on success and on replay */
+ if (cstate->session) {
+ nfs4_lock_state();
+ renew_client(session->se_client);
+ nfs4_unlock_state();
+ }
dprintk("%s: return %d\n", __func__, ntohl(status));
return status;
}
@@ -1522,7 +1479,7 @@ __be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid *setclid)
{
- struct sockaddr_in *sin = svc_addr_in(rqstp);
+ struct sockaddr *sa = svc_addr(rqstp);
struct xdr_netobj clname = {
.len = setclid->se_namelen,
.data = setclid->se_name,
@@ -1531,7 +1488,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
unsigned int strhashval;
struct nfs4_client *conf, *unconf, *new;
__be32 status;
- char *princ;
char dname[HEXDIR_LEN];
if (!check_name(clname))
@@ -1554,8 +1510,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* RFC 3530 14.2.33 CASE 0: */
status = nfserr_clid_inuse;
if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
- dprintk("NFSD: setclientid: string in use by client"
- " at %pI4\n", &conf->cl_addr);
+ char addr_str[INET6_ADDRSTRLEN];
+ rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
+ sizeof(addr_str));
+ dprintk("NFSD: setclientid: string in use by client "
+ "at %s\n", addr_str);
goto out;
}
}
@@ -1573,7 +1532,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
if (unconf)
expire_client(unconf);
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
gen_clid(new);
@@ -1590,7 +1549,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
expire_client(unconf);
}
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
copy_clid(new, conf);
@@ -1600,7 +1559,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* probable client reboot; state will be removed if
* confirmed.
*/
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
gen_clid(new);
@@ -1611,25 +1570,12 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* confirmed.
*/
expire_client(unconf);
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
gen_clid(new);
}
- copy_verf(new, &clverifier);
- new->cl_addr = sin->sin_addr.s_addr;
- new->cl_flavor = rqstp->rq_flavor;
- princ = svc_gss_principal(rqstp);
- if (princ) {
- new->cl_principal = kstrdup(princ, GFP_KERNEL);
- if (new->cl_principal == NULL) {
- free_client(new);
- goto out;
- }
- }
- copy_cred(&new->cl_cred, &rqstp->rq_cred);
- gen_confirm(new);
- gen_callback(new, setclid);
+ gen_callback(new, setclid, rpc_get_scope_id(sa));
add_to_unconfirmed(new, strhashval);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
@@ -1651,7 +1597,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid_confirm *setclientid_confirm)
{
- struct sockaddr_in *sin = svc_addr_in(rqstp);
+ struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
clientid_t * clid = &setclientid_confirm->sc_clientid;
@@ -1670,9 +1616,9 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
unconf = find_unconfirmed_client(clid);
status = nfserr_clid_inuse;
- if (conf && conf->cl_addr != sin->sin_addr.s_addr)
+ if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
goto out;
- if (unconf && unconf->cl_addr != sin->sin_addr.s_addr)
+ if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
goto out;
/*
@@ -2163,7 +2109,7 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
return -EAGAIN;
}
-static struct lock_manager_operations nfsd_lease_mng_ops = {
+static const struct lock_manager_operations nfsd_lease_mng_ops = {
.fl_break = nfsd_break_deleg_cb,
.fl_release_private = nfsd_release_deleg_cb,
.fl_copy_lock = nfsd_copy_lock_deleg_cb,
@@ -3368,7 +3314,7 @@ nfs4_transform_lock_offset(struct file_lock *lock)
/* Hack!: For now, we're defining this just so we can use a pointer to it
* as a unique cookie to identify our (NFSv4's) posix locks. */
-static struct lock_manager_operations nfsd_posix_mng_ops = {
+static const struct lock_manager_operations nfsd_posix_mng_ops = {
};
static inline void
@@ -4072,7 +4018,7 @@ set_max_delegations(void)
/* initialization to perform when the nfsd service is started: */
-static void
+static int
__nfs4_state_start(void)
{
unsigned long grace_time;
@@ -4084,19 +4030,26 @@ __nfs4_state_start(void)
printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
grace_time/HZ);
laundry_wq = create_singlethread_workqueue("nfsd4");
+ if (laundry_wq == NULL)
+ return -ENOMEM;
queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
set_max_delegations();
+ return set_callback_cred();
}
-void
+int
nfs4_state_start(void)
{
+ int ret;
+
if (nfs4_init)
- return;
+ return 0;
nfsd4_load_reboot_recovery_data();
- __nfs4_state_start();
+ ret = __nfs4_state_start();
+ if (ret)
+ return ret;
nfs4_init = 1;
- return;
+ return 0;
}
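
nfs4_state_start() and __nfs4_state_start() now return an error, so a failed workqueue allocation or callback-credential setup aborts nfsd startup instead of being silently ignored. A generic sketch of this idempotent start with error propagation is below; the resource here is just a malloc stand-in for the nfsd4 workqueue, not the real API.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int started;
static void *laundry;                   /* stand-in for the nfsd4 workqueue */

static int do_start(void)
{
        laundry = malloc(64);           /* pretend resource allocation */
        if (laundry == NULL)
                return -ENOMEM;         /* propagate instead of ignoring */
        return 0;                       /* further setup would go here */
}

static int state_start(void)
{
        int ret;

        if (started)                    /* already running: success */
                return 0;
        ret = do_start();
        if (ret)
                return ret;
        started = 1;
        return 0;
}

int main(void)
{
        printf("first start: %d, second start: %d\n",
               state_start(), state_start());
        free(laundry);
        return 0;
}
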
time_t
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2dcc7feaa6f..0fbd50cee1f 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1599,7 +1599,8 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *stat)
{
struct svc_fh tmp_fh;
- char *path, *rootpath;
+ char *path = NULL, *rootpath;
+ size_t rootlen;
fh_init(&tmp_fh, NFS4_FHSIZE);
*stat = exp_pseudoroot(rqstp, &tmp_fh);
@@ -1609,14 +1610,18 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *
path = exp->ex_pathname;
- if (strncmp(path, rootpath, strlen(rootpath))) {
+ rootlen = strlen(rootpath);
+ if (strncmp(path, rootpath, rootlen)) {
dprintk("nfsd: fs_locations failed;"
"%s is not contained in %s\n", path, rootpath);
*stat = nfserr_notsupp;
- return NULL;
+ path = NULL;
+ goto out;
}
-
- return path + strlen(rootpath);
+ path += rootlen;
+out:
+ fh_put(&tmp_fh);
+ return path;
}
/*
@@ -1793,11 +1798,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
goto out_nfserr;
}
}
- if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
- if (exp->ex_fslocs.locations == NULL) {
- bmval0 &= ~FATTR4_WORD0_FS_LOCATIONS;
- }
- }
if ((buflen -= 16) < 0)
goto out_resource;
@@ -1825,8 +1825,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
goto out_resource;
if (!aclsupport)
word0 &= ~FATTR4_WORD0_ACL;
- if (!exp->ex_fslocs.locations)
- word0 &= ~FATTR4_WORD0_FS_LOCATIONS;
if (!word2) {
WRITE32(2);
WRITE32(word0);
@@ -3064,6 +3062,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
WRITE32(0);
ADJUST_ARGS();
+ resp->cstate.datap = p; /* DRC cache data pointer */
return 0;
}
@@ -3166,7 +3165,7 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp)
return status;
session = resp->cstate.session;
- if (session == NULL || slot->sl_cache_entry.ce_cachethis == 0)
+ if (session == NULL || slot->sl_cachethis == 0)
return status;
if (resp->opcnt >= args->opcnt)
@@ -3291,6 +3290,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
/*
* All that remains is to write the tag and operation count...
*/
+ struct nfsd4_compound_state *cs = &resp->cstate;
struct kvec *iov;
p = resp->tagp;
*p++ = htonl(resp->taglen);
@@ -3304,17 +3304,11 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
iov = &rqstp->rq_res.head[0];
iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base;
BUG_ON(iov->iov_len > PAGE_SIZE);
- if (nfsd4_has_session(&resp->cstate)) {
- if (resp->cstate.status == nfserr_replay_cache &&
- !nfsd4_not_cached(resp)) {
- iov->iov_len = resp->cstate.iovlen;
- } else {
- nfsd4_store_cache_entry(resp);
- dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
- resp->cstate.slot->sl_inuse = 0;
- }
- if (resp->cstate.session)
- nfsd4_put_session(resp->cstate.session);
+ if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) {
+ nfsd4_store_cache_entry(resp);
+ dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
+ resp->cstate.slot->sl_inuse = false;
+ nfsd4_put_session(resp->cstate.session);
}
return 1;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7e906c5b767..00388d2a3c9 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -174,12 +174,13 @@ static const struct file_operations exports_operations = {
};
extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
+extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
static struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = nfsd_pool_stats_release,
.owner = THIS_MODULE,
};
@@ -776,10 +777,7 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
size -= len;
mesg += len;
}
-
- mutex_unlock(&nfsd_mutex);
- return (mesg-buf);
-
+ rv = mesg - buf;
out_free:
kfree(nthreads);
mutex_unlock(&nfsd_mutex);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 8847f3fbfc1..01965b2f3a7 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -397,44 +397,51 @@ static inline void _fh_update_old(struct dentry *dentry,
fh->ofh_dirino = 0;
}
-__be32
-fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
- struct svc_fh *ref_fh)
+static bool is_root_export(struct svc_export *exp)
{
- /* ref_fh is a reference file handle.
- * if it is non-null and for the same filesystem, then we should compose
- * a filehandle which is of the same version, where possible.
- * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca
- * Then create a 32byte filehandle using nfs_fhbase_old
- *
- */
+ return exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root;
+}
- u8 version;
- u8 fsid_type = 0;
- struct inode * inode = dentry->d_inode;
- struct dentry *parent = dentry->d_parent;
- __u32 *datap;
- dev_t ex_dev = exp->ex_path.dentry->d_inode->i_sb->s_dev;
- int root_export = (exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root);
+static struct super_block *exp_sb(struct svc_export *exp)
+{
+ return exp->ex_path.dentry->d_inode->i_sb;
+}
- dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
- MAJOR(ex_dev), MINOR(ex_dev),
- (long) exp->ex_path.dentry->d_inode->i_ino,
- parent->d_name.name, dentry->d_name.name,
- (inode ? inode->i_ino : 0));
+static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp)
+{
+ switch (fsid_type) {
+ case FSID_DEV:
+ if (!old_valid_dev(exp_sb(exp)->s_dev))
+ return 0;
+ /* FALL THROUGH */
+ case FSID_MAJOR_MINOR:
+ case FSID_ENCODE_DEV:
+ return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV;
+ case FSID_NUM:
+ return exp->ex_flags & NFSEXP_FSID;
+ case FSID_UUID8:
+ case FSID_UUID16:
+ if (!is_root_export(exp))
+ return 0;
+ /* fall through */
+ case FSID_UUID4_INUM:
+ case FSID_UUID16_INUM:
+ return exp->ex_uuid != NULL;
+ }
+ return 1;
+}
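
Pulling the fsid checks out of fh_compose() into fsid_type_ok_for_exp() leaves the retry loop reading as: pick a type, validate it, otherwise retry without the reference handle. The sketch below shows the same fall-through validity switch in isolation; the KIND_* names and struct export_caps are placeholders, not the nfsd FSID_* constants or export flags.

#include <stdio.h>
#include <stdbool.h>

enum fsid_kind { KIND_DEV, KIND_ENCODED_DEV, KIND_NUM, KIND_UUID };

struct export_caps {
        bool small_dev;      /* device number fits the old 16:16 encoding */
        bool requires_dev;   /* filesystem is backed by a block device    */
        bool has_fsid;       /* admin assigned an explicit fsid           */
        bool has_uuid;       /* a uuid is available                       */
};

static bool kind_ok_for_export(enum fsid_kind kind, const struct export_caps *c)
{
        switch (kind) {
        case KIND_DEV:
                if (!c->small_dev)
                        return false;
                /* fall through: both DEV kinds also need a real device */
        case KIND_ENCODED_DEV:
                return c->requires_dev;
        case KIND_NUM:
                return c->has_fsid;
        case KIND_UUID:
                return c->has_uuid;
        }
        return true;
}

int main(void)
{
        struct export_caps caps = { .small_dev = false, .requires_dev = true };

        printf("DEV ok? %d, ENCODED_DEV ok? %d\n",
               kind_ok_for_export(KIND_DEV, &caps),
               kind_ok_for_export(KIND_ENCODED_DEV, &caps));
        return 0;
}
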
- /* Choose filehandle version and fsid type based on
- * the reference filehandle (if it is in the same export)
- * or the export options.
- */
- retry:
+
+static void set_version_and_fsid_type(struct svc_fh *fhp, struct svc_export *exp, struct svc_fh *ref_fh)
+{
+ u8 version;
+ u8 fsid_type;
+retry:
version = 1;
if (ref_fh && ref_fh->fh_export == exp) {
version = ref_fh->fh_handle.fh_version;
fsid_type = ref_fh->fh_handle.fh_fsid_type;
- if (ref_fh == fhp)
- fh_put(ref_fh);
ref_fh = NULL;
switch (version) {
@@ -447,58 +454,66 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
goto retry;
}
- /* Need to check that this type works for this
- * export point. As the fsid -> filesystem mapping
- * was guided by user-space, there is no guarantee
- * that the filesystem actually supports that fsid
- * type. If it doesn't we loop around again without
- * ref_fh set.
+ /*
+ * As the fsid -> filesystem mapping was guided by
+ * user-space, there is no guarantee that the filesystem
+ * actually supports that fsid type. If it doesn't we
+ * loop around again without ref_fh set.
*/
- switch(fsid_type) {
- case FSID_DEV:
- if (!old_valid_dev(ex_dev))
- goto retry;
- /* FALL THROUGH */
- case FSID_MAJOR_MINOR:
- case FSID_ENCODE_DEV:
- if (!(exp->ex_path.dentry->d_inode->i_sb->s_type->fs_flags
- & FS_REQUIRES_DEV))
- goto retry;
- break;
- case FSID_NUM:
- if (! (exp->ex_flags & NFSEXP_FSID))
- goto retry;
- break;
- case FSID_UUID8:
- case FSID_UUID16:
- if (!root_export)
- goto retry;
- /* fall through */
- case FSID_UUID4_INUM:
- case FSID_UUID16_INUM:
- if (exp->ex_uuid == NULL)
- goto retry;
- break;
- }
+ if (!fsid_type_ok_for_exp(fsid_type, exp))
+ goto retry;
} else if (exp->ex_flags & NFSEXP_FSID) {
fsid_type = FSID_NUM;
} else if (exp->ex_uuid) {
if (fhp->fh_maxsize >= 64) {
- if (root_export)
+ if (is_root_export(exp))
fsid_type = FSID_UUID16;
else
fsid_type = FSID_UUID16_INUM;
} else {
- if (root_export)
+ if (is_root_export(exp))
fsid_type = FSID_UUID8;
else
fsid_type = FSID_UUID4_INUM;
}
- } else if (!old_valid_dev(ex_dev))
+ } else if (!old_valid_dev(exp_sb(exp)->s_dev))
/* for newer device numbers, we must use a newer fsid format */
fsid_type = FSID_ENCODE_DEV;
else
fsid_type = FSID_DEV;
+ fhp->fh_handle.fh_version = version;
+ if (version)
+ fhp->fh_handle.fh_fsid_type = fsid_type;
+}
+
+__be32
+fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
+ struct svc_fh *ref_fh)
+{
+ /* ref_fh is a reference file handle.
+ * if it is non-null and for the same filesystem, then we should compose
+ * a filehandle which is of the same version, where possible.
+ * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca
+ * Then create a 32byte filehandle using nfs_fhbase_old
+ *
+ */
+
+ struct inode * inode = dentry->d_inode;
+ struct dentry *parent = dentry->d_parent;
+ __u32 *datap;
+ dev_t ex_dev = exp_sb(exp)->s_dev;
+
+ dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
+ MAJOR(ex_dev), MINOR(ex_dev),
+ (long) exp->ex_path.dentry->d_inode->i_ino,
+ parent->d_name.name, dentry->d_name.name,
+ (inode ? inode->i_ino : 0));
+
+ /* Choose filehandle version and fsid type based on
+ * the reference filehandle (if it is in the same export)
+ * or the export options.
+ */
+ set_version_and_fsid_type(fhp, exp, ref_fh);
if (ref_fh == fhp)
fh_put(ref_fh);
@@ -516,7 +531,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
fhp->fh_export = exp;
cache_get(&exp->h);
- if (version == 0xca) {
+ if (fhp->fh_handle.fh_version == 0xca) {
/* old style filehandle please */
memset(&fhp->fh_handle.fh_base, 0, NFS_FHSIZE);
fhp->fh_handle.fh_size = NFS_FHSIZE;
@@ -530,22 +545,22 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
_fh_update_old(dentry, exp, &fhp->fh_handle);
} else {
int len;
- fhp->fh_handle.fh_version = 1;
fhp->fh_handle.fh_auth_type = 0;
datap = fhp->fh_handle.fh_auth+0;
- fhp->fh_handle.fh_fsid_type = fsid_type;
- mk_fsid(fsid_type, datap, ex_dev,
+ mk_fsid(fhp->fh_handle.fh_fsid_type, datap, ex_dev,
exp->ex_path.dentry->d_inode->i_ino,
exp->ex_fsid, exp->ex_uuid);
- len = key_len(fsid_type);
+ len = key_len(fhp->fh_handle.fh_fsid_type);
datap += len/4;
fhp->fh_handle.fh_size = 4 + len;
if (inode)
_fh_update(fhp, exp, dentry);
- if (fhp->fh_handle.fh_fileid_type == 255)
+ if (fhp->fh_handle.fh_fileid_type == 255) {
+ fh_put(fhp);
return nfserr_opnotsupp;
+ }
}
return 0;
@@ -639,8 +654,7 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
case FSID_DEV:
case FSID_ENCODE_DEV:
case FSID_MAJOR_MINOR:
- if (fhp->fh_export->ex_path.dentry->d_inode->i_sb->s_type->fs_flags
- & FS_REQUIRES_DEV)
+ if (exp_sb(fhp->fh_export)->s_type->fs_flags & FS_REQUIRES_DEV)
return FSIDSOURCE_DEV;
break;
case FSID_NUM:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 24d58adfe5f..67ea83eedd4 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -34,6 +34,7 @@
#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
+#include <linux/seq_file.h>
#define NFSDDBG_FACILITY NFSDDBG_SVC
@@ -66,6 +67,16 @@ struct timeval nfssvc_boot;
DEFINE_MUTEX(nfsd_mutex);
struct svc_serv *nfsd_serv;
+/*
+ * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
+ * nfsd_drc_max_mem limits the total amount of memory available for
+ * version 4.1 DRC caches.
+ * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
+ */
+spinlock_t nfsd_drc_lock;
+unsigned int nfsd_drc_max_mem;
+unsigned int nfsd_drc_mem_used;
+
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat nfsd_acl_svcstats;
static struct svc_version * nfsd_acl_version[] = {
@@ -235,13 +246,12 @@ void nfsd_reset_versions(void)
*/
static void set_max_drc(void)
{
- /* The percent of nr_free_buffer_pages used by the V4.1 server DRC */
- #define NFSD_DRC_SIZE_SHIFT 7
- nfsd_serv->sv_drc_max_pages = nr_free_buffer_pages()
- >> NFSD_DRC_SIZE_SHIFT;
- nfsd_serv->sv_drc_pages_used = 0;
- dprintk("%s svc_drc_max_pages %u\n", __func__,
- nfsd_serv->sv_drc_max_pages);
+ #define NFSD_DRC_SIZE_SHIFT 10
+ nfsd_drc_max_mem = (nr_free_buffer_pages()
+ >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
+ nfsd_drc_mem_used = 0;
+ spin_lock_init(&nfsd_drc_lock);
+ dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
}
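
set_max_drc() now budgets the v4.1 DRC in bytes: roughly 1/1024 of free buffer memory (NFSD_DRC_SIZE_SHIFT of 10), with per-session usage charged to nfsd_drc_mem_used under nfsd_drc_lock. A small single-threaded sketch of the budget arithmetic and the charge/uncharge pattern follows; the lock is omitted and the numbers are purely illustrative.

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define DRC_SIZE_SHIFT 10        /* cap the DRC at ~1/1024 of free memory */

static unsigned long drc_max_mem;
static unsigned long drc_mem_used;

static void set_max_drc(unsigned long free_pages)
{
        drc_max_mem = (free_pages >> DRC_SIZE_SHIFT) * PAGE_SIZE;
        drc_mem_used = 0;
}

/* Charge a session's cache memory against the global budget. */
static int drc_charge(unsigned long bytes)
{
        if (drc_mem_used + bytes > drc_max_mem)
                return -1;               /* would exceed the DRC budget */
        drc_mem_used += bytes;
        return 0;
}

static void drc_uncharge(unsigned long bytes)
{
        drc_mem_used -= bytes;
}

int main(void)
{
        set_max_drc(1 << 18);            /* 2^18 free 4 KiB pages, i.e. ~1 GiB */
        printf("budget: %lu bytes\n", drc_max_mem);
        printf("charge 64 KiB: %d, used %lu\n", drc_charge(64 * 1024), drc_mem_used);
        drc_uncharge(64 * 1024);
        return 0;
}
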
int nfsd_create_serv(void)
@@ -401,7 +411,9 @@ nfsd_svc(unsigned short port, int nrservs)
error = nfsd_racache_init(2*nrservs);
if (error<0)
goto out;
- nfs4_state_start();
+ error = nfs4_state_start();
+ if (error)
+ goto out;
nfsd_reset_versions();
@@ -569,10 +581,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ rqstp->rq_res.head[0].iov_len;
rqstp->rq_res.head[0].iov_len += sizeof(__be32);
- /* NFSv4.1 DRC requires statp */
- if (rqstp->rq_vers == 4)
- nfsd4_set_statp(rqstp, statp);
-
/* Now call the procedure handler, and encode NFS status. */
nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
nfserr = map_new_errors(rqstp->rq_vers, nfserr);
@@ -607,7 +615,25 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
- if (nfsd_serv == NULL)
+ int ret;
+ mutex_lock(&nfsd_mutex);
+ if (nfsd_serv == NULL) {
+ mutex_unlock(&nfsd_mutex);
return -ENODEV;
- return svc_pool_stats_open(nfsd_serv, file);
+ }
+ /* bump up the pseudo refcount while traversing */
+ svc_get(nfsd_serv);
+ ret = svc_pool_stats_open(nfsd_serv, file);
+ mutex_unlock(&nfsd_mutex);
+ return ret;
+}
+
+int nfsd_pool_stats_release(struct inode *inode, struct file *file)
+{
+ int ret = seq_release(inode, file);
+ mutex_lock(&nfsd_mutex);
+ /* this function really, really should have been called svc_put() */
+ svc_destroy(nfsd_serv);
+ mutex_unlock(&nfsd_mutex);
+ return ret;
}
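
nfsd_pool_stats_open() now takes a reference on nfsd_serv under nfsd_mutex and the new nfsd_pool_stats_release() drops it, so the svc_serv cannot be torn down while the seq_file is open. A generic userspace sketch of that open/release reference pairing, using a pthread mutex and an invented struct server, is shown here.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct server {
        int refcount;
} *serv;                                /* NULL when the service is down */

static int stats_open(void)
{
        pthread_mutex_lock(&lock);
        if (serv == NULL) {
                pthread_mutex_unlock(&lock);
                return -1;              /* -ENODEV in the kernel version */
        }
        serv->refcount++;               /* hold the server while the file is open */
        pthread_mutex_unlock(&lock);
        return 0;
}

static void stats_release(void)
{
        pthread_mutex_lock(&lock);
        if (--serv->refcount == 0) {    /* last user tears the server down */
                free(serv);
                serv = NULL;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        serv = calloc(1, sizeof(*serv));
        serv->refcount = 1;             /* reference owned by the service itself */

        if (stats_open() == 0) {
                /* ... read the stats ... */
                stats_release();
        }
        stats_release();                /* drop the service's own reference */
        return 0;
}
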
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8fa09bfbcba..a293f027326 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -89,6 +89,12 @@ struct raparm_hbucket {
#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
+static inline int
+nfsd_v4client(struct svc_rqst *rq)
+{
+ return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
+}
+
/*
* Called from nfsd_lookup and encode_dirent. Check if we have crossed
* a mount point.
@@ -115,7 +121,8 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
path_put(&path);
goto out;
}
- if ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
+ if (nfsd_v4client(rqstp) ||
+ (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
/* successfully crossed mount point */
/*
* This is subtle: path.dentry is *not* on path.mnt
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index c668bca579c..6a2711f4c32 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -46,7 +46,7 @@ void nilfs_btnode_cache_init_once(struct address_space *btnc)
INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
}
-static struct address_space_operations def_btnode_aops = {
+static const struct address_space_operations def_btnode_aops = {
.sync_page = block_sync_page,
};
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 6bd84a0d823..fc8278c77cd 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -151,7 +151,7 @@ struct file_operations nilfs_file_operations = {
.splice_read = generic_file_splice_read,
};
-struct inode_operations nilfs_file_inode_operations = {
+const struct inode_operations nilfs_file_inode_operations = {
.truncate = nilfs_truncate,
.setattr = nilfs_setattr,
.permission = nilfs_permission,
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 1b3c2bb20da..e6de0a27ab5 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -52,7 +52,7 @@
#include "dat.h"
#include "ifile.h"
-static struct address_space_operations def_gcinode_aops = {
+static const struct address_space_operations def_gcinode_aops = {
.sync_page = block_sync_page,
};
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 807e584b163..2d2c501deb5 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -238,7 +238,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
return size;
}
-struct address_space_operations nilfs_aops = {
+const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
.readpage = nilfs_readpage,
.sync_page = block_sync_page,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 156bf6091a9..b18c4998f8d 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -427,12 +427,12 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
}
-static struct address_space_operations def_mdt_aops = {
+static const struct address_space_operations def_mdt_aops = {
.writepage = nilfs_mdt_write_page,
.sync_page = block_sync_page,
};
-static struct inode_operations def_mdt_iops;
+static const struct inode_operations def_mdt_iops;
static struct file_operations def_mdt_fops;
/*
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index df70dadb336..ed02e886fa7 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -448,7 +448,7 @@ out:
return err;
}
-struct inode_operations nilfs_dir_inode_operations = {
+const struct inode_operations nilfs_dir_inode_operations = {
.create = nilfs_create,
.lookup = nilfs_lookup,
.link = nilfs_link,
@@ -462,12 +462,12 @@ struct inode_operations nilfs_dir_inode_operations = {
.permission = nilfs_permission,
};
-struct inode_operations nilfs_special_inode_operations = {
+const struct inode_operations nilfs_special_inode_operations = {
.setattr = nilfs_setattr,
.permission = nilfs_permission,
};
-struct inode_operations nilfs_symlink_inode_operations = {
+const struct inode_operations nilfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 724c63766e8..bad7368782d 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -295,12 +295,12 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *);
* Inodes and files operations
*/
extern struct file_operations nilfs_dir_operations;
-extern struct inode_operations nilfs_file_inode_operations;
+extern const struct inode_operations nilfs_file_inode_operations;
extern struct file_operations nilfs_file_operations;
-extern struct address_space_operations nilfs_aops;
-extern struct inode_operations nilfs_dir_inode_operations;
-extern struct inode_operations nilfs_special_inode_operations;
-extern struct inode_operations nilfs_symlink_inode_operations;
+extern const struct address_space_operations nilfs_aops;
+extern const struct inode_operations nilfs_dir_inode_operations;
+extern const struct inode_operations nilfs_special_inode_operations;
+extern const struct inode_operations nilfs_symlink_inode_operations;
/*
* filesystem type
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 55f3d6b6073..644e66727dd 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -504,7 +504,7 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
return 0;
}
-static struct super_operations nilfs_sops = {
+static const struct super_operations nilfs_sops = {
.alloc_inode = nilfs_alloc_inode,
.destroy_inode = nilfs_destroy_inode,
.dirty_inode = nilfs_dirty_inode,
@@ -560,7 +560,7 @@ nilfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len,
nilfs_nfs_get_inode);
}
-static struct export_operations nilfs_export_ops = {
+static const struct export_operations nilfs_export_ops = {
.fh_to_dentry = nilfs_fh_to_dentry,
.fh_to_parent = nilfs_fh_to_parent,
.get_parent = nilfs_get_parent,
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 477d37d83b3..2224b4d07bf 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -270,7 +270,8 @@ struct nls_table *load_nls(char *charset)
void unload_nls(struct nls_table *nls)
{
- module_put(nls->owner);
+ if (nls)
+ module_put(nls->owner);
}
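
Letting unload_nls() accept a NULL table, in the style of kfree(NULL), is what allows the ntfs callers later in this diff to drop their if-not-NULL wrappers. The idiom in standalone form, with release_table() and struct table as invented names:

#include <stdio.h>
#include <stdlib.h>

struct table {
        int users;
};

/* NULL-tolerant release: callers never need to test before calling. */
static void release_table(struct table *t)
{
        if (t == NULL)
                return;
        t->users--;
        printf("released, users now %d\n", t->users);
}

int main(void)
{
        struct table *t = calloc(1, sizeof(*t));

        t->users = 1;
        release_table(t);       /* normal path    */
        release_table(NULL);    /* harmless no-op */
        free(t);
        return 0;
}
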
static const wchar_t charset2uni[256] = {
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index b38f944f066..cfce53cb65d 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1550,6 +1550,7 @@ const struct address_space_operations ntfs_aops = {
.migratepage = buffer_migrate_page, /* Move a page cache page from
one physical page to an
other. */
+ .error_remove_page = generic_error_remove_page,
};
/**
@@ -1569,6 +1570,7 @@ const struct address_space_operations ntfs_mst_aops = {
.migratepage = buffer_migrate_page, /* Move a page cache page from
one physical page to an
other. */
+ .error_remove_page = generic_error_remove_page,
};
#ifdef NTFS_RW
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 4350d4993b1..663c0e341f8 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2146,46 +2146,6 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
}
/**
- * ntfs_file_writev -
- *
- * Basically the same as generic_file_writev() except that it ends up calling
- * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock().
- */
-static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos)
-{
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- struct kiocb kiocb;
- ssize_t ret;
-
- mutex_lock(&inode->i_mutex);
- init_sync_kiocb(&kiocb, file);
- ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
- if (ret == -EIOCBQUEUED)
- ret = wait_on_sync_kiocb(&kiocb);
- mutex_unlock(&inode->i_mutex);
- if (ret > 0) {
- int err = generic_write_sync(file, *ppos - ret, ret);
- if (err < 0)
- ret = err;
- }
- return ret;
-}
-
-/**
- * ntfs_file_write - simple wrapper for ntfs_file_writev()
- */
-static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct iovec local_iov = { .iov_base = (void __user *)buf,
- .iov_len = count };
-
- return ntfs_file_writev(file, &local_iov, 1, ppos);
-}
-
-/**
* ntfs_file_fsync - sync a file to disk
* @filp: file to be synced
* @dentry: dentry describing the file to sync
@@ -2247,7 +2207,7 @@ const struct file_operations ntfs_file_ops = {
.read = do_sync_read, /* Read from file. */
.aio_read = generic_file_aio_read, /* Async read from file. */
#ifdef NTFS_RW
- .write = ntfs_file_write, /* Write to file. */
+ .write = do_sync_write, /* Write to file. */
.aio_write = ntfs_file_aio_write, /* Async write to file. */
/*.release = ,*/ /* Last file is closed. See
fs/ext2/file.c::
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 50931b1ce4b..8b2549f672b 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -829,7 +829,7 @@ enum {
/* Note, FILE_ATTR_VALID_SET_FLAGS masks out the old DOS VolId, the
F_A_DEVICE, F_A_DIRECTORY, F_A_SPARSE_FILE, F_A_REPARSE_POINT,
F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest. This mask
- is used to to obtain all flags that are valid for setting. */
+ is used to obtain all flags that are valid for setting. */
/*
* The flag FILE_ATTR_DUP_FILENAME_INDEX_PRESENT is present in all
* FILENAME_ATTR attributes but not in the STANDARD_INFORMATION
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index cd0be3f5c3c..a44b14cbcee 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
/* return (void *)__get_free_page(gfp_mask); */
}
- if (likely(size >> PAGE_SHIFT < num_physpages))
+ if (likely((size >> PAGE_SHIFT) < totalram_pages))
return __vmalloc(size, gfp_mask, PAGE_KERNEL);
return NULL;
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index abaaa1cbf8d..80b04770e8e 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -201,8 +201,7 @@ use_utf8:
v, old_nls->charset);
nls_map = old_nls;
} else /* nls_map */ {
- if (old_nls)
- unload_nls(old_nls);
+ unload_nls(old_nls);
}
} else if (!strcmp(p, "utf8")) {
bool val = false;
@@ -2427,10 +2426,9 @@ static void ntfs_put_super(struct super_block *sb)
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
- if (vol->nls_map) {
- unload_nls(vol->nls_map);
- vol->nls_map = NULL;
- }
+
+ unload_nls(vol->nls_map);
+
sb->s_fs_info = NULL;
kfree(vol);
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 01596079dd6..31f25ce32c9 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -28,6 +28,7 @@ ocfs2-objs := \
locks.o \
mmap.o \
namei.o \
+ refcounttree.o \
resize.o \
slot_map.o \
suballoc.o \
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ab513ddaeff..38a42f5d59f 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -49,10 +49,21 @@
#include "super.h"
#include "uptodate.h"
#include "xattr.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
+enum ocfs2_contig_type {
+ CONTIG_NONE = 0,
+ CONTIG_LEFT,
+ CONTIG_RIGHT,
+ CONTIG_LEFTRIGHT,
+};
+static enum ocfs2_contig_type
+ ocfs2_extent_rec_contig(struct super_block *sb,
+ struct ocfs2_extent_rec *ext,
+ struct ocfs2_extent_rec *insert_rec);
/*
* Operations for a specific extent tree type.
*
@@ -79,18 +90,30 @@ struct ocfs2_extent_tree_operations {
* that value. new_clusters is the delta, and must be
* added to the total. Required.
*/
- void (*eo_update_clusters)(struct inode *inode,
- struct ocfs2_extent_tree *et,
+ void (*eo_update_clusters)(struct ocfs2_extent_tree *et,
u32 new_clusters);
/*
+ * If this extent tree is supported by an extent map, insert
+ * a record into the map.
+ */
+ void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *rec);
+
+ /*
+ * If this extent tree is supported by an extent map, truncate the
+ * map to clusters.
+ */
+ void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et,
+ u32 clusters);
+
+ /*
* If ->eo_insert_check() exists, it is called before rec is
* inserted into the extent tree. It is optional.
*/
- int (*eo_insert_check)(struct inode *inode,
- struct ocfs2_extent_tree *et,
+ int (*eo_insert_check)(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
- int (*eo_sanity_check)(struct inode *inode, struct ocfs2_extent_tree *et);
+ int (*eo_sanity_check)(struct ocfs2_extent_tree *et);
/*
* --------------------------------------------------------------
@@ -109,8 +132,17 @@ struct ocfs2_extent_tree_operations {
* it exists. If it does not, et->et_max_leaf_clusters is set
* to 0 (unlimited). Optional.
*/
- void (*eo_fill_max_leaf_clusters)(struct inode *inode,
- struct ocfs2_extent_tree *et);
+ void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et);
+
+ /*
+ * ->eo_extent_contig tests whether two ocfs2_extent_recs are
+ * contiguous or not. Optional. It does not need to be set if
+ * ocfs2_extent_rec is used as the tree leaf.
+ */
+ enum ocfs2_contig_type
+ (*eo_extent_contig)(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *ext,
+ struct ocfs2_extent_rec *insert_rec);
};
@@ -121,19 +153,22 @@ struct ocfs2_extent_tree_operations {
static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 blkno);
-static void ocfs2_dinode_update_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters);
-static int ocfs2_dinode_insert_check(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *rec);
+static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
+ u32 clusters);
+static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec);
-static int ocfs2_dinode_sanity_check(struct inode *inode,
- struct ocfs2_extent_tree *et);
+static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et);
static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
.eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk,
.eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk,
.eo_update_clusters = ocfs2_dinode_update_clusters,
+ .eo_extent_map_insert = ocfs2_dinode_extent_map_insert,
+ .eo_extent_map_truncate = ocfs2_dinode_extent_map_truncate,
.eo_insert_check = ocfs2_dinode_insert_check,
.eo_sanity_check = ocfs2_dinode_sanity_check,
.eo_fill_root_el = ocfs2_dinode_fill_root_el,
@@ -156,40 +191,53 @@ static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
return le64_to_cpu(di->i_last_eb_blk);
}
-static void ocfs2_dinode_update_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
struct ocfs2_dinode *di = et->et_object;
le32_add_cpu(&di->i_clusters, clusters);
- spin_lock(&OCFS2_I(inode)->ip_lock);
- OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters);
- spin_unlock(&OCFS2_I(inode)->ip_lock);
+ spin_lock(&oi->ip_lock);
+ oi->ip_clusters = le32_to_cpu(di->i_clusters);
+ spin_unlock(&oi->ip_lock);
}
-static int ocfs2_dinode_insert_check(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *rec)
+{
+ struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;
+
+ ocfs2_extent_map_insert_rec(inode, rec);
+}
+
+static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
+ u32 clusters)
+{
+ struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;
+
+ ocfs2_extent_map_trunc(inode, clusters);
+}
+
+static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
+ struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb);
- BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
+ BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL);
mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
- (OCFS2_I(inode)->ip_clusters !=
- le32_to_cpu(rec->e_cpos)),
+ (oi->ip_clusters != le32_to_cpu(rec->e_cpos)),
"Device %s, asking for sparse allocation: inode %llu, "
"cpos %u, clusters %u\n",
osb->dev_str,
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- rec->e_cpos,
- OCFS2_I(inode)->ip_clusters);
+ (unsigned long long)oi->ip_blkno,
+ rec->e_cpos, oi->ip_clusters);
return 0;
}
-static int ocfs2_dinode_sanity_check(struct inode *inode,
- struct ocfs2_extent_tree *et)
+static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et)
{
struct ocfs2_dinode *di = et->et_object;
@@ -229,8 +277,7 @@ static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
return le64_to_cpu(vb->vb_xv->xr_last_eb_blk);
}
-static void ocfs2_xattr_value_update_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_xattr_value_buf *vb = et->et_object;
@@ -252,12 +299,11 @@ static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et)
et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
}
-static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et)
+static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et)
{
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
et->et_max_leaf_clusters =
- ocfs2_clusters_for_bytes(inode->i_sb,
- OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
+ ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
}
static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
@@ -277,8 +323,7 @@ static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
return le64_to_cpu(xt->xt_last_eb_blk);
}
-static void ocfs2_xattr_tree_update_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_xattr_block *xb = et->et_object;
@@ -309,8 +354,7 @@ static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et)
return le64_to_cpu(dx_root->dr_last_eb_blk);
}
-static void ocfs2_dx_root_update_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static void ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
@@ -318,8 +362,7 @@ static void ocfs2_dx_root_update_clusters(struct inode *inode,
le32_add_cpu(&dx_root->dr_clusters, clusters);
}
-static int ocfs2_dx_root_sanity_check(struct inode *inode,
- struct ocfs2_extent_tree *et)
+static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et)
{
struct ocfs2_dx_root_block *dx_root = et->et_object;
@@ -343,8 +386,54 @@ static struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = {
.eo_fill_root_el = ocfs2_dx_root_fill_root_el,
};
+static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et)
+{
+ struct ocfs2_refcount_block *rb = et->et_object;
+
+ et->et_root_el = &rb->rf_list;
+}
+
+static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
+ u64 blkno)
+{
+ struct ocfs2_refcount_block *rb = et->et_object;
+
+ rb->rf_last_eb_blk = cpu_to_le64(blkno);
+}
+
+static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
+{
+ struct ocfs2_refcount_block *rb = et->et_object;
+
+ return le64_to_cpu(rb->rf_last_eb_blk);
+}
+
+static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et,
+ u32 clusters)
+{
+ struct ocfs2_refcount_block *rb = et->et_object;
+
+ le32_add_cpu(&rb->rf_clusters, clusters);
+}
+
+static enum ocfs2_contig_type
+ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *ext,
+ struct ocfs2_extent_rec *insert_rec)
+{
+ return CONTIG_NONE;
+}
+
+static struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = {
+ .eo_set_last_eb_blk = ocfs2_refcount_tree_set_last_eb_blk,
+ .eo_get_last_eb_blk = ocfs2_refcount_tree_get_last_eb_blk,
+ .eo_update_clusters = ocfs2_refcount_tree_update_clusters,
+ .eo_fill_root_el = ocfs2_refcount_tree_fill_root_el,
+ .eo_extent_contig = ocfs2_refcount_tree_extent_contig,
+};
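
The refcount-tree ops table above fills in only the callbacks it needs; dispatch helpers later in this hunk (ocfs2_et_extent_map_insert(), ocfs2_et_extent_contig(), and friends) check for NULL and fall back to a default. A compact sketch of that optional-operation vtable pattern, with invented types rather than the ocfs2 structures:

#include <stdio.h>

struct tree;

struct tree_ops {
        void (*update_clusters)(struct tree *t, unsigned int delta); /* required */
        void (*map_insert)(struct tree *t);                          /* optional */
        int  (*contig)(struct tree *t);                              /* optional */
};

struct tree {
        const struct tree_ops *ops;
        unsigned int clusters;
};

static void tree_update_clusters(struct tree *t, unsigned int delta)
{
        t->ops->update_clusters(t, delta);
}

static void tree_map_insert(struct tree *t)
{
        if (t->ops->map_insert)          /* optional op: skip if unset */
                t->ops->map_insert(t);
}

static int tree_contig(struct tree *t)
{
        if (t->ops->contig)
                return t->ops->contig(t);
        return 1;                        /* generic default behaviour */
}

static void refcount_update(struct tree *t, unsigned int delta)
{
        t->clusters += delta;
}

static int refcount_contig(struct tree *t)
{
        (void)t;
        return 0;                        /* refcount trees: never contiguous */
}

static const struct tree_ops refcount_ops = {
        .update_clusters = refcount_update,
        .contig          = refcount_contig,
        /* .map_insert left NULL: refcount trees have no extent map */
};

int main(void)
{
        struct tree t = { .ops = &refcount_ops };

        tree_update_clusters(&t, 8);
        tree_map_insert(&t);             /* safely does nothing */
        printf("clusters %u, contig %d\n", t.clusters, tree_contig(&t));
        return 0;
}
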
+
static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh,
ocfs2_journal_access_func access,
void *obj,
@@ -352,6 +441,7 @@ static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
{
et->et_ops = ops;
et->et_root_bh = bh;
+ et->et_ci = ci;
et->et_root_journal_access = access;
if (!obj)
obj = (void *)bh->b_data;
@@ -361,41 +451,49 @@ static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
if (!et->et_ops->eo_fill_max_leaf_clusters)
et->et_max_leaf_clusters = 0;
else
- et->et_ops->eo_fill_max_leaf_clusters(inode, et);
+ et->et_ops->eo_fill_max_leaf_clusters(et);
}
void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
- __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_di,
+ __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di,
NULL, &ocfs2_dinode_et_ops);
}
void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
- __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_xb,
+ __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb,
NULL, &ocfs2_xattr_tree_et_ops);
}
void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct ocfs2_xattr_value_buf *vb)
{
- __ocfs2_init_extent_tree(et, inode, vb->vb_bh, vb->vb_access, vb,
+ __ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb,
&ocfs2_xattr_value_et_ops);
}
void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
- __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_dr,
+ __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr,
NULL, &ocfs2_dx_root_et_ops);
}
+void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *bh)
+{
+ __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb,
+ NULL, &ocfs2_refcount_tree_et_ops);
+}
+
static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
u64 new_last_eb_blk)
{
@@ -407,78 +505,71 @@ static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
return et->et_ops->eo_get_last_eb_blk(et);
}
-static inline void ocfs2_et_update_clusters(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et,
u32 clusters)
{
- et->et_ops->eo_update_clusters(inode, et, clusters);
+ et->et_ops->eo_update_clusters(et, clusters);
+}
+
+static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *rec)
+{
+ if (et->et_ops->eo_extent_map_insert)
+ et->et_ops->eo_extent_map_insert(et, rec);
+}
+
+static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et,
+ u32 clusters)
+{
+ if (et->et_ops->eo_extent_map_truncate)
+ et->et_ops->eo_extent_map_truncate(et, clusters);
}
static inline int ocfs2_et_root_journal_access(handle_t *handle,
- struct inode *inode,
struct ocfs2_extent_tree *et,
int type)
{
- return et->et_root_journal_access(handle, inode, et->et_root_bh,
+ return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh,
type);
}
-static inline int ocfs2_et_insert_check(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static inline enum ocfs2_contig_type
+ ocfs2_et_extent_contig(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *rec,
+ struct ocfs2_extent_rec *insert_rec)
+{
+ if (et->et_ops->eo_extent_contig)
+ return et->et_ops->eo_extent_contig(et, rec, insert_rec);
+
+ return ocfs2_extent_rec_contig(
+ ocfs2_metadata_cache_get_super(et->et_ci),
+ rec, insert_rec);
+}
+
+static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *rec)
{
int ret = 0;
if (et->et_ops->eo_insert_check)
- ret = et->et_ops->eo_insert_check(inode, et, rec);
+ ret = et->et_ops->eo_insert_check(et, rec);
return ret;
}
-static inline int ocfs2_et_sanity_check(struct inode *inode,
- struct ocfs2_extent_tree *et)
+static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
{
int ret = 0;
if (et->et_ops->eo_sanity_check)
- ret = et->et_ops->eo_sanity_check(inode, et);
+ ret = et->et_ops->eo_sanity_check(et);
return ret;
}
static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
struct ocfs2_extent_block *eb);
-
-/*
- * Structures which describe a path through a btree, and functions to
- * manipulate them.
- *
- * The idea here is to be as generic as possible with the tree
- * manipulation code.
- */
-struct ocfs2_path_item {
- struct buffer_head *bh;
- struct ocfs2_extent_list *el;
-};
-
-#define OCFS2_MAX_PATH_DEPTH 5
-
-struct ocfs2_path {
- int p_tree_depth;
- ocfs2_journal_access_func p_root_access;
- struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH];
-};
-
-#define path_root_bh(_path) ((_path)->p_node[0].bh)
-#define path_root_el(_path) ((_path)->p_node[0].el)
-#define path_root_access(_path)((_path)->p_root_access)
-#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
-#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
-#define path_num_items(_path) ((_path)->p_tree_depth + 1)
-
-static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
- u32 cpos);
-static void ocfs2_adjust_rightmost_records(struct inode *inode,
- handle_t *handle,
+static void ocfs2_adjust_rightmost_records(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec);
/*
@@ -486,7 +577,7 @@ static void ocfs2_adjust_rightmost_records(struct inode *inode,
* to build another path. Generally, this involves freeing the buffer
* heads.
*/
-static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
+void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
int i, start = 0, depth = 0;
struct ocfs2_path_item *node;
@@ -515,7 +606,7 @@ static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
path->p_tree_depth = depth;
}
-static void ocfs2_free_path(struct ocfs2_path *path)
+void ocfs2_free_path(struct ocfs2_path *path)
{
if (path) {
ocfs2_reinit_path(path, 0);
@@ -613,13 +704,13 @@ static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
return path;
}
-static struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
+struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
return ocfs2_new_path(path_root_bh(path), path_root_el(path),
path_root_access(path));
}
-static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
+struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
return ocfs2_new_path(et->et_root_bh, et->et_root_el,
et->et_root_journal_access);
@@ -632,10 +723,10 @@ static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
* I don't like the way this function's name looks next to
* ocfs2_journal_access_path(), but I don't have a better one.
*/
-static int ocfs2_path_bh_journal_access(handle_t *handle,
- struct inode *inode,
- struct ocfs2_path *path,
- int idx)
+int ocfs2_path_bh_journal_access(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct ocfs2_path *path,
+ int idx)
{
ocfs2_journal_access_func access = path_root_access(path);
@@ -645,15 +736,16 @@ static int ocfs2_path_bh_journal_access(handle_t *handle,
if (idx)
access = ocfs2_journal_access_eb;
- return access(handle, inode, path->p_node[idx].bh,
+ return access(handle, ci, path->p_node[idx].bh,
OCFS2_JOURNAL_ACCESS_WRITE);
}
/*
* Convenience function to journal all components in a path.
*/
-static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
- struct ocfs2_path *path)
+int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
+ handle_t *handle,
+ struct ocfs2_path *path)
{
int i, ret = 0;
@@ -661,7 +753,7 @@ static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
goto out;
for(i = 0; i < path_num_items(path); i++) {
- ret = ocfs2_path_bh_journal_access(handle, inode, path, i);
+ ret = ocfs2_path_bh_journal_access(handle, ci, path, i);
if (ret < 0) {
mlog_errno(ret);
goto out;
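/*
 * Illustrative sketch, not part of this patch: with the change above, a
 * caller that holds an extent tree journals a whole path against the
 * owner's metadata cache rather than an inode, roughly:
 *
 *	ret = ocfs2_journal_access_path(et->et_ci, handle, path);
 *	if (ret) {
 *		mlog_errno(ret);
 *		goto out;
 *	}
 *
 * 'et', 'handle' and 'path' are assumed to exist in the calling function;
 * only the first argument changes from an inode to et->et_ci.
 */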
@@ -702,17 +794,9 @@ int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
return ret;
}
-enum ocfs2_contig_type {
- CONTIG_NONE = 0,
- CONTIG_LEFT,
- CONTIG_RIGHT,
- CONTIG_LEFTRIGHT,
-};
-
-
/*
* NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
- * ocfs2_extent_contig only work properly against leaf nodes!
+ * ocfs2_extent_rec_contig only work properly against leaf nodes!
*/
static int ocfs2_block_extent_contig(struct super_block *sb,
struct ocfs2_extent_rec *ext,
@@ -738,9 +822,9 @@ static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
}
static enum ocfs2_contig_type
- ocfs2_extent_contig(struct inode *inode,
- struct ocfs2_extent_rec *ext,
- struct ocfs2_extent_rec *insert_rec)
+ ocfs2_extent_rec_contig(struct super_block *sb,
+ struct ocfs2_extent_rec *ext,
+ struct ocfs2_extent_rec *insert_rec)
{
u64 blkno = le64_to_cpu(insert_rec->e_blkno);
@@ -753,12 +837,12 @@ static enum ocfs2_contig_type
return CONTIG_NONE;
if (ocfs2_extents_adjacent(ext, insert_rec) &&
- ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
+ ocfs2_block_extent_contig(sb, ext, blkno))
return CONTIG_RIGHT;
blkno = le64_to_cpu(ext->e_blkno);
if (ocfs2_extents_adjacent(insert_rec, ext) &&
- ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
+ ocfs2_block_extent_contig(sb, insert_rec, blkno))
return CONTIG_LEFT;
return CONTIG_NONE;
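/*
 * Illustrative sketch, not part of this patch: generic code now asks the
 * extent tree for contiguity instead of passing an inode.  The wrapper
 * added above dispatches to eo_extent_contig when the tree type provides
 * one and otherwise falls back to the record-level check:
 *
 *	ctype = ocfs2_et_extent_contig(et, rec, insert_rec);
 *
 * 'et', 'rec' and 'insert_rec' are assumed to be in scope in the caller.
 */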
@@ -853,13 +937,13 @@ static int ocfs2_validate_extent_block(struct super_block *sb,
return 0;
}
-int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno,
+int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
struct buffer_head **bh)
{
int rc;
struct buffer_head *tmp = *bh;
- rc = ocfs2_read_block(inode, eb_blkno, &tmp,
+ rc = ocfs2_read_block(ci, eb_blkno, &tmp,
ocfs2_validate_extent_block);
/* If ocfs2_read_block() got us a new bh, pass it up. */
@@ -874,7 +958,6 @@ int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno,
* How many free extents have we got before we need more meta data?
*/
int ocfs2_num_free_extents(struct ocfs2_super *osb,
- struct inode *inode,
struct ocfs2_extent_tree *et)
{
int retval;
@@ -889,7 +972,8 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb,
last_eb_blk = ocfs2_et_get_last_eb_blk(et);
if (last_eb_blk) {
- retval = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh);
+ retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk,
+ &eb_bh);
if (retval < 0) {
mlog_errno(retval);
goto bail;
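/*
 * Illustrative sketch, not part of this patch: ocfs2_num_free_extents()
 * drops its inode argument, so a call that used to read
 * ocfs2_num_free_extents(osb, inode, et) becomes roughly:
 *
 *	free_extents = ocfs2_num_free_extents(osb, et);
 *	if (free_extents < 0) {
 *		mlog_errno(free_extents);
 *		goto out;
 *	}
 *
 * 'osb', 'et' and 'free_extents' are assumed caller-side names.
 */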
@@ -913,9 +997,8 @@ bail:
* sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
* l_count for you
*/
-static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *inode,
+static int ocfs2_create_new_meta_bhs(handle_t *handle,
+ struct ocfs2_extent_tree *et,
int wanted,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head *bhs[])
@@ -924,6 +1007,8 @@ static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
u16 suballoc_bit_start;
u32 num_got;
u64 first_blkno;
+ struct ocfs2_super *osb =
+ OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
struct ocfs2_extent_block *eb;
mlog_entry_void();
@@ -949,9 +1034,10 @@ static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
mlog_errno(status);
goto bail;
}
- ocfs2_set_new_buffer_uptodate(inode, bhs[i]);
+ ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]);
- status = ocfs2_journal_access_eb(handle, inode, bhs[i],
+ status = ocfs2_journal_access_eb(handle, et->et_ci,
+ bhs[i],
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1023,7 +1109,6 @@ static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
* extent block's rightmost record.
*/
static int ocfs2_adjust_rightmost_branch(handle_t *handle,
- struct inode *inode,
struct ocfs2_extent_tree *et)
{
int status;
@@ -1037,7 +1122,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
return status;
}
- status = ocfs2_find_path(inode, path, UINT_MAX);
+ status = ocfs2_find_path(et->et_ci, path, UINT_MAX);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -1050,7 +1135,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
goto out;
}
- status = ocfs2_journal_access_path(inode, handle, path);
+ status = ocfs2_journal_access_path(et->et_ci, handle, path);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -1059,7 +1144,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
el = path_leaf_el(path);
rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
- ocfs2_adjust_rightmost_records(inode, handle, path, rec);
+ ocfs2_adjust_rightmost_records(handle, et, path, rec);
out:
ocfs2_free_path(path);
@@ -1068,7 +1153,7 @@ out:
/*
* Add an entire tree branch to our inode. eb_bh is the extent block
- * to start at, if we don't want to start the branch at the dinode
+ * to start at, if we don't want to start the branch at the root
* structure.
*
* last_eb_bh is required as we have to update its next_leaf pointer
@@ -1077,9 +1162,7 @@ out:
* the new branch will be 'empty' in the sense that every block will
* contain a single record with cluster count == 0.
*/
-static int ocfs2_add_branch(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *inode,
+static int ocfs2_add_branch(handle_t *handle,
struct ocfs2_extent_tree *et,
struct buffer_head *eb_bh,
struct buffer_head **last_eb_bh,
@@ -1123,7 +1206,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
if (root_end > new_cpos) {
mlog(0, "adjust the cluster end from %u to %u\n",
root_end, new_cpos);
- status = ocfs2_adjust_rightmost_branch(handle, inode, et);
+ status = ocfs2_adjust_rightmost_branch(handle, et);
if (status) {
mlog_errno(status);
goto bail;
@@ -1139,7 +1222,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
goto bail;
}
- status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
+ status = ocfs2_create_new_meta_bhs(handle, et, new_blocks,
meta_ac, new_eb_bhs);
if (status < 0) {
mlog_errno(status);
@@ -1161,7 +1244,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
eb_el = &eb->h_list;
- status = ocfs2_journal_access_eb(handle, inode, bh,
+ status = ocfs2_journal_access_eb(handle, et->et_ci, bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1201,20 +1284,20 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
* journal_dirty erroring as it won't unless we've aborted the
* handle (in which case we would never be here) so reserving
* the write with journal_access is all we need to do. */
- status = ocfs2_journal_access_eb(handle, inode, *last_eb_bh,
+ status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- status = ocfs2_et_root_journal_access(handle, inode, et,
+ status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (eb_bh) {
- status = ocfs2_journal_access_eb(handle, inode, eb_bh,
+ status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1274,9 +1357,7 @@ bail:
* returns the new extent block so you can add a branch to it
* after this call.
*/
-static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *inode,
+static int ocfs2_shift_tree_depth(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_alloc_context *meta_ac,
struct buffer_head **ret_new_eb_bh)
@@ -1290,7 +1371,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
mlog_entry_void();
- status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
+ status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
&new_eb_bh);
if (status < 0) {
mlog_errno(status);
@@ -1304,7 +1385,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
eb_el = &eb->h_list;
root_el = et->et_root_el;
- status = ocfs2_journal_access_eb(handle, inode, new_eb_bh,
+ status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1323,7 +1404,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
goto bail;
}
- status = ocfs2_et_root_journal_access(handle, inode, et,
+ status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1379,9 +1460,7 @@ bail:
*
* return status < 0 indicates an error.
*/
-static int ocfs2_find_branch_target(struct ocfs2_super *osb,
- struct inode *inode,
- struct ocfs2_extent_tree *et,
+static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
struct buffer_head **target_bh)
{
int status = 0, i;
@@ -1399,19 +1478,21 @@ static int ocfs2_find_branch_target(struct ocfs2_super *osb,
while(le16_to_cpu(el->l_tree_depth) > 1) {
if (le16_to_cpu(el->l_next_free_rec) == 0) {
- ocfs2_error(inode->i_sb, "Dinode %llu has empty "
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has empty "
"extent list (next_free_rec == 0)",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
status = -EIO;
goto bail;
}
i = le16_to_cpu(el->l_next_free_rec) - 1;
blkno = le64_to_cpu(el->l_recs[i].e_blkno);
if (!blkno) {
- ocfs2_error(inode->i_sb, "Dinode %llu has extent "
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has extent "
"list where extent # %d has no physical "
"block start",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, i);
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
status = -EIO;
goto bail;
}
@@ -1419,7 +1500,7 @@ static int ocfs2_find_branch_target(struct ocfs2_super *osb,
brelse(bh);
bh = NULL;
- status = ocfs2_read_extent_block(inode, blkno, &bh);
+ status = ocfs2_read_extent_block(et->et_ci, blkno, &bh);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -1460,20 +1541,18 @@ bail:
*
* *last_eb_bh will be updated by ocfs2_add_branch().
*/
-static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
- struct ocfs2_extent_tree *et, int *final_depth,
- struct buffer_head **last_eb_bh,
+static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
+ int *final_depth, struct buffer_head **last_eb_bh,
struct ocfs2_alloc_context *meta_ac)
{
int ret, shift;
struct ocfs2_extent_list *el = et->et_root_el;
int depth = le16_to_cpu(el->l_tree_depth);
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *bh = NULL;
BUG_ON(meta_ac == NULL);
- shift = ocfs2_find_branch_target(osb, inode, et, &bh);
+ shift = ocfs2_find_branch_target(et, &bh);
if (shift < 0) {
ret = shift;
mlog_errno(ret);
@@ -1490,8 +1569,7 @@ static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
/* ocfs2_shift_tree_depth will return us a buffer with
* the new extent block (so we can pass that to
* ocfs2_add_branch). */
- ret = ocfs2_shift_tree_depth(osb, handle, inode, et,
- meta_ac, &bh);
+ ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -1517,7 +1595,7 @@ static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
/* call ocfs2_add_branch to add the final part of the tree with
* the new data. */
mlog(0, "add branch. bh = %p\n", bh);
- ret = ocfs2_add_branch(osb, handle, inode, et, bh, last_eb_bh,
+ ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
meta_ac);
if (ret < 0) {
mlog_errno(ret);
@@ -1687,7 +1765,7 @@ set_and_inc:
*
* The array index of the subtree root is passed back.
*/
-static int ocfs2_find_subtree_root(struct inode *inode,
+static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
struct ocfs2_path *left,
struct ocfs2_path *right)
{
@@ -1705,10 +1783,10 @@ static int ocfs2_find_subtree_root(struct inode *inode,
* The caller didn't pass two adjacent paths.
*/
mlog_bug_on_msg(i > left->p_tree_depth,
- "Inode %lu, left depth %u, right depth %u\n"
+ "Owner %llu, left depth %u, right depth %u\n"
"left leaf blk %llu, right leaf blk %llu\n",
- inode->i_ino, left->p_tree_depth,
- right->p_tree_depth,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ left->p_tree_depth, right->p_tree_depth,
(unsigned long long)path_leaf_bh(left)->b_blocknr,
(unsigned long long)path_leaf_bh(right)->b_blocknr);
} while (left->p_node[i].bh->b_blocknr ==
@@ -1725,7 +1803,7 @@ typedef void (path_insert_t)(void *, struct buffer_head *);
* This code can be called with a cpos larger than the tree, in which
* case it will return the rightmost path.
*/
-static int __ocfs2_find_path(struct inode *inode,
+static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
struct ocfs2_extent_list *root_el, u32 cpos,
path_insert_t *func, void *data)
{
@@ -1736,15 +1814,14 @@ static int __ocfs2_find_path(struct inode *inode,
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
el = root_el;
while (el->l_tree_depth) {
if (le16_to_cpu(el->l_next_free_rec) == 0) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has empty extent list at "
+ ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ "Owner %llu has empty extent list at "
"depth %u\n",
- (unsigned long long)oi->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
le16_to_cpu(el->l_tree_depth));
ret = -EROFS;
goto out;
@@ -1767,10 +1844,10 @@ static int __ocfs2_find_path(struct inode *inode,
blkno = le64_to_cpu(el->l_recs[i].e_blkno);
if (blkno == 0) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has bad blkno in extent list "
+ ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ "Owner %llu has bad blkno in extent list "
"at depth %u (index %d)\n",
- (unsigned long long)oi->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
le16_to_cpu(el->l_tree_depth), i);
ret = -EROFS;
goto out;
@@ -1778,7 +1855,7 @@ static int __ocfs2_find_path(struct inode *inode,
brelse(bh);
bh = NULL;
- ret = ocfs2_read_extent_block(inode, blkno, &bh);
+ ret = ocfs2_read_extent_block(ci, blkno, &bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1789,10 +1866,10 @@ static int __ocfs2_find_path(struct inode *inode,
if (le16_to_cpu(el->l_next_free_rec) >
le16_to_cpu(el->l_count)) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has bad count in extent list "
+ ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ "Owner %llu has bad count in extent list "
"at block %llu (next free=%u, count=%u)\n",
- (unsigned long long)oi->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)bh->b_blocknr,
le16_to_cpu(el->l_next_free_rec),
le16_to_cpu(el->l_count));
@@ -1836,14 +1913,14 @@ static void find_path_ins(void *data, struct buffer_head *bh)
ocfs2_path_insert_eb(fp->path, fp->index, bh);
fp->index++;
}
-static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
- u32 cpos)
+int ocfs2_find_path(struct ocfs2_caching_info *ci,
+ struct ocfs2_path *path, u32 cpos)
{
struct find_path_data data;
data.index = 1;
data.path = path;
- return __ocfs2_find_path(inode, path_root_el(path), cpos,
+ return __ocfs2_find_path(ci, path_root_el(path), cpos,
find_path_ins, &data);
}
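/*
 * Illustrative sketch, not part of this patch: path lookups are now keyed
 * by the tree owner's caching info, so a generic btree caller would do
 * roughly:
 *
 *	ret = ocfs2_find_path(et->et_ci, left_path, cpos);
 *	if (ret) {
 *		mlog_errno(ret);
 *		goto out;
 *	}
 *
 * 'et', 'left_path' and 'cpos' are assumed to come from the calling
 * function.
 */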
@@ -1868,13 +1945,14 @@ static void find_leaf_ins(void *data, struct buffer_head *bh)
*
* This function doesn't handle non-btree extent lists.
*/
-int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
- u32 cpos, struct buffer_head **leaf_bh)
+int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
+ struct ocfs2_extent_list *root_el, u32 cpos,
+ struct buffer_head **leaf_bh)
{
int ret;
struct buffer_head *bh = NULL;
- ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh);
+ ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1980,7 +2058,7 @@ static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
* - When we've adjusted the last extent record in the left path leaf and the
* 1st extent record in the right path leaf during cross extent block merge.
*/
-static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
+static void ocfs2_complete_edge_insert(handle_t *handle,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index)
@@ -2058,8 +2136,8 @@ static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
mlog_errno(ret);
}
-static int ocfs2_rotate_subtree_right(struct inode *inode,
- handle_t *handle,
+static int ocfs2_rotate_subtree_right(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index)
@@ -2075,10 +2153,10 @@ static int ocfs2_rotate_subtree_right(struct inode *inode,
left_el = path_leaf_el(left_path);
if (left_el->l_next_free_rec != left_el->l_count) {
- ocfs2_error(inode->i_sb,
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
"Inode %llu has non-full interior leaf node %llu"
"(next free = %u)",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)left_leaf_bh->b_blocknr,
le16_to_cpu(left_el->l_next_free_rec));
return -EROFS;
@@ -2094,7 +2172,7 @@ static int ocfs2_rotate_subtree_right(struct inode *inode,
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
- ret = ocfs2_path_bh_journal_access(handle, inode, right_path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
@@ -2102,14 +2180,14 @@ static int ocfs2_rotate_subtree_right(struct inode *inode,
}
for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
@@ -2123,7 +2201,7 @@ static int ocfs2_rotate_subtree_right(struct inode *inode,
/* This is a code error, not a disk corruption. */
mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
"because rightmost leaf block %llu is empty\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)right_leaf_bh->b_blocknr);
ocfs2_create_empty_extent(right_el);
@@ -2157,8 +2235,8 @@ static int ocfs2_rotate_subtree_right(struct inode *inode,
goto out;
}
- ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
- subtree_index);
+ ocfs2_complete_edge_insert(handle, left_path, right_path,
+ subtree_index);
out:
return ret;
@@ -2248,10 +2326,18 @@ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int op_credits,
struct ocfs2_path *path)
{
+ int ret;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
- if (handle->h_buffer_credits < credits)
- return ocfs2_extend_trans(handle, credits);
+ if (handle->h_buffer_credits < credits) {
+ ret = ocfs2_extend_trans(handle,
+ credits - handle->h_buffer_credits);
+ if (ret)
+ return ret;
+
+ if (unlikely(handle->h_buffer_credits < credits))
+ return ocfs2_extend_trans(handle, credits);
+ }
return 0;
}
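/*
 * Worked example for the hunk above, not part of this patch: with
 * p_tree_depth == 3, subtree_depth == 1 and op_credits == 0 the rotation
 * needs credits = (3 - 1) * 2 + 1 + 0 = 5.  If the handle currently holds
 * 3 buffer credits, the code now asks the journal for the 2-credit
 * difference first and only falls back to requesting the full 5 credits
 * if the handle still comes up short after that extension.
 */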
@@ -2321,8 +2407,8 @@ static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
* *ret_left_path will contain a valid path which can be passed to
* ocfs2_insert_path().
*/
-static int ocfs2_rotate_tree_right(struct inode *inode,
- handle_t *handle,
+static int ocfs2_rotate_tree_right(handle_t *handle,
+ struct ocfs2_extent_tree *et,
enum ocfs2_split_type split,
u32 insert_cpos,
struct ocfs2_path *right_path,
@@ -2331,6 +2417,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
int ret, start, orig_credits = handle->h_buffer_credits;
u32 cpos;
struct ocfs2_path *left_path = NULL;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
*ret_left_path = NULL;
@@ -2341,7 +2428,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
goto out;
}
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos);
+ ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2379,7 +2466,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
insert_cpos, cpos);
- ret = ocfs2_find_path(inode, left_path, cpos);
+ ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2387,10 +2474,11 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
mlog_bug_on_msg(path_leaf_bh(left_path) ==
path_leaf_bh(right_path),
- "Inode %lu: error during insert of %u "
+ "Owner %llu: error during insert of %u "
"(left path cpos %u) results in two identical "
"paths ending at %llu\n",
- inode->i_ino, insert_cpos, cpos,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ insert_cpos, cpos,
(unsigned long long)
path_leaf_bh(left_path)->b_blocknr);
@@ -2416,7 +2504,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
goto out_ret_path;
}
- start = ocfs2_find_subtree_root(inode, left_path, right_path);
+ start = ocfs2_find_subtree_root(et, left_path, right_path);
mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
start,
@@ -2430,7 +2518,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
goto out;
}
- ret = ocfs2_rotate_subtree_right(inode, handle, left_path,
+ ret = ocfs2_rotate_subtree_right(handle, et, left_path,
right_path, start);
if (ret) {
mlog_errno(ret);
@@ -2462,8 +2550,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode,
*/
ocfs2_mv_path(right_path, left_path);
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
- &cpos);
+ ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2477,7 +2564,8 @@ out_ret_path:
return ret;
}
-static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
+static int ocfs2_update_edge_lengths(handle_t *handle,
+ struct ocfs2_extent_tree *et,
int subtree_index, struct ocfs2_path *path)
{
int i, idx, ret;
@@ -2502,7 +2590,7 @@ static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_journal_access_path(inode, handle, path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2532,7 +2620,8 @@ out:
return ret;
}
-static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
+static void ocfs2_unlink_path(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_cached_dealloc_ctxt *dealloc,
struct ocfs2_path *path, int unlink_start)
{
@@ -2554,12 +2643,12 @@ static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
mlog(ML_ERROR,
"Inode %llu, attempted to remove extent block "
"%llu with %u records\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)le64_to_cpu(eb->h_blkno),
le16_to_cpu(el->l_next_free_rec));
ocfs2_journal_dirty(handle, bh);
- ocfs2_remove_from_cache(inode, bh);
+ ocfs2_remove_from_cache(et->et_ci, bh);
continue;
}
@@ -2572,11 +2661,12 @@ static void ocfs2_unlink_path(struct inode *inode, handle_t *handle,
if (ret)
mlog_errno(ret);
- ocfs2_remove_from_cache(inode, bh);
+ ocfs2_remove_from_cache(et->et_ci, bh);
}
}
-static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle,
+static void ocfs2_unlink_subtree(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index,
@@ -2607,17 +2697,17 @@ static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle,
ocfs2_journal_dirty(handle, root_bh);
ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
- ocfs2_unlink_path(inode, handle, dealloc, right_path,
+ ocfs2_unlink_path(handle, et, dealloc, right_path,
subtree_index + 1);
}
-static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
+static int ocfs2_rotate_subtree_left(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
int subtree_index,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- int *deleted,
- struct ocfs2_extent_tree *et)
+ int *deleted)
{
int ret, i, del_right_subtree = 0, right_has_empty = 0;
struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
@@ -2653,7 +2743,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
return -EAGAIN;
if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
- ret = ocfs2_journal_access_eb(handle, inode,
+ ret = ocfs2_journal_access_eb(handle, et->et_ci,
path_leaf_bh(right_path),
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
@@ -2672,7 +2762,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
* We have to update i_last_eb_blk during the meta
* data delete.
*/
- ret = ocfs2_et_root_journal_access(handle, inode, et,
+ ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -2688,7 +2778,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
*/
BUG_ON(right_has_empty && !del_right_subtree);
- ret = ocfs2_path_bh_journal_access(handle, inode, right_path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
@@ -2696,14 +2786,14 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
}
for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
@@ -2740,9 +2830,9 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
mlog_errno(ret);
if (del_right_subtree) {
- ocfs2_unlink_subtree(inode, handle, left_path, right_path,
+ ocfs2_unlink_subtree(handle, et, left_path, right_path,
subtree_index, dealloc);
- ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
+ ret = ocfs2_update_edge_lengths(handle, et, subtree_index,
left_path);
if (ret) {
mlog_errno(ret);
@@ -2766,7 +2856,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle,
*deleted = 1;
} else
- ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
+ ocfs2_complete_edge_insert(handle, left_path, right_path,
subtree_index);
out:
@@ -2852,8 +2942,8 @@ out:
return ret;
}
-static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode,
- handle_t *handle,
+static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path)
{
int ret;
@@ -2863,7 +2953,7 @@ static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode,
if (!ocfs2_is_empty_extent(&el->l_recs[0]))
return 0;
- ret = ocfs2_path_bh_journal_access(handle, inode, path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
path_num_items(path) - 1);
if (ret) {
mlog_errno(ret);
@@ -2880,24 +2970,24 @@ out:
return ret;
}
-static int __ocfs2_rotate_tree_left(struct inode *inode,
- handle_t *handle, int orig_credits,
+static int __ocfs2_rotate_tree_left(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ int orig_credits,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- struct ocfs2_path **empty_extent_path,
- struct ocfs2_extent_tree *et)
+ struct ocfs2_path **empty_extent_path)
{
int ret, subtree_root, deleted;
u32 right_cpos;
struct ocfs2_path *left_path = NULL;
struct ocfs2_path *right_path = NULL;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])));
*empty_extent_path = NULL;
- ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path,
- &right_cpos);
+ ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2920,13 +3010,13 @@ static int __ocfs2_rotate_tree_left(struct inode *inode,
}
while (right_cpos) {
- ret = ocfs2_find_path(inode, right_path, right_cpos);
+ ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
- subtree_root = ocfs2_find_subtree_root(inode, left_path,
+ subtree_root = ocfs2_find_subtree_root(et, left_path,
right_path);
mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
@@ -2946,16 +3036,16 @@ static int __ocfs2_rotate_tree_left(struct inode *inode,
* Caller might still want to make changes to the
* tree root, so re-add it to the journal here.
*/
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, 0);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_rotate_subtree_left(inode, handle, left_path,
+ ret = ocfs2_rotate_subtree_left(handle, et, left_path,
right_path, subtree_root,
- dealloc, &deleted, et);
+ dealloc, &deleted);
if (ret == -EAGAIN) {
/*
* The rotation has to temporarily stop due to
@@ -2982,7 +3072,7 @@ static int __ocfs2_rotate_tree_left(struct inode *inode,
ocfs2_mv_path(left_path, right_path);
- ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
+ ret = ocfs2_find_cpos_for_right_leaf(sb, left_path,
&right_cpos);
if (ret) {
mlog_errno(ret);
@@ -2997,10 +3087,10 @@ out:
return ret;
}
-static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
+static int ocfs2_remove_rightmost_path(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
- struct ocfs2_cached_dealloc_ctxt *dealloc,
- struct ocfs2_extent_tree *et)
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, subtree_index;
u32 cpos;
@@ -3009,7 +3099,7 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
struct ocfs2_extent_list *el;
- ret = ocfs2_et_sanity_check(inode, et);
+ ret = ocfs2_et_sanity_check(et);
if (ret)
goto out;
/*
@@ -3024,13 +3114,14 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_journal_access_path(inode, handle, path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
+ ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
+ path, &cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3048,23 +3139,23 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_find_path(inode, left_path, cpos);
+ ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_journal_access_path(inode, handle, left_path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
if (ret) {
mlog_errno(ret);
goto out;
}
- subtree_index = ocfs2_find_subtree_root(inode, left_path, path);
+ subtree_index = ocfs2_find_subtree_root(et, left_path, path);
- ocfs2_unlink_subtree(inode, handle, left_path, path,
+ ocfs2_unlink_subtree(handle, et, left_path, path,
subtree_index, dealloc);
- ret = ocfs2_update_edge_lengths(inode, handle, subtree_index,
+ ret = ocfs2_update_edge_lengths(handle, et, subtree_index,
left_path);
if (ret) {
mlog_errno(ret);
@@ -3078,10 +3169,10 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle,
* 'path' is also the leftmost path which
* means it must be the only one. This gets
* handled differently because we want to
- * revert the inode back to having extents
+ * revert the root back to having extents
* in-line.
*/
- ocfs2_unlink_path(inode, handle, dealloc, path, 1);
+ ocfs2_unlink_path(handle, et, dealloc, path, 1);
el = et->et_root_el;
el->l_tree_depth = 0;
@@ -3114,10 +3205,10 @@ out:
* the rightmost tree leaf record is removed so the caller is
* responsible for detecting and correcting that.
*/
-static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle,
+static int ocfs2_rotate_tree_left(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
- struct ocfs2_cached_dealloc_ctxt *dealloc,
- struct ocfs2_extent_tree *et)
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, orig_credits = handle->h_buffer_credits;
struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
@@ -3134,8 +3225,7 @@ rightmost_no_delete:
* Inline extents. This is trivially handled, so do
* it up front.
*/
- ret = ocfs2_rotate_rightmost_leaf_left(inode, handle,
- path);
+ ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path);
if (ret)
mlog_errno(ret);
goto out;
@@ -3151,7 +3241,7 @@ rightmost_no_delete:
*
* 1) is handled via ocfs2_rotate_rightmost_leaf_left()
* 2a) we need the left branch so that we can update it with the unlink
- * 2b) we need to bring the inode back to inline extents.
+ * 2b) we need to bring the root back to inline extents.
*/
eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
@@ -3167,9 +3257,9 @@ rightmost_no_delete:
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ret = -EIO;
- ocfs2_error(inode->i_sb,
- "Inode %llu has empty extent block at %llu",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has empty extent block at %llu",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
(unsigned long long)le64_to_cpu(eb->h_blkno));
goto out;
}
@@ -3183,8 +3273,8 @@ rightmost_no_delete:
* nonempty list.
*/
- ret = ocfs2_remove_rightmost_path(inode, handle, path,
- dealloc, et);
+ ret = ocfs2_remove_rightmost_path(handle, et, path,
+ dealloc);
if (ret)
mlog_errno(ret);
goto out;
@@ -3195,8 +3285,8 @@ rightmost_no_delete:
* and restarting from there.
*/
try_rotate:
- ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path,
- dealloc, &restart_path, et);
+ ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path,
+ dealloc, &restart_path);
if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out;
@@ -3206,9 +3296,9 @@ try_rotate:
tmp_path = restart_path;
restart_path = NULL;
- ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits,
+ ret = __ocfs2_rotate_tree_left(handle, et, orig_credits,
tmp_path, dealloc,
- &restart_path, et);
+ &restart_path);
if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out;
@@ -3259,7 +3349,7 @@ static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
}
}
-static int ocfs2_get_right_path(struct inode *inode,
+static int ocfs2_get_right_path(struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path **ret_right_path)
{
@@ -3276,8 +3366,8 @@ static int ocfs2_get_right_path(struct inode *inode,
left_el = path_leaf_el(left_path);
BUG_ON(left_el->l_next_free_rec != left_el->l_count);
- ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path,
- &right_cpos);
+ ret = ocfs2_find_cpos_for_right_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
+ left_path, &right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3293,7 +3383,7 @@ static int ocfs2_get_right_path(struct inode *inode,
goto out;
}
- ret = ocfs2_find_path(inode, right_path, right_cpos);
+ ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3313,9 +3403,9 @@ out:
* For index == l_count - 1, the "next" means the 1st extent rec of the
* next extent block.
*/
-static int ocfs2_merge_rec_right(struct inode *inode,
- struct ocfs2_path *left_path,
+static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *split_rec,
int index)
{
@@ -3336,7 +3426,7 @@ static int ocfs2_merge_rec_right(struct inode *inode,
if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
/* we meet with a cross extent block merge. */
- ret = ocfs2_get_right_path(inode, left_path, &right_path);
+ ret = ocfs2_get_right_path(et, left_path, &right_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3355,8 +3445,8 @@ static int ocfs2_merge_rec_right(struct inode *inode,
le16_to_cpu(left_rec->e_leaf_clusters) !=
le32_to_cpu(right_rec->e_cpos));
- subtree_index = ocfs2_find_subtree_root(inode,
- left_path, right_path);
+ subtree_index = ocfs2_find_subtree_root(et, left_path,
+ right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
handle->h_buffer_credits,
@@ -3369,7 +3459,7 @@ static int ocfs2_merge_rec_right(struct inode *inode,
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
- ret = ocfs2_path_bh_journal_access(handle, inode, right_path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
@@ -3378,14 +3468,14 @@ static int ocfs2_merge_rec_right(struct inode *inode,
for (i = subtree_index + 1;
i < path_num_items(right_path); i++) {
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
@@ -3398,7 +3488,7 @@ static int ocfs2_merge_rec_right(struct inode *inode,
right_rec = &el->l_recs[index + 1];
}
- ret = ocfs2_path_bh_journal_access(handle, inode, left_path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path,
path_num_items(left_path) - 1);
if (ret) {
mlog_errno(ret);
@@ -3409,7 +3499,8 @@ static int ocfs2_merge_rec_right(struct inode *inode,
le32_add_cpu(&right_rec->e_cpos, -split_clusters);
le64_add_cpu(&right_rec->e_blkno,
- -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
+ -ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
+ split_clusters));
le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);
ocfs2_cleanup_merge(el, index);
@@ -3423,8 +3514,8 @@ static int ocfs2_merge_rec_right(struct inode *inode,
if (ret)
mlog_errno(ret);
- ocfs2_complete_edge_insert(inode, handle, left_path,
- right_path, subtree_index);
+ ocfs2_complete_edge_insert(handle, left_path, right_path,
+ subtree_index);
}
out:
if (right_path)
@@ -3432,7 +3523,7 @@ out:
return ret;
}
-static int ocfs2_get_left_path(struct inode *inode,
+static int ocfs2_get_left_path(struct ocfs2_extent_tree *et,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
{
@@ -3445,7 +3536,7 @@ static int ocfs2_get_left_path(struct inode *inode,
/* This function shouldn't be called for non-trees. */
BUG_ON(right_path->p_tree_depth == 0);
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
+ ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
right_path, &left_cpos);
if (ret) {
mlog_errno(ret);
@@ -3462,7 +3553,7 @@ static int ocfs2_get_left_path(struct inode *inode,
goto out;
}
- ret = ocfs2_find_path(inode, left_path, left_cpos);
+ ret = ocfs2_find_path(et->et_ci, left_path, left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3485,12 +3576,11 @@ out:
* remove the rightmost leaf extent block in the right_path and change
* the right path to indicate the new rightmost path.
*/
-static int ocfs2_merge_rec_left(struct inode *inode,
- struct ocfs2_path *right_path,
+static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *split_rec,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- struct ocfs2_extent_tree *et,
int index)
{
int ret, i, subtree_index = 0, has_empty_extent = 0;
@@ -3508,7 +3598,7 @@ static int ocfs2_merge_rec_left(struct inode *inode,
right_rec = &el->l_recs[index];
if (index == 0) {
/* we meet with a cross extent block merge. */
- ret = ocfs2_get_left_path(inode, right_path, &left_path);
+ ret = ocfs2_get_left_path(et, right_path, &left_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3524,8 +3614,8 @@ static int ocfs2_merge_rec_left(struct inode *inode,
le16_to_cpu(left_rec->e_leaf_clusters) !=
le32_to_cpu(split_rec->e_cpos));
- subtree_index = ocfs2_find_subtree_root(inode,
- left_path, right_path);
+ subtree_index = ocfs2_find_subtree_root(et, left_path,
+ right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
handle->h_buffer_credits,
@@ -3538,7 +3628,7 @@ static int ocfs2_merge_rec_left(struct inode *inode,
root_bh = left_path->p_node[subtree_index].bh;
BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
- ret = ocfs2_path_bh_journal_access(handle, inode, right_path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
subtree_index);
if (ret) {
mlog_errno(ret);
@@ -3547,14 +3637,14 @@ static int ocfs2_merge_rec_left(struct inode *inode,
for (i = subtree_index + 1;
i < path_num_items(right_path); i++) {
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
right_path, i);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_path_bh_journal_access(handle, inode,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
left_path, i);
if (ret) {
mlog_errno(ret);
@@ -3567,7 +3657,7 @@ static int ocfs2_merge_rec_left(struct inode *inode,
has_empty_extent = 1;
}
- ret = ocfs2_path_bh_journal_access(handle, inode, right_path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
path_num_items(right_path) - 1);
if (ret) {
mlog_errno(ret);
@@ -3586,7 +3676,8 @@ static int ocfs2_merge_rec_left(struct inode *inode,
le32_add_cpu(&right_rec->e_cpos, split_clusters);
le64_add_cpu(&right_rec->e_blkno,
- ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
+ ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
+ split_clusters));
le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);
ocfs2_cleanup_merge(el, index);
@@ -3608,9 +3699,9 @@ static int ocfs2_merge_rec_left(struct inode *inode,
if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
le16_to_cpu(el->l_next_free_rec) == 1) {
- ret = ocfs2_remove_rightmost_path(inode, handle,
+ ret = ocfs2_remove_rightmost_path(handle, et,
right_path,
- dealloc, et);
+ dealloc);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3622,7 +3713,7 @@ static int ocfs2_merge_rec_left(struct inode *inode,
ocfs2_mv_path(right_path, left_path);
left_path = NULL;
} else
- ocfs2_complete_edge_insert(inode, handle, left_path,
+ ocfs2_complete_edge_insert(handle, left_path,
right_path, subtree_index);
}
out:
@@ -3631,15 +3722,13 @@ out:
return ret;
}
-static int ocfs2_try_to_merge_extent(struct inode *inode,
- handle_t *handle,
+static int ocfs2_try_to_merge_extent(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
int split_index,
struct ocfs2_extent_rec *split_rec,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- struct ocfs2_merge_ctxt *ctxt,
- struct ocfs2_extent_tree *et)
-
+ struct ocfs2_merge_ctxt *ctxt)
{
int ret = 0;
struct ocfs2_extent_list *el = path_leaf_el(path);
@@ -3655,8 +3744,7 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
* extents - having more than one in a leaf is
* illegal.
*/
- ret = ocfs2_rotate_tree_left(inode, handle, path,
- dealloc, et);
+ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3685,8 +3773,7 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
* previous extent block. It is more efficient and easier
* if we do merge_right first and merge_left later.
*/
- ret = ocfs2_merge_rec_right(inode, path,
- handle, split_rec,
+ ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
split_index);
if (ret) {
mlog_errno(ret);
@@ -3699,8 +3786,7 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
/* The merge left us with an empty extent, remove it. */
- ret = ocfs2_rotate_tree_left(inode, handle, path,
- dealloc, et);
+ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3712,18 +3798,15 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
* Note that we don't pass split_rec here on purpose -
* we've merged it into the rec already.
*/
- ret = ocfs2_merge_rec_left(inode, path,
- handle, rec,
- dealloc, et,
- split_index);
+ ret = ocfs2_merge_rec_left(path, handle, et, rec,
+ dealloc, split_index);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_rotate_tree_left(inode, handle, path,
- dealloc, et);
+ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
/*
* Error from this last rotate is not critical, so
* print but don't bubble it up.
@@ -3740,19 +3823,16 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
* the record on the left (hence the left merge).
*/
if (ctxt->c_contig_type == CONTIG_RIGHT) {
- ret = ocfs2_merge_rec_left(inode,
- path,
- handle, split_rec,
- dealloc, et,
+ ret = ocfs2_merge_rec_left(path, handle, et,
+ split_rec, dealloc,
split_index);
if (ret) {
mlog_errno(ret);
goto out;
}
} else {
- ret = ocfs2_merge_rec_right(inode,
- path,
- handle, split_rec,
+ ret = ocfs2_merge_rec_right(path, handle,
+ et, split_rec,
split_index);
if (ret) {
mlog_errno(ret);
@@ -3765,8 +3845,8 @@ static int ocfs2_try_to_merge_extent(struct inode *inode,
* The merge may have left an empty extent in
* our leaf. Try to rotate it away.
*/
- ret = ocfs2_rotate_tree_left(inode, handle, path,
- dealloc, et);
+ ret = ocfs2_rotate_tree_left(handle, et, path,
+ dealloc);
if (ret)
mlog_errno(ret);
ret = 0;
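/*
 * Illustrative call order for a CONTIG_LEFTRIGHT merge, not part of this
 * patch and with error handling omitted: the split record is folded into
 * its right neighbour, the resulting empty extent is rotated away, and
 * the combined record is then merged into its left neighbour, matching
 * the sequence in ocfs2_try_to_merge_extent() above:
 *
 *	ocfs2_merge_rec_right(path, handle, et, split_rec, split_index);
 *	ocfs2_rotate_tree_left(handle, et, path, dealloc);
 *	ocfs2_merge_rec_left(path, handle, et, rec, dealloc, split_index);
 */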
@@ -3812,10 +3892,10 @@ static void ocfs2_subtract_from_rec(struct super_block *sb,
* list. If this leaf is part of an allocation tree, it is assumed
* that the tree above has been prepared.
*/
-static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
+static void ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *insert_rec,
struct ocfs2_extent_list *el,
- struct ocfs2_insert_type *insert,
- struct inode *inode)
+ struct ocfs2_insert_type *insert)
{
int i = insert->ins_contig_index;
unsigned int range;
@@ -3827,7 +3907,8 @@ static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos));
BUG_ON(i == -1);
rec = &el->l_recs[i];
- ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec,
+ ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
+ insert->ins_split, rec,
insert_rec);
goto rotate;
}
@@ -3869,10 +3950,10 @@ static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
le16_to_cpu(el->l_count),
- "inode %lu, depth %u, count %u, next free %u, "
+ "owner %llu, depth %u, count %u, next free %u, "
"rec.cpos %u, rec.clusters %u, "
"insert.cpos %u, insert.clusters %u\n",
- inode->i_ino,
+ ocfs2_metadata_cache_owner(et->et_ci),
le16_to_cpu(el->l_tree_depth),
le16_to_cpu(el->l_count),
le16_to_cpu(el->l_next_free_rec),
@@ -3900,8 +3981,8 @@ rotate:
ocfs2_rotate_leaf(el, insert_rec);
}
-static void ocfs2_adjust_rightmost_records(struct inode *inode,
- handle_t *handle,
+static void ocfs2_adjust_rightmost_records(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec)
{
@@ -3919,9 +4000,9 @@ static void ocfs2_adjust_rightmost_records(struct inode *inode,
next_free = le16_to_cpu(el->l_next_free_rec);
if (next_free == 0) {
- ocfs2_error(inode->i_sb,
- "Dinode %llu has a bad extent list",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has a bad extent list",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
ret = -EIO;
return;
}
@@ -3941,7 +4022,8 @@ static void ocfs2_adjust_rightmost_records(struct inode *inode,
}
}
-static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
+static int ocfs2_append_rec_to_path(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *insert_rec,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
@@ -3969,8 +4051,8 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
(next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
u32 left_cpos;
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
- &left_cpos);
+ ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
+ right_path, &left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3992,7 +4074,8 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_find_path(inode, left_path, left_cpos);
+ ret = ocfs2_find_path(et->et_ci, left_path,
+ left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -4005,13 +4088,13 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
}
}
- ret = ocfs2_journal_access_path(inode, handle, right_path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
if (ret) {
mlog_errno(ret);
goto out;
}
- ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec);
+ ocfs2_adjust_rightmost_records(handle, et, right_path, insert_rec);
*ret_left_path = left_path;
ret = 0;
@@ -4022,7 +4105,7 @@ out:
return ret;
}
-static void ocfs2_split_record(struct inode *inode,
+static void ocfs2_split_record(struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
struct ocfs2_extent_rec *split_rec,
@@ -4095,7 +4178,8 @@ static void ocfs2_split_record(struct inode *inode,
}
rec = &el->l_recs[index];
- ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec);
+ ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
+ split, rec, split_rec);
ocfs2_rotate_leaf(insert_el, split_rec);
}
@@ -4107,8 +4191,8 @@ static void ocfs2_split_record(struct inode *inode,
* in. left_path should only be passed in if we need to update that
* portion of the tree after an edge insert.
*/
-static int ocfs2_insert_path(struct inode *inode,
- handle_t *handle,
+static int ocfs2_insert_path(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *left_path,
struct ocfs2_path *right_path,
struct ocfs2_extent_rec *insert_rec,
@@ -4134,7 +4218,7 @@ static int ocfs2_insert_path(struct inode *inode,
goto out;
}
- ret = ocfs2_journal_access_path(inode, handle, left_path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -4145,7 +4229,7 @@ static int ocfs2_insert_path(struct inode *inode,
* Pass both paths to the journal. The majority of inserts
* will be touching all components anyway.
*/
- ret = ocfs2_journal_access_path(inode, handle, right_path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -4157,7 +4241,7 @@ static int ocfs2_insert_path(struct inode *inode,
* of splits, but it's easier to just let one separate
* function sort it all out.
*/
- ocfs2_split_record(inode, left_path, right_path,
+ ocfs2_split_record(et, left_path, right_path,
insert_rec, insert->ins_split);
/*
@@ -4171,8 +4255,8 @@ static int ocfs2_insert_path(struct inode *inode,
if (ret)
mlog_errno(ret);
} else
- ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path),
- insert, inode);
+ ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path),
+ insert);
ret = ocfs2_journal_dirty(handle, leaf_bh);
if (ret)
@@ -4185,10 +4269,10 @@ static int ocfs2_insert_path(struct inode *inode,
*
* XXX: Should we extend the transaction here?
*/
- subtree_index = ocfs2_find_subtree_root(inode, left_path,
+ subtree_index = ocfs2_find_subtree_root(et, left_path,
right_path);
- ocfs2_complete_edge_insert(inode, handle, left_path,
- right_path, subtree_index);
+ ocfs2_complete_edge_insert(handle, left_path, right_path,
+ subtree_index);
}
ret = 0;
@@ -4196,8 +4280,7 @@ out:
return ret;
}
-static int ocfs2_do_insert_extent(struct inode *inode,
- handle_t *handle,
+static int ocfs2_do_insert_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
struct ocfs2_extent_rec *insert_rec,
struct ocfs2_insert_type *type)
@@ -4210,7 +4293,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
el = et->et_root_el;
- ret = ocfs2_et_root_journal_access(handle, inode, et,
+ ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -4218,7 +4301,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
}
if (le16_to_cpu(el->l_tree_depth) == 0) {
- ocfs2_insert_at_leaf(insert_rec, el, type, inode);
+ ocfs2_insert_at_leaf(et, insert_rec, el, type);
goto out_update_clusters;
}
@@ -4241,7 +4324,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
cpos = UINT_MAX;
}
- ret = ocfs2_find_path(inode, right_path, cpos);
+ ret = ocfs2_find_path(et->et_ci, right_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -4260,7 +4343,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
* can wind up skipping both of these two special cases...
*/
if (rotate) {
- ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split,
+ ret = ocfs2_rotate_tree_right(handle, et, type->ins_split,
le32_to_cpu(insert_rec->e_cpos),
right_path, &left_path);
if (ret) {
@@ -4272,7 +4355,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
* ocfs2_rotate_tree_right() might have extended the
* transaction without re-journaling our tree root.
*/
- ret = ocfs2_et_root_journal_access(handle, inode, et,
+ ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -4280,7 +4363,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
}
} else if (type->ins_appending == APPEND_TAIL
&& type->ins_contig != CONTIG_LEFT) {
- ret = ocfs2_append_rec_to_path(inode, handle, insert_rec,
+ ret = ocfs2_append_rec_to_path(handle, et, insert_rec,
right_path, &left_path);
if (ret) {
mlog_errno(ret);
@@ -4288,7 +4371,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
}
}
- ret = ocfs2_insert_path(inode, handle, left_path, right_path,
+ ret = ocfs2_insert_path(handle, et, left_path, right_path,
insert_rec, type);
if (ret) {
mlog_errno(ret);
@@ -4297,7 +4380,7 @@ static int ocfs2_do_insert_extent(struct inode *inode,
out_update_clusters:
if (type->ins_split == SPLIT_NONE)
- ocfs2_et_update_clusters(inode, et,
+ ocfs2_et_update_clusters(et,
le16_to_cpu(insert_rec->e_leaf_clusters));
ret = ocfs2_journal_dirty(handle, et->et_root_bh);
@@ -4312,7 +4395,8 @@ out:
}
static enum ocfs2_contig_type
-ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
+ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
+ struct ocfs2_path *path,
struct ocfs2_extent_list *el, int index,
struct ocfs2_extent_rec *split_rec)
{
@@ -4324,12 +4408,12 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
struct ocfs2_path *left_path = NULL, *right_path = NULL;
struct buffer_head *bh;
struct ocfs2_extent_block *eb;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
if (index > 0) {
rec = &el->l_recs[index - 1];
} else if (path->p_tree_depth > 0) {
- status = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
- path, &left_cpos);
+ status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
if (status)
goto out;
@@ -4338,7 +4422,8 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
if (!left_path)
goto out;
- status = ocfs2_find_path(inode, left_path, left_cpos);
+ status = ocfs2_find_path(et->et_ci, left_path,
+ left_cpos);
if (status)
goto out;
@@ -4348,7 +4433,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
le16_to_cpu(new_el->l_count)) {
bh = path_leaf_bh(left_path);
eb = (struct ocfs2_extent_block *)bh->b_data;
- ocfs2_error(inode->i_sb,
+ ocfs2_error(sb,
"Extent block #%llu has an "
"invalid l_next_free_rec of "
"%d. It should have "
@@ -4373,7 +4458,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
if (split_rec->e_cpos == el->l_recs[index].e_cpos)
ret = CONTIG_RIGHT;
} else {
- ret = ocfs2_extent_contig(inode, rec, split_rec);
+ ret = ocfs2_et_extent_contig(et, rec, split_rec);
}
}
@@ -4382,8 +4467,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
rec = &el->l_recs[index + 1];
else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
path->p_tree_depth > 0) {
- status = ocfs2_find_cpos_for_right_leaf(inode->i_sb,
- path, &right_cpos);
+ status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
if (status)
goto out;
@@ -4394,7 +4478,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
if (!right_path)
goto out;
- status = ocfs2_find_path(inode, right_path, right_cpos);
+ status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
if (status)
goto out;
@@ -4404,7 +4488,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
bh = path_leaf_bh(right_path);
eb = (struct ocfs2_extent_block *)bh->b_data;
- ocfs2_error(inode->i_sb,
+ ocfs2_error(sb,
"Extent block #%llu has an "
"invalid l_next_free_rec of %d",
(unsigned long long)le64_to_cpu(eb->h_blkno),
@@ -4419,7 +4503,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path,
if (rec) {
enum ocfs2_contig_type contig_type;
- contig_type = ocfs2_extent_contig(inode, rec, split_rec);
+ contig_type = ocfs2_et_extent_contig(et, rec, split_rec);
if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
ret = CONTIG_LEFTRIGHT;
@@ -4436,11 +4520,10 @@ out:
return ret;
}
-static void ocfs2_figure_contig_type(struct inode *inode,
+static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
struct ocfs2_insert_type *insert,
struct ocfs2_extent_list *el,
- struct ocfs2_extent_rec *insert_rec,
- struct ocfs2_extent_tree *et)
+ struct ocfs2_extent_rec *insert_rec)
{
int i;
enum ocfs2_contig_type contig_type = CONTIG_NONE;
@@ -4448,8 +4531,8 @@ static void ocfs2_figure_contig_type(struct inode *inode,
BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
- contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
- insert_rec);
+ contig_type = ocfs2_et_extent_contig(et, &el->l_recs[i],
+ insert_rec);
if (contig_type != CONTIG_NONE) {
insert->ins_contig_index = i;
break;
@@ -4530,8 +4613,7 @@ set_tail_append:
* All of the information is stored on the ocfs2_insert_type
* structure.
*/
-static int ocfs2_figure_insert_type(struct inode *inode,
- struct ocfs2_extent_tree *et,
+static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
struct buffer_head **last_eb_bh,
struct ocfs2_extent_rec *insert_rec,
int *free_records,
@@ -4555,7 +4637,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
* ocfs2_figure_insert_type() and ocfs2_add_branch()
* may want it later.
*/
- ret = ocfs2_read_extent_block(inode,
+ ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&bh);
if (ret) {
@@ -4578,7 +4660,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
le16_to_cpu(el->l_next_free_rec);
if (!insert->ins_tree_depth) {
- ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
+ ocfs2_figure_contig_type(et, insert, el, insert_rec);
ocfs2_figure_appending_type(insert, el, insert_rec);
return 0;
}
@@ -4596,7 +4678,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
* us the rightmost tree path. This is accounted for below in
* the appending code.
*/
- ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos));
+ ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos));
if (ret) {
mlog_errno(ret);
goto out;
@@ -4612,7 +4694,7 @@ static int ocfs2_figure_insert_type(struct inode *inode,
* into two types of appends: simple record append, or a
* rotate inside the tail leaf.
*/
- ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
+ ocfs2_figure_contig_type(et, insert, el, insert_rec);
/*
* The insert code isn't quite ready to deal with all cases of
@@ -4657,13 +4739,11 @@ out:
}
/*
- * Insert an extent into an inode btree.
+ * Insert an extent into a btree.
*
- * The caller needs to update fe->i_clusters
+ * The caller needs to update the owning btree's cluster count.
*/
-int ocfs2_insert_extent(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *inode,
+int ocfs2_insert_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 cpos,
u64 start_blk,
@@ -4677,21 +4757,22 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
struct ocfs2_insert_type insert = {0, };
struct ocfs2_extent_rec rec;
- mlog(0, "add %u clusters at position %u to inode %llu\n",
- new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ mlog(0, "add %u clusters at position %u to owner %llu\n",
+ new_clusters, cpos,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
memset(&rec, 0, sizeof(rec));
rec.e_cpos = cpu_to_le32(cpos);
rec.e_blkno = cpu_to_le64(start_blk);
rec.e_leaf_clusters = cpu_to_le16(new_clusters);
rec.e_flags = flags;
- status = ocfs2_et_insert_check(inode, et, &rec);
+ status = ocfs2_et_insert_check(et, &rec);
if (status) {
mlog_errno(status);
goto bail;
}
- status = ocfs2_figure_insert_type(inode, et, &last_eb_bh, &rec,
+ status = ocfs2_figure_insert_type(et, &last_eb_bh, &rec,
&free_records, &insert);
if (status < 0) {
mlog_errno(status);
@@ -4705,7 +4786,7 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
free_records, insert.ins_tree_depth);
if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
- status = ocfs2_grow_tree(inode, handle, et,
+ status = ocfs2_grow_tree(handle, et,
&insert.ins_tree_depth, &last_eb_bh,
meta_ac);
if (status) {
@@ -4715,11 +4796,11 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
}
/* Finally, we can add clusters. This might rotate the tree for us. */
- status = ocfs2_do_insert_extent(inode, handle, et, &rec, &insert);
+ status = ocfs2_do_insert_extent(handle, et, &rec, &insert);
if (status < 0)
mlog_errno(status);
- else if (et->et_ops == &ocfs2_dinode_et_ops)
- ocfs2_extent_map_insert_rec(inode, &rec);
+ else
+ ocfs2_et_extent_map_insert(et, &rec);
bail:
brelse(last_eb_bh);
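For orientation, a minimal sketch of the post-patch calling convention for ocfs2_insert_extent(), assuming an inode-backed dinode tree and placeholder variables; the converted call site in ocfs2_convert_inline_data_to_extents() further down in this diff follows the same pattern:

	struct ocfs2_extent_tree et;
	int ret;

	/* Bind the tree to the inode's metadata cache, then insert. */
	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ret = ocfs2_insert_extent(handle, &et, cpos, start_blk,
				  new_clusters, flags, meta_ac);
	if (ret)
		mlog_errno(ret);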
@@ -4735,13 +4816,11 @@ bail:
* it is not limited to the file storage. Any extent tree can use this
* function if it implements the proper ocfs2_extent_tree.
*/
-int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
- struct inode *inode,
+int ocfs2_add_clusters_in_btree(handle_t *handle,
+ struct ocfs2_extent_tree *et,
u32 *logical_offset,
u32 clusters_to_add,
int mark_unwritten,
- struct ocfs2_extent_tree *et,
- handle_t *handle,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret)
@@ -4752,13 +4831,15 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
u32 bit_off, num_bits;
u64 block;
u8 flags = 0;
+ struct ocfs2_super *osb =
+ OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
BUG_ON(!clusters_to_add);
if (mark_unwritten)
flags = OCFS2_EXT_UNWRITTEN;
- free_extents = ocfs2_num_free_extents(osb, inode, et);
+ free_extents = ocfs2_num_free_extents(osb, et);
if (free_extents < 0) {
status = free_extents;
mlog_errno(status);
@@ -4795,7 +4876,7 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
BUG_ON(num_bits > clusters_to_add);
/* reserve our write early -- insert_extent may update the tree root */
- status = ocfs2_et_root_journal_access(handle, inode, et,
+ status = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -4803,10 +4884,10 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
}
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
- mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
- num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
- status = ocfs2_insert_extent(osb, handle, inode, et,
- *logical_offset, block,
+ mlog(0, "Allocating %u clusters at block %u for owner %llu\n",
+ num_bits, bit_off,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
+ status = ocfs2_insert_extent(handle, et, *logical_offset, block,
num_bits, flags, meta_ac);
if (status < 0) {
mlog_errno(status);
@@ -4856,10 +4937,9 @@ static void ocfs2_make_right_split_rec(struct super_block *sb,
split_rec->e_flags = rec->e_flags;
}
-static int ocfs2_split_and_insert(struct inode *inode,
- handle_t *handle,
- struct ocfs2_path *path,
+static int ocfs2_split_and_insert(handle_t *handle,
struct ocfs2_extent_tree *et,
+ struct ocfs2_path *path,
struct buffer_head **last_eb_bh,
int split_index,
struct ocfs2_extent_rec *orig_split_rec,
@@ -4892,7 +4972,7 @@ leftright:
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
- ret = ocfs2_grow_tree(inode, handle, et,
+ ret = ocfs2_grow_tree(handle, et,
&depth, last_eb_bh, meta_ac);
if (ret) {
mlog_errno(ret);
@@ -4921,8 +5001,8 @@ leftright:
*/
insert.ins_split = SPLIT_RIGHT;
- ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range,
- &rec);
+ ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
+ &tmprec, insert_range, &rec);
split_rec = tmprec;
@@ -4930,7 +5010,7 @@ leftright:
do_leftright = 1;
}
- ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert);
+ ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
if (ret) {
mlog_errno(ret);
goto out;
@@ -4946,7 +5026,7 @@ leftright:
ocfs2_reinit_path(path, 1);
cpos = le32_to_cpu(split_rec.e_cpos);
- ret = ocfs2_find_path(inode, path, cpos);
+ ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -4961,8 +5041,8 @@ out:
return ret;
}
-static int ocfs2_replace_extent_rec(struct inode *inode,
- handle_t *handle,
+static int ocfs2_replace_extent_rec(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path,
struct ocfs2_extent_list *el,
int split_index,
@@ -4970,7 +5050,7 @@ static int ocfs2_replace_extent_rec(struct inode *inode,
{
int ret;
- ret = ocfs2_path_bh_journal_access(handle, inode, path,
+ ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
path_num_items(path) - 1);
if (ret) {
mlog_errno(ret);
@@ -4985,9 +5065,8 @@ out:
}
/*
- * Mark part or all of the extent record at split_index in the leaf
- * pointed to by path as written. This removes the unwritten
- * extent flag.
+ * Split part or all of the extent record at split_index in the leaf
+ * pointed to by path. Merge with the contiguous extent record if needed.
*
* Care is taken to handle contiguousness so as to not grow the tree.
*
@@ -5004,14 +5083,13 @@ out:
* have been brought into cache (and pinned via the journal), so the
* extra overhead is not expressed in terms of disk reads.
*/
-static int __ocfs2_mark_extent_written(struct inode *inode,
- struct ocfs2_extent_tree *et,
- handle_t *handle,
- struct ocfs2_path *path,
- int split_index,
- struct ocfs2_extent_rec *split_rec,
- struct ocfs2_alloc_context *meta_ac,
- struct ocfs2_cached_dealloc_ctxt *dealloc)
+int ocfs2_split_extent(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ struct ocfs2_path *path,
+ int split_index,
+ struct ocfs2_extent_rec *split_rec,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret = 0;
struct ocfs2_extent_list *el = path_leaf_el(path);
@@ -5020,12 +5098,6 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
struct ocfs2_merge_ctxt ctxt;
struct ocfs2_extent_list *rightmost_el;
- if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) {
- ret = -EIO;
- mlog_errno(ret);
- goto out;
- }
-
if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
(le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) {
@@ -5034,19 +5106,19 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
goto out;
}
- ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el,
+ ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el,
split_index,
split_rec);
/*
* The core merge / split code wants to know how much room is
- * left in this inodes allocation tree, so we pass the
+ * left in this allocation tree, so we pass the
* rightmost extent list.
*/
if (path->p_tree_depth) {
struct ocfs2_extent_block *eb;
- ret = ocfs2_read_extent_block(inode,
+ ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
if (ret) {
@@ -5073,19 +5145,18 @@ static int __ocfs2_mark_extent_written(struct inode *inode,
if (ctxt.c_contig_type == CONTIG_NONE) {
if (ctxt.c_split_covers_rec)
- ret = ocfs2_replace_extent_rec(inode, handle,
- path, el,
+ ret = ocfs2_replace_extent_rec(handle, et, path, el,
split_index, split_rec);
else
- ret = ocfs2_split_and_insert(inode, handle, path, et,
+ ret = ocfs2_split_and_insert(handle, et, path,
&last_eb_bh, split_index,
split_rec, meta_ac);
if (ret)
mlog_errno(ret);
} else {
- ret = ocfs2_try_to_merge_extent(inode, handle, path,
+ ret = ocfs2_try_to_merge_extent(handle, et, path,
split_index, split_rec,
- dealloc, &ctxt, et);
+ dealloc, &ctxt);
if (ret)
mlog_errno(ret);
}
@@ -5096,46 +5167,31 @@ out:
}
/*
- * Mark the already-existing extent at cpos as written for len clusters.
+ * Change the flags of the already-existing extent at cpos for len clusters.
+ *
+ * new_flags: the flags we want to set.
+ * clear_flags: the flags we want to clear.
+ * phys: the new physical offset we want this new extent to start from.
*
* If the existing extent is larger than the request, initiate a
* split. An attempt will be made at merging with adjacent extents.
*
* The caller is responsible for passing down meta_ac if we'll need it.
*/
-int ocfs2_mark_extent_written(struct inode *inode,
- struct ocfs2_extent_tree *et,
- handle_t *handle, u32 cpos, u32 len, u32 phys,
- struct ocfs2_alloc_context *meta_ac,
- struct ocfs2_cached_dealloc_ctxt *dealloc)
+int ocfs2_change_extent_flag(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ u32 cpos, u32 len, u32 phys,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ int new_flags, int clear_flags)
{
int ret, index;
- u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys);
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
+ u64 start_blkno = ocfs2_clusters_to_blocks(sb, phys);
struct ocfs2_extent_rec split_rec;
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_list *el;
-
- mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n",
- inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno);
-
- if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
- ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
- "that are being written to, but the feature bit "
- "is not set in the super block.",
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
- ret = -EROFS;
- goto out;
- }
-
- /*
- * XXX: This should be fixed up so that we just re-insert the
- * next extent records.
- *
- * XXX: This is a hack on the extent tree, maybe it should be
- * an op?
- */
- if (et->et_ops == &ocfs2_dinode_et_ops)
- ocfs2_extent_map_trunc(inode, 0);
+ struct ocfs2_extent_rec *rec;
left_path = ocfs2_new_path_from_et(et);
if (!left_path) {
@@ -5144,7 +5200,7 @@ int ocfs2_mark_extent_written(struct inode *inode,
goto out;
}
- ret = ocfs2_find_path(inode, left_path, cpos);
+ ret = ocfs2_find_path(et->et_ci, left_path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5153,34 +5209,102 @@ int ocfs2_mark_extent_written(struct inode *inode,
index = ocfs2_search_extent_list(el, cpos);
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has an extent at cpos %u which can no "
+ ocfs2_error(sb,
+ "Owner %llu has an extent at cpos %u which can no "
"longer be found.\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
+ (unsigned long long)
+ ocfs2_metadata_cache_owner(et->et_ci), cpos);
ret = -EROFS;
goto out;
}
+ ret = -EIO;
+ rec = &el->l_recs[index];
+ if (new_flags && (rec->e_flags & new_flags)) {
+ mlog(ML_ERROR, "Owner %llu tried to set %d flags on an "
+ "extent that already had them",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ new_flags);
+ goto out;
+ }
+
+ if (clear_flags && !(rec->e_flags & clear_flags)) {
+ mlog(ML_ERROR, "Owner %llu tried to clear %d flags on an "
+ "extent that didn't have them",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ clear_flags);
+ goto out;
+ }
+
memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec));
split_rec.e_cpos = cpu_to_le32(cpos);
split_rec.e_leaf_clusters = cpu_to_le16(len);
split_rec.e_blkno = cpu_to_le64(start_blkno);
- split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags;
- split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN;
-
- ret = __ocfs2_mark_extent_written(inode, et, handle, left_path,
- index, &split_rec, meta_ac,
- dealloc);
+ split_rec.e_flags = rec->e_flags;
+ if (new_flags)
+ split_rec.e_flags |= new_flags;
+ if (clear_flags)
+ split_rec.e_flags &= ~clear_flags;
+
+ ret = ocfs2_split_extent(handle, et, left_path,
+ index, &split_rec, meta_ac,
+ dealloc);
if (ret)
mlog_errno(ret);
out:
ocfs2_free_path(left_path);
return ret;
+
}
-static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et,
- handle_t *handle, struct ocfs2_path *path,
+/*
+ * Mark the already-existing extent at cpos as written for len clusters.
+ * This removes the unwritten extent flag.
+ *
+ * If the existing extent is larger than the request, initiate a
+ * split. An attempt will be made at merging with adjacent extents.
+ *
+ * The caller is responsible for passing down meta_ac if we'll need it.
+ */
+int ocfs2_mark_extent_written(struct inode *inode,
+ struct ocfs2_extent_tree *et,
+ handle_t *handle, u32 cpos, u32 len, u32 phys,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret;
+
+ mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n",
+ inode->i_ino, cpos, len, phys);
+
+ if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
+ ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
+ "that are being written to, but the feature bit "
+ "is not set in the super block.",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ ret = -EROFS;
+ goto out;
+ }
+
+ /*
+ * XXX: This should be fixed up so that we just re-insert the
+ * next extent records.
+ */
+ ocfs2_et_extent_map_truncate(et, 0);
+
+ ret = ocfs2_change_extent_flag(handle, et, cpos,
+ len, phys, meta_ac, dealloc,
+ 0, OCFS2_EXT_UNWRITTEN);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
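The wrapper above only clears OCFS2_EXT_UNWRITTEN. To show how the new new_flags/clear_flags pair generalizes, here is a purely illustrative sketch (not part of this patch; the helper name is hypothetical) of setting a flag through the same entry point:

	/* Hypothetical helper: mark an existing extent refcounted. */
	static int example_mark_extent_refcounted(handle_t *handle,
						  struct ocfs2_extent_tree *et,
						  u32 cpos, u32 len, u32 phys,
						  struct ocfs2_alloc_context *meta_ac,
						  struct ocfs2_cached_dealloc_ctxt *dealloc)
	{
		/* Set OCFS2_EXT_REFCOUNTED, clear nothing. */
		return ocfs2_change_extent_flag(handle, et, cpos, len, phys,
						meta_ac, dealloc,
						OCFS2_EXT_REFCOUNTED, 0);
	}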
+static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
+ struct ocfs2_path *path,
int index, u32 new_range,
struct ocfs2_alloc_context *meta_ac)
{
@@ -5197,11 +5321,12 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et,
*/
el = path_leaf_el(path);
rec = &el->l_recs[index];
- ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec);
+ ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
+ &split_rec, new_range, rec);
depth = path->p_tree_depth;
if (depth > 0) {
- ret = ocfs2_read_extent_block(inode,
+ ret = ocfs2_read_extent_block(et->et_ci,
ocfs2_et_get_last_eb_blk(et),
&last_eb_bh);
if (ret < 0) {
@@ -5224,7 +5349,7 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et,
if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
le16_to_cpu(rightmost_el->l_count)) {
- ret = ocfs2_grow_tree(inode, handle, et, &depth, &last_eb_bh,
+ ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh,
meta_ac);
if (ret) {
mlog_errno(ret);
@@ -5238,7 +5363,7 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et,
insert.ins_split = SPLIT_RIGHT;
insert.ins_tree_depth = depth;
- ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert);
+ ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
if (ret)
mlog_errno(ret);
@@ -5247,23 +5372,23 @@ out:
return ret;
}
-static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
+static int ocfs2_truncate_rec(handle_t *handle,
+ struct ocfs2_extent_tree *et,
struct ocfs2_path *path, int index,
struct ocfs2_cached_dealloc_ctxt *dealloc,
- u32 cpos, u32 len,
- struct ocfs2_extent_tree *et)
+ u32 cpos, u32 len)
{
int ret;
u32 left_cpos, rec_range, trunc_range;
int wants_rotate = 0, is_rightmost_tree_rec = 0;
- struct super_block *sb = inode->i_sb;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
struct ocfs2_path *left_path = NULL;
struct ocfs2_extent_list *el = path_leaf_el(path);
struct ocfs2_extent_rec *rec;
struct ocfs2_extent_block *eb;
if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
- ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et);
+ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5295,14 +5420,13 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
* by this leaf and the one to it's left.
*
* There are two cases we can skip:
- * 1) Path is the leftmost one in our inode tree.
+ * 1) Path is the leftmost one in our btree.
* 2) The leaf is rightmost and will be empty after
* we remove the extent record - the rotate code
* knows how to update the newly formed edge.
*/
- ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path,
- &left_cpos);
+ ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5316,7 +5440,8 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_find_path(inode, left_path, left_cpos);
+ ret = ocfs2_find_path(et->et_ci, left_path,
+ left_cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5332,13 +5457,13 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
goto out;
}
- ret = ocfs2_journal_access_path(inode, handle, path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, path);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_journal_access_path(inode, handle, left_path);
+ ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5361,7 +5486,7 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
* be deleted by the rotate code.
*/
rec = &el->l_recs[next_free - 1];
- ocfs2_adjust_rightmost_records(inode, handle, path,
+ ocfs2_adjust_rightmost_records(handle, et, path,
rec);
}
} else if (le32_to_cpu(rec->e_cpos) == cpos) {
@@ -5373,11 +5498,12 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
/* Remove rightmost portion of the record */
le16_add_cpu(&rec->e_leaf_clusters, -len);
if (is_rightmost_tree_rec)
- ocfs2_adjust_rightmost_records(inode, handle, path, rec);
+ ocfs2_adjust_rightmost_records(handle, et, path, rec);
} else {
/* Caller should have trapped this. */
- mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) "
- "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ mlog(ML_ERROR, "Owner %llu: Invalid record truncate: (%u, %u) "
+ "(%u, %u)\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
le32_to_cpu(rec->e_cpos),
le16_to_cpu(rec->e_leaf_clusters), cpos, len);
BUG();
@@ -5386,14 +5512,14 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle,
if (left_path) {
int subtree_index;
- subtree_index = ocfs2_find_subtree_root(inode, left_path, path);
- ocfs2_complete_edge_insert(inode, handle, left_path, path,
+ subtree_index = ocfs2_find_subtree_root(et, left_path, path);
+ ocfs2_complete_edge_insert(handle, left_path, path,
subtree_index);
}
ocfs2_journal_dirty(handle, path_leaf_bh(path));
- ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et);
+ ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5404,9 +5530,9 @@ out:
return ret;
}
-int ocfs2_remove_extent(struct inode *inode,
+int ocfs2_remove_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
- u32 cpos, u32 len, handle_t *handle,
+ u32 cpos, u32 len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
@@ -5416,7 +5542,11 @@ int ocfs2_remove_extent(struct inode *inode,
struct ocfs2_extent_list *el;
struct ocfs2_path *path = NULL;
- ocfs2_extent_map_trunc(inode, 0);
+ /*
+ * XXX: Why are we truncating to 0 instead of wherever this
+ * affects us?
+ */
+ ocfs2_et_extent_map_truncate(et, 0);
path = ocfs2_new_path_from_et(et);
if (!path) {
@@ -5425,7 +5555,7 @@ int ocfs2_remove_extent(struct inode *inode,
goto out;
}
- ret = ocfs2_find_path(inode, path, cpos);
+ ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5434,10 +5564,11 @@ int ocfs2_remove_extent(struct inode *inode,
el = path_leaf_el(path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has an extent at cpos %u which can no "
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has an extent at cpos %u which can no "
"longer be found.\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos);
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ cpos);
ret = -EROFS;
goto out;
}
@@ -5464,20 +5595,21 @@ int ocfs2_remove_extent(struct inode *inode,
BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
- mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d "
+ mlog(0, "Owner %llu, remove (cpos %u, len %u). Existing index %d "
"(cpos %u, len %u)\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ cpos, len, index,
le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec));
if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
- ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc,
- cpos, len, et);
+ ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
+ cpos, len);
if (ret) {
mlog_errno(ret);
goto out;
}
} else {
- ret = ocfs2_split_tree(inode, et, handle, path, index,
+ ret = ocfs2_split_tree(handle, et, path, index,
trunc_range, meta_ac);
if (ret) {
mlog_errno(ret);
@@ -5490,7 +5622,7 @@ int ocfs2_remove_extent(struct inode *inode,
*/
ocfs2_reinit_path(path, 1);
- ret = ocfs2_find_path(inode, path, cpos);
+ ret = ocfs2_find_path(et->et_ci, path, cpos);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5499,9 +5631,9 @@ int ocfs2_remove_extent(struct inode *inode,
el = path_leaf_el(path);
index = ocfs2_search_extent_list(el, cpos);
if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
- ocfs2_error(inode->i_sb,
- "Inode %llu: split at cpos %u lost record.",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu: split at cpos %u lost record.",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos);
ret = -EROFS;
goto out;
@@ -5515,18 +5647,18 @@ int ocfs2_remove_extent(struct inode *inode,
rec_range = le32_to_cpu(rec->e_cpos) +
ocfs2_rec_clusters(el, rec);
if (rec_range != trunc_range) {
- ocfs2_error(inode->i_sb,
- "Inode %llu: error after split at cpos %u"
+ ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu: error after split at cpos %u"
"trunc len %u, existing record is (%u,%u)",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
cpos, len, le32_to_cpu(rec->e_cpos),
ocfs2_rec_clusters(el, rec));
ret = -EROFS;
goto out;
}
- ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc,
- cpos, len, et);
+ ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
+ cpos, len);
if (ret) {
mlog_errno(ret);
goto out;
@@ -5573,7 +5705,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
goto out;
}
- ret = ocfs2_et_root_journal_access(handle, inode, et,
+ ret = ocfs2_et_root_journal_access(handle, et,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -5583,14 +5715,13 @@ int ocfs2_remove_btree_range(struct inode *inode,
vfs_dq_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(inode->i_sb, len));
- ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac,
- dealloc);
+ ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
- ocfs2_et_update_clusters(inode, et, -len);
+ ocfs2_et_update_clusters(et, -len);
ret = ocfs2_journal_dirty(handle, et->et_root_bh);
if (ret) {
@@ -5690,7 +5821,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
goto bail;
}
- status = ocfs2_journal_access_di(handle, tl_inode, tl_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -5752,7 +5883,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
while (i >= 0) {
/* Caller has given us at least enough credits to
* update the truncate log dinode */
- status = ocfs2_journal_access_di(handle, tl_inode, tl_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -6010,7 +6141,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
tl->tl_used = 0;
ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check);
- status = ocfs2_write_block(osb, tl_bh, tl_inode);
+ status = ocfs2_write_block(osb, tl_bh, INODE_CACHE(tl_inode));
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -6400,9 +6531,9 @@ ocfs2_find_per_slot_free_list(int type,
return fl;
}
-static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
- int type, int slot, u64 blkno,
- unsigned int bit)
+int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
+ int type, int slot, u64 blkno,
+ unsigned int bit)
{
int ret;
struct ocfs2_per_slot_free_list *fl;
@@ -6518,7 +6649,7 @@ static int ocfs2_find_new_last_ext_blk(struct inode *inode,
goto out;
}
- ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh);
+ ret = ocfs2_find_leaf(INODE_CACHE(inode), path_root_el(path), cpos, &bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -6551,7 +6682,7 @@ out:
*/
static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
handle_t *handle, struct ocfs2_truncate_context *tc,
- u32 clusters_to_del, u64 *delete_start)
+ u32 clusters_to_del, u64 *delete_start, u8 *flags)
{
int ret, i, index = path->p_tree_depth;
u32 new_edge = 0;
@@ -6561,6 +6692,7 @@ static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
struct ocfs2_extent_rec *rec;
*delete_start = 0;
+ *flags = 0;
while (index >= 0) {
bh = path->p_node[index].bh;
@@ -6648,6 +6780,7 @@ find_tail_record:
*delete_start = le64_to_cpu(rec->e_blkno)
+ ocfs2_clusters_to_blocks(inode->i_sb,
le16_to_cpu(rec->e_leaf_clusters));
+ *flags = rec->e_flags;
/*
* If it's now empty, remove this record.
@@ -6719,7 +6852,7 @@ delete:
mlog(0, "deleting this extent block.\n");
- ocfs2_remove_from_cache(inode, bh);
+ ocfs2_remove_from_cache(INODE_CACHE(inode), bh);
BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
@@ -6747,7 +6880,8 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
struct buffer_head *fe_bh,
handle_t *handle,
struct ocfs2_truncate_context *tc,
- struct ocfs2_path *path)
+ struct ocfs2_path *path,
+ struct ocfs2_alloc_context *meta_ac)
{
int status;
struct ocfs2_dinode *fe;
@@ -6755,6 +6889,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
struct ocfs2_extent_list *el;
struct buffer_head *last_eb_bh = NULL;
u64 delete_blk = 0;
+ u8 rec_flags;
fe = (struct ocfs2_dinode *) fe_bh->b_data;
@@ -6769,14 +6904,14 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
* Each component will be touched, so we might as well journal
* here to avoid having to handle errors later.
*/
- status = ocfs2_journal_access_path(inode, handle, path);
+ status = ocfs2_journal_access_path(INODE_CACHE(inode), handle, path);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (last_eb_bh) {
- status = ocfs2_journal_access_eb(handle, inode, last_eb_bh,
+ status = ocfs2_journal_access_eb(handle, INODE_CACHE(inode), last_eb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -6810,7 +6945,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
inode->i_blocks = ocfs2_inode_sector_count(inode);
status = ocfs2_trim_tree(inode, path, handle, tc,
- clusters_to_del, &delete_blk);
+ clusters_to_del, &delete_blk, &rec_flags);
if (status) {
mlog_errno(status);
goto bail;
@@ -6842,8 +6977,16 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
}
if (delete_blk) {
- status = ocfs2_truncate_log_append(osb, handle, delete_blk,
- clusters_to_del);
+ if (rec_flags & OCFS2_EXT_REFCOUNTED)
+ status = ocfs2_decrease_refcount(inode, handle,
+ ocfs2_blocks_to_clusters(osb->sb,
+ delete_blk),
+ clusters_to_del, meta_ac,
+ &tc->tc_dealloc, 1);
+ else
+ status = ocfs2_truncate_log_append(osb, handle,
+ delete_blk,
+ clusters_to_del);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -6863,9 +7006,9 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
return 0;
}
-static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
- unsigned int from, unsigned int to,
- struct page *page, int zero, u64 *phys)
+void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
+ unsigned int from, unsigned int to,
+ struct page *page, int zero, u64 *phys)
{
int ret, partial = 0;
@@ -6933,20 +7076,16 @@ out:
ocfs2_unlock_and_free_pages(pages, numpages);
}
-static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num)
+int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
+ struct page **pages, int *num)
{
int numpages, ret = 0;
- struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
BUG_ON(start > end);
- BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
- (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
-
numpages = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_CACHE_SHIFT;
@@ -6974,6 +7113,17 @@ out:
return ret;
}
+static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
+ struct page **pages, int *num)
+{
+ struct super_block *sb = inode->i_sb;
+
+ BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
+ (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
+
+ return ocfs2_grab_pages(inode, start, end, pages, num);
+}
+
/*
* Zero the area past i_size but still within an allocated
* cluster. This avoids exposing nonzero data on subsequent file
@@ -7138,7 +7288,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
goto out_unlock;
}
- ret = ocfs2_journal_access_di(handle, inode, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -7218,9 +7368,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* this proves to be false, we could always re-build
* the in-inode data from our pages.
*/
- ocfs2_init_dinode_extent_tree(&et, inode, di_bh);
- ret = ocfs2_insert_extent(osb, handle, inode, &et,
- 0, block, 1, 0, NULL);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
+ ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
if (ret) {
mlog_errno(ret);
goto out_commit;
@@ -7262,11 +7411,14 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
{
int status, i, credits, tl_sem = 0;
u32 clusters_to_del, new_highest_cpos, range;
+ u64 blkno = 0;
struct ocfs2_extent_list *el;
handle_t *handle = NULL;
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_path *path = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_refcount_tree *ref_tree = NULL;
mlog_entry_void();
@@ -7292,10 +7444,12 @@ start:
goto bail;
}
+ credits = 0;
+
/*
* Truncate always works against the rightmost tree branch.
*/
- status = ocfs2_find_path(inode, path, UINT_MAX);
+ status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX);
if (status) {
mlog_errno(status);
goto bail;
@@ -7332,10 +7486,15 @@ start:
clusters_to_del = 0;
} else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
+ blkno = le64_to_cpu(el->l_recs[i].e_blkno);
} else if (range > new_highest_cpos) {
clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
le32_to_cpu(el->l_recs[i].e_cpos)) -
new_highest_cpos;
+ blkno = le64_to_cpu(el->l_recs[i].e_blkno) +
+ ocfs2_clusters_to_blocks(inode->i_sb,
+ ocfs2_rec_clusters(el, &el->l_recs[i]) -
+ clusters_to_del);
} else {
status = 0;
goto bail;
@@ -7344,6 +7503,29 @@ start:
mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);
+ if (el->l_recs[i].e_flags & OCFS2_EXT_REFCOUNTED && clusters_to_del) {
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
+ OCFS2_HAS_REFCOUNT_FL));
+
+ status = ocfs2_lock_refcount_tree(osb,
+ le64_to_cpu(di->i_refcount_loc),
+ 1, &ref_tree, NULL);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_prepare_refcount_change_for_del(inode, fe_bh,
+ blkno,
+ clusters_to_del,
+ &credits,
+ &meta_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
mutex_lock(&tl_inode->i_mutex);
tl_sem = 1;
/* ocfs2_truncate_log_needs_flush guarantees us at least one
@@ -7357,7 +7539,7 @@ start:
}
}
- credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
+ credits += ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
(struct ocfs2_dinode *)fe_bh->b_data,
el);
handle = ocfs2_start_trans(osb, credits);
@@ -7369,7 +7551,7 @@ start:
}
status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
- tc, path);
+ tc, path, meta_ac);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -7383,6 +7565,16 @@ start:
ocfs2_reinit_path(path, 1);
+ if (meta_ac) {
+ ocfs2_free_alloc_context(meta_ac);
+ meta_ac = NULL;
+ }
+
+ if (ref_tree) {
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ ref_tree = NULL;
+ }
+
/*
* The check above will catch the case where we've truncated
* away all allocation.
@@ -7399,6 +7591,12 @@ bail:
if (handle)
ocfs2_commit_trans(osb, handle);
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+
ocfs2_run_deallocs(osb, &tc->tc_dealloc);
ocfs2_free_path(path);
@@ -7445,7 +7643,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
if (fe->id2.i_list.l_tree_depth) {
- status = ocfs2_read_extent_block(inode,
+ status = ocfs2_read_extent_block(INODE_CACHE(inode),
le64_to_cpu(fe->i_last_eb_blk),
&last_eb_bh);
if (status < 0) {
@@ -7507,7 +7705,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
goto out;
}
- ret = ocfs2_journal_access_di(handle, inode, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 353254ba29e..9c122d57446 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -45,7 +45,8 @@
*
* ocfs2_extent_tree contains info for the root of the b-tree, it must have a
* root ocfs2_extent_list and a root_bh so that they can be used in the b-tree
- * functions. With metadata ecc, we now call different journal_access
+ * functions. It needs the ocfs2_caching_info structure associated with
+ * I/O on the tree. With metadata ecc, we now call different journal_access
* functions for each type of metadata, so it must have the
* root_journal_access function.
* ocfs2_extent_tree_operations abstract the normal operations we do for
@@ -56,6 +57,7 @@ struct ocfs2_extent_tree {
struct ocfs2_extent_tree_operations *et_ops;
struct buffer_head *et_root_bh;
struct ocfs2_extent_list *et_root_el;
+ struct ocfs2_caching_info *et_ci;
ocfs2_journal_access_func et_root_journal_access;
void *et_object;
unsigned int et_max_leaf_clusters;
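The new et_ci field is what lets the tree code drop its struct inode parameters; a short sketch of the idiom used throughout the alloc.c hunks above (illustrative, with et assumed to be an initialized tree):

	/* Derive the superblock and owner block number from the cache. */
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
	u64 owner = ocfs2_metadata_cache_owner(et->et_ci);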
@@ -66,31 +68,32 @@ struct ocfs2_extent_tree {
* specified object buffer.
*/
void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh);
void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh);
struct ocfs2_xattr_value_buf;
void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct ocfs2_xattr_value_buf *vb);
void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh);
+void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *bh);
/*
* Read an extent block into *bh. If *bh is NULL, a bh will be
* allocated. This is a cached read. The extent block will be validated
* with ocfs2_validate_extent_block().
*/
-int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno,
+int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
struct buffer_head **bh);
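A typical caller, mirroring the converted sites in alloc.c above (sketch only; error handling trimmed):

	struct buffer_head *eb_bh = NULL;

	/* Cached, validated read of the rightmost extent block. */
	ret = ocfs2_read_extent_block(et->et_ci,
				      ocfs2_et_get_last_eb_blk(et),
				      &eb_bh);
	if (ret)
		mlog_errno(ret);
	else
		brelse(eb_bh);	/* drop our reference when done */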
struct ocfs2_alloc_context;
-int ocfs2_insert_extent(struct ocfs2_super *osb,
- handle_t *handle,
- struct inode *inode,
+int ocfs2_insert_extent(handle_t *handle,
struct ocfs2_extent_tree *et,
u32 cpos,
u64 start_blk,
@@ -103,25 +106,36 @@ enum ocfs2_alloc_restarted {
RESTART_TRANS,
RESTART_META
};
-int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
- struct inode *inode,
+int ocfs2_add_clusters_in_btree(handle_t *handle,
+ struct ocfs2_extent_tree *et,
u32 *logical_offset,
u32 clusters_to_add,
int mark_unwritten,
- struct ocfs2_extent_tree *et,
- handle_t *handle,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
enum ocfs2_alloc_restarted *reason_ret);
struct ocfs2_cached_dealloc_ctxt;
+struct ocfs2_path;
+int ocfs2_split_extent(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ struct ocfs2_path *path,
+ int split_index,
+ struct ocfs2_extent_rec *split_rec,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc);
int ocfs2_mark_extent_written(struct inode *inode,
struct ocfs2_extent_tree *et,
handle_t *handle, u32 cpos, u32 len, u32 phys,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc);
-int ocfs2_remove_extent(struct inode *inode,
- struct ocfs2_extent_tree *et,
- u32 cpos, u32 len, handle_t *handle,
+int ocfs2_change_extent_flag(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ u32 cpos, u32 len, u32 phys,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ int new_flags, int clear_flags);
+int ocfs2_remove_extent(handle_t *handle, struct ocfs2_extent_tree *et,
+ u32 cpos, u32 len,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc);
int ocfs2_remove_btree_range(struct inode *inode,
@@ -130,7 +144,6 @@ int ocfs2_remove_btree_range(struct inode *inode,
struct ocfs2_cached_dealloc_ctxt *dealloc);
int ocfs2_num_free_extents(struct ocfs2_super *osb,
- struct inode *inode,
struct ocfs2_extent_tree *et);
/*
@@ -195,6 +208,9 @@ static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c)
}
int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
u64 blkno, unsigned int bit);
+int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
+ int type, int slot, u64 blkno,
+ unsigned int bit);
static inline int ocfs2_dealloc_has_cluster(struct ocfs2_cached_dealloc_ctxt *c)
{
return c->c_global_allocator != NULL;
@@ -222,8 +238,9 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
unsigned int start, unsigned int end, int trunc);
-int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
- u32 cpos, struct buffer_head **leaf_bh);
+int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
+ struct ocfs2_extent_list *root_el, u32 cpos,
+ struct buffer_head **leaf_bh);
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster);
/*
@@ -254,4 +271,50 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
return !rec->e_leaf_clusters;
}
+int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
+ struct page **pages, int *num);
+void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
+ unsigned int from, unsigned int to,
+ struct page *page, int zero, u64 *phys);
+/*
+ * Structures which describe a path through a btree, and functions to
+ * manipulate them.
+ *
+ * The idea here is to be as generic as possible with the tree
+ * manipulation code.
+ */
+struct ocfs2_path_item {
+ struct buffer_head *bh;
+ struct ocfs2_extent_list *el;
+};
+
+#define OCFS2_MAX_PATH_DEPTH 5
+
+struct ocfs2_path {
+ int p_tree_depth;
+ ocfs2_journal_access_func p_root_access;
+ struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH];
+};
+
+#define path_root_bh(_path) ((_path)->p_node[0].bh)
+#define path_root_el(_path) ((_path)->p_node[0].el)
+#define path_root_access(_path)((_path)->p_root_access)
+#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
+#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
+#define path_num_items(_path) ((_path)->p_tree_depth + 1)
+
+void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root);
+void ocfs2_free_path(struct ocfs2_path *path);
+int ocfs2_find_path(struct ocfs2_caching_info *ci,
+ struct ocfs2_path *path,
+ u32 cpos);
+struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path);
+struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et);
+int ocfs2_path_bh_journal_access(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct ocfs2_path *path,
+ int idx);
+int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
+ handle_t *handle,
+ struct ocfs2_path *path);
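These path helpers are used in a fairly fixed lifecycle; a sketch of the common pattern from the alloc.c hunks above (assuming an initialized et and a cpos of interest):

	struct ocfs2_path *path = ocfs2_new_path_from_et(et);

	if (!path)
		return -ENOMEM;

	ret = ocfs2_find_path(et->et_ci, path, cpos);
	if (!ret) {
		struct ocfs2_extent_list *el = path_leaf_el(path);
		/* ... inspect or modify the leaf records ... */
	}
	ocfs2_free_path(path);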
#endif /* OCFS2_ALLOC_H */
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 8a1e61545f4..deb2b132ae5 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -44,6 +44,7 @@
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
@@ -126,8 +127,8 @@ bail:
return err;
}
-static int ocfs2_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+int ocfs2_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
{
int err = 0;
unsigned int ext_flags;
@@ -590,6 +591,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
goto bail;
}
+ /* We should have already CoWed the refcounted extent. */
+ BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
/*
* get_more_blocks() expects us to describe a hole by clearing
* the mapped bit on bh_result().
@@ -687,6 +690,10 @@ static ssize_t ocfs2_direct_IO(int rw,
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
+ /* Fall back to buffered I/O if we are appending. */
+ if (i_size_read(inode) <= offset)
+ return 0;
+
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
nr_segs,
@@ -1259,7 +1266,8 @@ static int ocfs2_write_cluster(struct address_space *mapping,
goto out;
}
} else if (unwritten) {
- ocfs2_init_dinode_extent_tree(&et, inode, wc->w_di_bh);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
+ wc->w_di_bh);
ret = ocfs2_mark_extent_written(inode, &et,
wc->w_handle, cpos, 1, phys,
meta_ac, &wc->w_dealloc);
@@ -1448,6 +1456,9 @@ static int ocfs2_populate_write_desc(struct inode *inode,
goto out;
}
+ /* We should have already CoWed the refcounted extent. */
+ BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+
/*
* Assume worst case - that we're writing in
* the middle of the extent.
@@ -1528,7 +1539,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
goto out;
}
- ret = ocfs2_journal_access_di(handle, inode, wc->w_di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
ocfs2_commit_trans(osb, handle);
@@ -1699,6 +1710,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
goto out;
}
+ ret = ocfs2_check_range_for_refcount(inode, pos, len);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ } else if (ret == 1) {
+ ret = ocfs2_refcount_cow(inode, di_bh,
+ wc->w_cpos, wc->w_clen, UINT_MAX);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
&extents_to_split);
if (ret) {
@@ -1726,7 +1750,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
(long long)i_size_read(inode), le32_to_cpu(di->i_clusters),
clusters_to_alloc, extents_to_split);
- ocfs2_init_dinode_extent_tree(&et, inode, wc->w_di_bh);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
+ wc->w_di_bh);
ret = ocfs2_lock_allocators(inode, &et,
clusters_to_alloc, extents_to_split,
&data_ac, &meta_ac);
@@ -1773,7 +1798,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
* We don't want this to fail in ocfs2_write_end(), so do it
* here.
*/
- ret = ocfs2_journal_access_di(handle, inode, wc->w_di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1997,4 +2022,5 @@ const struct address_space_operations ocfs2_aops = {
.releasepage = ocfs2_releasepage,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 503e49232e1..c48e93ffc51 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -57,6 +57,8 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
struct buffer_head *di_bh);
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
+int ocfs2_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
/* all ocfs2_dio_end_io()'s fault */
#define ocfs2_iocb_is_rw_locked(iocb) \
test_bit(0, (unsigned long *)&iocb->private)
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 15c8e6deee2..d43d34a1dd3 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -52,12 +52,12 @@ enum ocfs2_state_bits {
BUFFER_FNS(NeedsValidate, needs_validate);
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
- struct inode *inode)
+ struct ocfs2_caching_info *ci)
{
int ret = 0;
- mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
- (unsigned long long)bh->b_blocknr, inode);
+ mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n",
+ (unsigned long long)bh->b_blocknr, ci);
BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
BUG_ON(buffer_jbd(bh));
@@ -70,7 +70,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
goto out;
}
- mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_lock(ci);
lock_buffer(bh);
set_buffer_uptodate(bh);
@@ -85,7 +85,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
wait_on_buffer(bh);
if (buffer_uptodate(bh)) {
- ocfs2_set_buffer_uptodate(inode, bh);
+ ocfs2_set_buffer_uptodate(ci, bh);
} else {
/* We don't need to remove the clustered uptodate
* information for this bh as it's not marked locally
@@ -94,7 +94,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
put_bh(bh);
}
- mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_unlock(ci);
out:
mlog_exit(ret);
return ret;
@@ -177,7 +177,7 @@ bail:
return status;
}
-int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
+int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
struct buffer_head *bh))
@@ -185,11 +185,12 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
int status = 0;
int i, ignore_cache = 0;
struct buffer_head *bh;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
- mlog_entry("(inode=%p, block=(%llu), nr=(%d), flags=%d)\n",
- inode, (unsigned long long)block, nr, flags);
+ mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n",
+ ci, (unsigned long long)block, nr, flags);
- BUG_ON(!inode);
+ BUG_ON(!ci);
BUG_ON((flags & OCFS2_BH_READAHEAD) &&
(flags & OCFS2_BH_IGNORE_CACHE));
@@ -212,12 +213,12 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
goto bail;
}
- mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_lock(ci);
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
- bhs[i] = sb_getblk(inode->i_sb, block++);
+ bhs[i] = sb_getblk(sb, block++);
if (bhs[i] == NULL) {
- mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_unlock(ci);
status = -EIO;
mlog_errno(status);
goto bail;
@@ -250,11 +251,11 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
* before our is-it-in-flight check.
*/
- if (!ignore_cache && !ocfs2_buffer_uptodate(inode, bh)) {
+ if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) {
mlog(ML_UPTODATE,
- "bh (%llu), inode %llu not uptodate\n",
+ "bh (%llu), owner %llu not uptodate\n",
(unsigned long long)bh->b_blocknr,
- (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ (unsigned long long)ocfs2_metadata_cache_owner(ci));
/* We're using ignore_cache here to say
* "go to disk" */
ignore_cache = 1;
@@ -283,7 +284,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
* previously submitted request than we are
* done here. */
if ((flags & OCFS2_BH_READAHEAD)
- && ocfs2_buffer_read_ahead(inode, bh))
+ && ocfs2_buffer_read_ahead(ci, bh))
continue;
lock_buffer(bh);
@@ -305,7 +306,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
* buffer lock. */
if (!(flags & OCFS2_BH_IGNORE_CACHE)
&& !(flags & OCFS2_BH_READAHEAD)
- && ocfs2_buffer_uptodate(inode, bh)) {
+ && ocfs2_buffer_uptodate(ci, bh)) {
unlock_buffer(bh);
continue;
}
@@ -327,7 +328,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
if (!(flags & OCFS2_BH_READAHEAD)) {
/* We know this can't have changed as we hold the
- * inode sem. Avoid doing any work on the bh if the
+ * owner sem. Avoid doing any work on the bh if the
* journal has it. */
if (!buffer_jbd(bh))
wait_on_buffer(bh);
@@ -351,7 +352,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
* that better not have changed */
BUG_ON(buffer_jbd(bh));
clear_buffer_needs_validate(bh);
- status = validate(inode->i_sb, bh);
+ status = validate(sb, bh);
if (status) {
put_bh(bh);
bhs[i] = NULL;
@@ -363,9 +364,9 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
/* Always set the buffer in the cache, even if it was
* a forced read, or read-ahead which hasn't yet
* completed. */
- ocfs2_set_buffer_uptodate(inode, bh);
+ ocfs2_set_buffer_uptodate(ci, bh);
}
- mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_unlock(ci);
mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
(unsigned long long)block, nr,
@@ -399,7 +400,7 @@ static void ocfs2_check_super_or_backup(struct super_block *sb,
/*
* Write super block and backups doesn't need to collaborate with journal,
- * so we don't need to lock ip_io_mutex and inode doesn't need to bea passed
+ * so we don't need to lock ip_io_mutex and ci doesn't need to be passed
* into this function.
*/
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h
index c75d682dadd..b97bcc6dde7 100644
--- a/fs/ocfs2/buffer_head_io.h
+++ b/fs/ocfs2/buffer_head_io.h
@@ -33,7 +33,7 @@ void ocfs2_end_buffer_io_sync(struct buffer_head *bh,
int ocfs2_write_block(struct ocfs2_super *osb,
struct buffer_head *bh,
- struct inode *inode);
+ struct ocfs2_caching_info *ci);
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
unsigned int nr, struct buffer_head *bhs[]);
@@ -44,7 +44,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
* be set even for a READAHEAD call, as it marks the buffer for later
* validation.
*/
-int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
+int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
struct buffer_head *bh));
@@ -55,7 +55,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
#define OCFS2_BH_IGNORE_CACHE 1
#define OCFS2_BH_READAHEAD 8
-static inline int ocfs2_read_block(struct inode *inode, u64 off,
+static inline int ocfs2_read_block(struct ocfs2_caching_info *ci, u64 off,
struct buffer_head **bh,
int (*validate)(struct super_block *sb,
struct buffer_head *bh))
@@ -68,7 +68,7 @@ static inline int ocfs2_read_block(struct inode *inode, u64 off,
goto bail;
}
- status = ocfs2_read_blocks(inode, off, 1, bh, 0, validate);
+ status = ocfs2_read_blocks(ci, off, 1, bh, 0, validate);
bail:
return status;
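
The read helpers above now take a struct ocfs2_caching_info instead of a struct inode. A minimal caller-side sketch, assuming the INODE_CACHE() wrapper added in fs/ocfs2/inode.h further down in this patch and any validate callback of the declared type; example_read_one_block() is a hypothetical helper, not part of the patch:

static int example_read_one_block(struct inode *inode, u64 blkno,
                                  struct buffer_head **bh,
                                  int (*validate)(struct super_block *sb,
                                                  struct buffer_head *bh))
{
        /* INODE_CACHE() resolves to &OCFS2_I(inode)->ip_metadata_cache. */
        int ret = ocfs2_read_block(INODE_CACHE(inode), blkno, bh, validate);

        if (ret)
                mlog_errno(ret);
        return ret;
}
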
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 96df5416993..1cd2934de61 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -111,6 +111,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(EXPORT),
define_mask(XATTR),
define_mask(QUOTA),
+ define_mask(REFCOUNT),
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 696c32e5071..9b4d11726cf 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -113,6 +113,7 @@
#define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */
#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
+#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
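
The new ML_REFCOUNT bit simply plugs into the existing masklog machinery, so refcount-tree code can be traced like any other subsystem. A hedged one-line illustration (the message text and the rf_blkno variable are hypothetical; only the mask bit itself is added by this patch):

        mlog(ML_REFCOUNT, "created refcount tree at block %llu\n",
             (unsigned long long)rf_blkno);
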
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index f8424874fa0..cfb2be708ab 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -163,7 +163,7 @@ static void nst_seq_stop(struct seq_file *seq, void *v)
{
}
-static struct seq_operations nst_seq_ops = {
+static const struct seq_operations nst_seq_ops = {
.start = nst_seq_start,
.next = nst_seq_next,
.stop = nst_seq_stop,
@@ -344,7 +344,7 @@ static void sc_seq_stop(struct seq_file *seq, void *v)
{
}
-static struct seq_operations sc_seq_ops = {
+static const struct seq_operations sc_seq_ops = {
.start = sc_seq_start,
.next = sc_seq_next,
.stop = sc_seq_stop,
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index b358f3bf896..28c3ec23879 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -176,7 +176,7 @@ static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle,
struct ocfs2_dx_root_block *dx_root;
struct ocfs2_dir_block_trailer *trailer;
- ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh,
+ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -564,7 +564,8 @@ static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys,
int ret;
struct buffer_head *tmp = *bh;
- ret = ocfs2_read_block(dir, phys, &tmp, ocfs2_validate_dir_block);
+ ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp,
+ ocfs2_validate_dir_block);
if (ret) {
mlog_errno(ret);
goto out;
@@ -622,7 +623,8 @@ static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
u64 blkno = le64_to_cpu(di->i_dx_root);
struct buffer_head *tmp = *dx_root_bh;
- ret = ocfs2_read_block(dir, blkno, &tmp, ocfs2_validate_dx_root);
+ ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
+ ocfs2_validate_dx_root);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!ret && !*dx_root_bh)
@@ -662,7 +664,8 @@ static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
int ret;
struct buffer_head *tmp = *dx_leaf_bh;
- ret = ocfs2_read_block(dir, blkno, &tmp, ocfs2_validate_dx_leaf);
+ ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp,
+ ocfs2_validate_dx_leaf);
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!ret && !*dx_leaf_bh)
@@ -680,7 +683,7 @@ static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num,
{
int ret;
- ret = ocfs2_read_blocks(dir, start, num, dx_leaf_bhs, 0,
+ ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0,
ocfs2_validate_dx_leaf);
if (ret)
mlog_errno(ret);
@@ -802,7 +805,8 @@ static int ocfs2_dx_dir_lookup_rec(struct inode *inode,
struct ocfs2_extent_rec *rec = NULL;
if (el->l_tree_depth) {
- ret = ocfs2_find_leaf(inode, el, major_hash, &eb_bh);
+ ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash,
+ &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1133,7 +1137,8 @@ int ocfs2_update_entry(struct inode *dir, handle_t *handle,
if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
access = ocfs2_journal_access_di;
- ret = access(handle, dir, de_bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ ret = access(handle, INODE_CACHE(dir), de_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1176,7 +1181,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
goto bail;
}
if (de == de_del) {
- status = access(handle, dir, bh,
+ status = access(handle, INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
status = -EIO;
@@ -1326,7 +1331,7 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
* the entry count needs to be updated. Also, we might be
* adding to the start of the free list.
*/
- ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh,
+ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1334,7 +1339,7 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir,
}
if (!ocfs2_dx_root_inline(dx_root)) {
- ret = ocfs2_journal_access_dl(handle, dir,
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
lookup->dl_dx_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
@@ -1493,7 +1498,7 @@ static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle,
int ret;
struct ocfs2_dx_leaf *dx_leaf;
- ret = ocfs2_journal_access_dl(handle, dir, dx_leaf_bh,
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1523,7 +1528,7 @@ static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle,
struct ocfs2_dx_root_block *dx_root;
struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh;
- ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh,
+ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1645,11 +1650,13 @@ int __ocfs2_add_entry(handle_t *handle,
*/
if (ocfs2_free_list_at_root(lookup)) {
bh = lookup->dl_dx_root_bh;
- retval = ocfs2_journal_access_dr(handle, dir, bh,
+ retval = ocfs2_journal_access_dr(handle,
+ INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
} else {
bh = lookup->dl_prev_leaf_bh;
- retval = ocfs2_journal_access_db(handle, dir, bh,
+ retval = ocfs2_journal_access_db(handle,
+ INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
}
if (retval) {
@@ -1700,11 +1707,13 @@ int __ocfs2_add_entry(handle_t *handle,
}
if (insert_bh == parent_fe_bh)
- status = ocfs2_journal_access_di(handle, dir,
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(dir),
insert_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
else {
- status = ocfs2_journal_access_db(handle, dir,
+ status = ocfs2_journal_access_db(handle,
+ INODE_CACHE(dir),
insert_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
@@ -2280,7 +2289,7 @@ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb,
struct ocfs2_inline_data *data = &di->id2.i_data;
unsigned int size = le16_to_cpu(data->id_count);
- ret = ocfs2_journal_access_di(handle, inode, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -2332,9 +2341,9 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb,
goto bail;
}
- ocfs2_set_new_buffer_uptodate(inode, new_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
- status = ocfs2_journal_access_db(handle, inode, new_bh,
+ status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -2418,9 +2427,9 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
ret = -EIO;
goto out;
}
- ocfs2_set_new_buffer_uptodate(dir, dx_root_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh);
- ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh,
+ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
@@ -2454,7 +2463,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
if (ret)
mlog_errno(ret);
- ret = ocfs2_journal_access_di(handle, dir, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
@@ -2495,9 +2504,9 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
}
dx_leaves[i] = bh;
- ocfs2_set_new_buffer_uptodate(dir, bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh);
- ret = ocfs2_journal_access_dl(handle, dir, bh,
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
@@ -2582,7 +2591,6 @@ static int ocfs2_dx_dir_new_cluster(struct inode *dir,
{
int ret;
u64 phys_blkno;
- struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves,
num_dx_leaves, &phys_blkno);
@@ -2591,7 +2599,7 @@ static int ocfs2_dx_dir_new_cluster(struct inode *dir,
goto out;
}
- ret = ocfs2_insert_extent(osb, handle, dir, et, cpos, phys_blkno, 1, 0,
+ ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0,
meta_ac);
if (ret)
mlog_errno(ret);
@@ -2895,7 +2903,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
struct ocfs2_extent_tree dx_et;
int did_quota = 0, bytes_allocated = 0;
- ocfs2_init_dinode_extent_tree(&et, dir, di_bh);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh);
alloc = ocfs2_clusters_for_bytes(sb, bytes);
dx_alloc = 0;
@@ -3005,9 +3013,9 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
goto out_commit;
}
- ocfs2_set_new_buffer_uptodate(dir, dirdata_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh);
- ret = ocfs2_journal_access_db(handle, dir, dirdata_bh,
+ ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
@@ -3060,7 +3068,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
* We let the later dirent insert modify c/mtime - to the user
* the data hasn't changed.
*/
- ret = ocfs2_journal_access_di(handle, dir, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
@@ -3085,7 +3093,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
* This should never fail as our extent list is empty and all
* related blocks have been journaled already.
*/
- ret = ocfs2_insert_extent(osb, handle, dir, &et, 0, blkno, len,
+ ret = ocfs2_insert_extent(handle, &et, 0, blkno, len,
0, NULL);
if (ret) {
mlog_errno(ret);
@@ -3117,8 +3125,10 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
ocfs2_dx_dir_index_root_block(dir, dx_root_bh,
dirdata_bh);
} else {
- ocfs2_init_dx_root_extent_tree(&dx_et, dir, dx_root_bh);
- ret = ocfs2_insert_extent(osb, handle, dir, &dx_et, 0,
+ ocfs2_init_dx_root_extent_tree(&dx_et,
+ INODE_CACHE(dir),
+ dx_root_bh);
+ ret = ocfs2_insert_extent(handle, &dx_et, 0,
dx_insert_blkno, 1, 0, NULL);
if (ret)
mlog_errno(ret);
@@ -3138,7 +3148,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
}
blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off);
- ret = ocfs2_insert_extent(osb, handle, dir, &et, 1,
+ ret = ocfs2_insert_extent(handle, &et, 1,
blkno, len, 0, NULL);
if (ret) {
mlog_errno(ret);
@@ -3337,8 +3347,9 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
spin_lock(&OCFS2_I(dir)->ip_lock);
if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
spin_unlock(&OCFS2_I(dir)->ip_lock);
- ocfs2_init_dinode_extent_tree(&et, dir, parent_fe_bh);
- num_free_extents = ocfs2_num_free_extents(osb, dir, &et);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir),
+ parent_fe_bh);
+ num_free_extents = ocfs2_num_free_extents(osb, &et);
if (num_free_extents < 0) {
status = num_free_extents;
mlog_errno(status);
@@ -3387,9 +3398,9 @@ do_extend:
goto bail;
}
- ocfs2_set_new_buffer_uptodate(dir, new_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh);
- status = ocfs2_journal_access_db(handle, dir, new_bh,
+ status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -3829,7 +3840,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
(unsigned long long)OCFS2_I(dir)->ip_blkno,
(unsigned long long)leaf_blkno, insert_hash);
- ocfs2_init_dx_root_extent_tree(&et, dir, dx_root_bh);
+ ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
/*
@@ -3885,7 +3896,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
}
did_quota = 1;
- ret = ocfs2_journal_access_dl(handle, dir, dx_leaf_bh,
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -3949,7 +3960,8 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
}
for (i = 0; i < num_dx_leaves; i++) {
- ret = ocfs2_journal_access_dl(handle, dir, orig_dx_leaves[i],
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
+ orig_dx_leaves[i],
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -4165,7 +4177,7 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir,
* failure to add the dx_root_bh to the journal won't result
* in us losing clusters.
*/
- ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh,
+ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -4207,9 +4219,8 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir,
/* This should never fail considering we start with an empty
* dx_root. */
- ocfs2_init_dx_root_extent_tree(&et, dir, dx_root_bh);
- ret = ocfs2_insert_extent(osb, handle, dir, &et, 0,
- insert_blkno, 1, 0, NULL);
+ ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
+ ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL);
if (ret)
mlog_errno(ret);
did_quota = 0;
@@ -4469,7 +4480,7 @@ static int ocfs2_dx_dir_remove_index(struct inode *dir,
goto out_unlock;
}
- ret = ocfs2_journal_access_di(handle, dir, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -4532,7 +4543,7 @@ int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh)
if (ocfs2_dx_root_inline(dx_root))
goto remove_index;
- ocfs2_init_dx_root_extent_tree(&et, dir, dx_root_bh);
+ ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh);
/* XXX: What if dr_clusters is too large? */
while (le32_to_cpu(dx_root->dr_clusters)) {
@@ -4565,7 +4576,7 @@ remove_index:
goto out;
}
- ocfs2_remove_from_cache(dir, dx_root_bh);
+ ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh);
out:
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &dealloc);
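
The directory hunks above all repeat the same journaled-update pattern, now keyed by the metadata cache rather than the inode. A condensed sketch of that pattern, with a hypothetical helper name:

static int example_touch_dir_block(handle_t *handle, struct inode *dir,
                                   struct buffer_head *bh)
{
        int ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), bh,
                                          OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        /* ... modify the directory block in bh->b_data here ... */

        return ocfs2_journal_dirty(handle, bh);
}
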
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 81eff8e5832..01cf8cc3d28 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 75997b4deaf..ca96bce50e1 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index df52f706f66..ca46002ec10 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/sysctl.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
@@ -683,7 +682,7 @@ static int lockres_seq_show(struct seq_file *s, void *v)
return 0;
}
-static struct seq_operations debug_lockres_ops = {
+static const struct seq_operations debug_lockres_ops = {
.start = lockres_seq_start,
.stop = lockres_seq_stop,
.next = lockres_seq_next,
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 4d9e6b288dd..0334000676d 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 83a9f2972ac..437698e9465 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index f8b653fcd4d..83bcaf266b3 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 43e6e328056..d9fa3d22e17 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index d490b66ad9d..52ec020ea78 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
@@ -212,14 +211,18 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
spin_lock(&dlm->spinlock);
}
+ spin_lock(&res->spinlock);
if (!list_empty(&res->purge)) {
mlog(0, "removing lockres %.*s:%p from purgelist, "
"master = %d\n", res->lockname.len, res->lockname.name,
res, master);
list_del_init(&res->purge);
+ spin_unlock(&res->spinlock);
dlm_lockres_put(res);
dlm->purge_count--;
- }
+ } else
+ spin_unlock(&res->spinlock);
+
__dlm_unhash_lockres(res);
/* lockres is not in the hash now. drop the flag and wake up
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 756f5b0998e..00f53b2aea7 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -30,7 +30,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 110bb57c46a..0d38d67194c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -53,6 +53,7 @@
#include "super.h"
#include "uptodate.h"
#include "quota.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
@@ -110,6 +111,11 @@ static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
+static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level);
+static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
+
#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
/* This aids in debugging situations where a bad LVB might be involved. */
@@ -278,6 +284,12 @@ static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};
+static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
+ .check_downconvert = ocfs2_check_refcount_downconvert,
+ .downconvert_worker = ocfs2_refcount_convert_worker,
+ .flags = 0,
+};
+
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
return lockres->l_type == OCFS2_LOCK_TYPE_META ||
@@ -306,6 +318,12 @@ static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_re
return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}
+static inline struct ocfs2_refcount_tree *
+ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
+{
+ return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
+}
+
static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
if (lockres->l_ops->get_osb)
@@ -693,6 +711,17 @@ void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
info);
}
+void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_super *osb, u64 ref_blkno,
+ unsigned int generation)
+{
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
+ generation, lockres->l_name);
+ ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
+ &ocfs2_refcount_block_lops, osb);
+}
+
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
mlog_entry_void();
@@ -1548,8 +1577,10 @@ int ocfs2_rw_lock(struct inode *inode, int write)
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
- if (ocfs2_mount_local(osb))
+ if (ocfs2_mount_local(osb)) {
+ mlog_exit(0);
return 0;
+ }
lockres = &OCFS2_I(inode)->ip_rw_lockres;
@@ -2127,7 +2158,7 @@ static int ocfs2_inode_lock_update(struct inode *inode,
/* This will discard any caching information we might have had
* for the inode metadata. */
- ocfs2_metadata_cache_purge(inode);
+ ocfs2_metadata_cache_purge(INODE_CACHE(inode));
ocfs2_extent_map_trunc(inode, 0);
@@ -3009,6 +3040,7 @@ static void ocfs2_unlock_ast(void *opaque, int error)
"unlock_action %d\n", error, lockres->l_name,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
+ mlog_exit_void();
return;
}
@@ -3495,11 +3527,11 @@ out:
return UNBLOCK_CONTINUE;
}
-static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
- int new_level)
+static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
+ struct ocfs2_lock_res *lockres,
+ int new_level)
{
- struct inode *inode = ocfs2_lock_res_inode(lockres);
- int checkpointed = ocfs2_inode_fully_checkpointed(inode);
+ int checkpointed = ocfs2_ci_fully_checkpointed(ci);
BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
@@ -3507,10 +3539,18 @@ static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
if (checkpointed)
return 1;
- ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
+ ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
return 0;
}
+static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ struct inode *inode = ocfs2_lock_res_inode(lockres);
+
+ return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
+}
+
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
@@ -3640,6 +3680,26 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
return UNBLOCK_CONTINUE_POST;
}
+static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ struct ocfs2_refcount_tree *tree =
+ ocfs2_lock_res_refcount_tree(lockres);
+
+ return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
+}
+
+static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking)
+{
+ struct ocfs2_refcount_tree *tree =
+ ocfs2_lock_res_refcount_tree(lockres);
+
+ ocfs2_metadata_cache_purge(&tree->rf_ci);
+
+ return UNBLOCK_CONTINUE;
+}
+
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_qinfo_lvb *lvb;
@@ -3752,6 +3812,37 @@ bail:
return status;
}
+int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
+{
+ int status;
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
+ struct ocfs2_super *osb = lockres->l_priv;
+
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
+{
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
+ struct ocfs2_super *osb = lockres->l_priv;
+
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
/*
* This is the filesystem locking protocol. It provides the lock handling
* hooks for the underlying DLM. It has a maximum version number.
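
ocfs2_refcount_lock()/ocfs2_refcount_unlock() are meant to bracket refcount-tree updates the same way the other cluster locks above do. A minimal usage sketch; the tree lookup and the work done under the lock are assumptions, not part of this hunk:

static int example_with_refcount_lock(struct ocfs2_refcount_tree *ref_tree)
{
        int ret;

        ret = ocfs2_refcount_lock(ref_tree, 1);         /* 1 => EX, 0 => PR */
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        /* ... read or modify refcount blocks under the EX lock ... */

        ocfs2_refcount_unlock(ref_tree, 1);
        return 0;
}
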
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 7553836931d..d1ce48e1b3d 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -101,6 +101,9 @@ void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_mem_dqinfo;
void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_mem_dqinfo *info);
+void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_super *osb, u64 ref_blkno,
+ unsigned int generation);
void ocfs2_lock_res_free(struct ocfs2_lock_res *res);
int ocfs2_create_new_inode_locks(struct inode *inode);
int ocfs2_drop_inode_locks(struct inode *inode);
@@ -148,6 +151,9 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock);
void ocfs2_file_unlock(struct file *file);
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex);
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex);
+struct ocfs2_refcount_tree;
+int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex);
+void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex);
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index f2bb1a04d25..843db64e9d4 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -293,7 +293,7 @@ static int ocfs2_last_eb_is_empty(struct inode *inode,
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
- ret = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh);
+ ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -353,11 +353,11 @@ static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
* eb_bh is NULL. Otherwise, eb_bh should point to the extent block
* containing el.
*/
-static int ocfs2_figure_hole_clusters(struct inode *inode,
- struct ocfs2_extent_list *el,
- struct buffer_head *eb_bh,
- u32 v_cluster,
- u32 *num_clusters)
+int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
+ struct ocfs2_extent_list *el,
+ struct buffer_head *eb_bh,
+ u32 v_cluster,
+ u32 *num_clusters)
{
int ret, i;
struct buffer_head *next_eb_bh = NULL;
@@ -375,7 +375,7 @@ static int ocfs2_figure_hole_clusters(struct inode *inode,
if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
goto no_more_extents;
- ret = ocfs2_read_extent_block(inode,
+ ret = ocfs2_read_extent_block(ci,
le64_to_cpu(eb->h_next_leaf_blk),
&next_eb_bh);
if (ret) {
@@ -428,7 +428,8 @@ static int ocfs2_get_clusters_nocache(struct inode *inode,
tree_height = le16_to_cpu(el->l_tree_depth);
if (tree_height > 0) {
- ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh);
+ ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
+ &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -455,7 +456,8 @@ static int ocfs2_get_clusters_nocache(struct inode *inode,
* field.
*/
if (hole_len) {
- ret = ocfs2_figure_hole_clusters(inode, el, eb_bh,
+ ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode),
+ el, eb_bh,
v_cluster, &len);
if (ret) {
mlog_errno(ret);
@@ -539,7 +541,8 @@ static void ocfs2_relative_extent_offsets(struct super_block *sb,
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
- struct ocfs2_extent_list *el)
+ struct ocfs2_extent_list *el,
+ unsigned int *extent_flags)
{
int ret = 0, i;
struct buffer_head *eb_bh = NULL;
@@ -548,7 +551,8 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 coff;
if (el->l_tree_depth) {
- ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh);
+ ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
+ &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -590,6 +594,9 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
*p_cluster = *p_cluster + coff;
if (num_clusters)
*num_clusters = ocfs2_rec_clusters(el, rec) - coff;
+
+ if (extent_flags)
+ *extent_flags = rec->e_flags;
}
out:
if (eb_bh)
@@ -862,8 +869,8 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
}
- rc = ocfs2_read_blocks(inode, p_block, count, bhs + done,
- flags, validate);
+ rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count,
+ bhs + done, flags, validate);
if (rc) {
mlog_errno(rc);
break;
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index b7dd9731b46..e79d41c2c90 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -55,12 +55,18 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
- struct ocfs2_extent_list *el);
+ struct ocfs2_extent_list *el,
+ unsigned int *extent_flags);
int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
struct buffer_head *bh));
+int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
+ struct ocfs2_extent_list *el,
+ struct buffer_head *eb_bh,
+ u32 v_cluster,
+ u32 *num_clusters);
static inline int ocfs2_read_virt_block(struct inode *inode, u64 v_block,
struct buffer_head **bh,
int (*validate)(struct super_block *sb,
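
The extent_flags out-parameter added to ocfs2_xattr_get_clusters() lets xattr callers spot refcounted (shared) extents the same way the file.c code below does with ocfs2_get_clusters(). A hedged sketch with a hypothetical helper name:

static int example_xattr_extent_is_shared(struct inode *inode, u32 v_cluster,
                                          struct ocfs2_extent_list *el)
{
        u32 p_cluster, num_clusters;
        unsigned int ext_flags = 0;
        int ret;

        ret = ocfs2_xattr_get_clusters(inode, v_cluster, &p_cluster,
                                       &num_clusters, el, &ext_flags);
        if (ret) {
                mlog_errno(ret);
                return ret;
        }

        /* A refcounted extent must be CoWed before it is rewritten. */
        return (ext_flags & OCFS2_EXT_REFCOUNTED) ? 1 : 0;
}
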
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 221c5e98957..89fc8ee1f5a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -59,6 +59,7 @@
#include "xattr.h"
#include "acl.h"
#include "quota.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
@@ -259,7 +260,7 @@ int ocfs2_update_inode_atime(struct inode *inode,
goto out;
}
- ret = ocfs2_journal_access_di(handle, inode, bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -334,6 +335,39 @@ out:
return ret;
}
+static int ocfs2_cow_file_pos(struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 offset)
+{
+ int status;
+ u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
+
+ /*
+	 * If the new offset is cluster-aligned, there is nothing for
+	 * ocfs2_zero_range_for_truncate to fill, so there is no need to
+	 * CoW either.
+ */
+ if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
+ return 0;
+
+ status = ocfs2_get_clusters(inode, cpos, &phys,
+ &num_clusters, &ext_flags);
+ if (status) {
+ mlog_errno(status);
+ goto out;
+ }
+
+ if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
+ goto out;
+
+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
+
+out:
+ return status;
+}
+
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
struct inode *inode,
struct buffer_head *fe_bh,
@@ -346,6 +380,17 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
mlog_entry_void();
+ /*
+	 * We need to CoW the cluster that contains the offset if it is reflinked
+ * since we will call ocfs2_zero_range_for_truncate later which will
+ * write "0" from offset to the end of the cluster.
+ */
+ status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
+ if (status) {
+ mlog_errno(status);
+ return status;
+ }
+
/* TODO: This needs to actually orphan the inode in this
* transaction. */
@@ -356,7 +401,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
goto out;
}
- status = ocfs2_journal_access_di(handle, inode, fe_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -486,6 +531,8 @@ bail_unlock_sem:
up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail:
+ if (!status && OCFS2_I(inode)->ip_clusters == 0)
+ status = ocfs2_try_remove_refcount_tree(inode, di_bh);
mlog_exit(status);
return status;
@@ -515,11 +562,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
int ret;
struct ocfs2_extent_tree et;
- ocfs2_init_dinode_extent_tree(&et, inode, fe_bh);
- ret = ocfs2_add_clusters_in_btree(osb, inode, logical_offset,
- clusters_to_add, mark_unwritten,
- &et, handle,
- data_ac, meta_ac, reason_ret);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
+ ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
+ clusters_to_add, mark_unwritten,
+ data_ac, meta_ac, reason_ret);
return ret;
}
@@ -564,7 +610,7 @@ restart_all:
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
clusters_to_add);
- ocfs2_init_dinode_extent_tree(&et, inode, bh);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
&data_ac, &meta_ac);
if (status) {
@@ -593,7 +639,7 @@ restarted_transaction:
/* reserve a write to the file entry early on - that way, if we
* run out of credits in the allocation path, we can still
* update i_size. */
- status = ocfs2_journal_access_di(handle, inode, bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1131,7 +1177,7 @@ static int __ocfs2_write_remove_suid(struct inode *inode,
goto out;
}
- ret = ocfs2_journal_access_di(handle, inode, bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
@@ -1395,7 +1441,7 @@ static int ocfs2_remove_inode_range(struct inode *inode,
struct address_space *mapping = inode->i_mapping;
struct ocfs2_extent_tree et;
- ocfs2_init_dinode_extent_tree(&et, inode, di_bh);
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
if (byte_len == 0)
@@ -1657,6 +1703,70 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
OCFS2_IOC_RESVSP64, &sr, change_size);
}
+int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
+ size_t count)
+{
+ int ret = 0;
+ unsigned int extent_flags;
+ u32 cpos, clusters, extent_len, phys_cpos;
+ struct super_block *sb = inode->i_sb;
+
+ if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
+ !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
+ return 0;
+
+ cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
+ clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
+
+ while (clusters) {
+ ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
+ &extent_flags);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
+ ret = 1;
+ break;
+ }
+
+ if (extent_len > clusters)
+ extent_len = clusters;
+
+ clusters -= extent_len;
+ cpos += extent_len;
+ }
+out:
+ return ret;
+}
+
+static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
+ loff_t pos, size_t count,
+ int *meta_level)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ u32 clusters =
+ ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+
+ ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ *meta_level = 1;
+
+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
+ if (ret)
+ mlog_errno(ret);
+out:
+ brelse(di_bh);
+ return ret;
+}
+
static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
loff_t *ppos,
size_t count,
@@ -1713,6 +1823,22 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
end = saved_pos + count;
+ ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
+ if (ret == 1) {
+ ocfs2_inode_unlock(inode, meta_level);
+ meta_level = -1;
+
+ ret = ocfs2_prepare_inode_for_refcount(inode,
+ saved_pos,
+ count,
+ &meta_level);
+ }
+
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
/*
* Skip the O_DIRECT checks if we don't need
* them.
@@ -1759,7 +1885,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
*ppos = saved_pos;
out_unlock:
- ocfs2_inode_unlock(inode, meta_level);
+ if (meta_level >= 0)
+ ocfs2_inode_unlock(inode, meta_level);
out:
return ret;
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 172f9fbc9fc..d66cf4f7c70 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -69,4 +69,6 @@ int ocfs2_update_inode_atime(struct inode *inode,
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
struct ocfs2_space_resv *sr);
+int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
+ size_t count);
#endif /* OCFS2_FILE_H */
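
As a worked example of the cluster arithmetic used by ocfs2_check_range_for_refcount() and ocfs2_prepare_inode_for_refcount() above, assume a 4 KB cluster size (s_clustersize_bits = 12): for pos = 10000 and count = 9000, cpos = 10000 >> 12 = 2 and ocfs2_clusters_for_bytes(sb, 19000) = 5, so clusters = 5 - 2 = 3. That covers clusters 2 through 4 (bytes 8192-20479), which is exactly the range that may need CoW before the write proceeds.
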
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 4dc8890ba31..0297fb8982b 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -53,6 +53,7 @@
#include "sysfile.h"
#include "uptodate.h"
#include "xattr.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
@@ -562,7 +563,8 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
goto out;
}
- status = ocfs2_journal_access_di(handle, inode, fe_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+ fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -646,7 +648,7 @@ static int ocfs2_remove_inode(struct inode *inode,
}
/* set the inodes dtime */
- status = ocfs2_journal_access_di(handle, inode, di_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -662,7 +664,7 @@ static int ocfs2_remove_inode(struct inode *inode,
goto bail_commit;
}
- ocfs2_remove_from_cache(inode, di_bh);
+ ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh);
vfs_dq_free_inode(inode);
status = ocfs2_free_dinode(handle, inode_alloc_inode,
@@ -781,6 +783,12 @@ static int ocfs2_wipe_inode(struct inode *inode,
goto bail_unlock_dir;
}
+ status = ocfs2_remove_refcount_tree(inode, di_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_unlock_dir;
+ }
+
status = ocfs2_remove_inode(inode, di_bh, orphan_dir_inode,
orphan_dir_bh);
if (status < 0)
@@ -1112,13 +1120,14 @@ void ocfs2_clear_inode(struct inode *inode)
ocfs2_lock_res_free(&oi->ip_inode_lockres);
ocfs2_lock_res_free(&oi->ip_open_lockres);
- ocfs2_metadata_cache_purge(inode);
+ ocfs2_metadata_cache_exit(INODE_CACHE(inode));
- mlog_bug_on_msg(oi->ip_metadata_cache.ci_num_cached,
+ mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached,
"Clear inode of %llu, inode has %u cache items\n",
- (unsigned long long)oi->ip_blkno, oi->ip_metadata_cache.ci_num_cached);
+ (unsigned long long)oi->ip_blkno,
+ INODE_CACHE(inode)->ci_num_cached);
- mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE),
+ mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE),
"Clear inode of %llu, inode has a bad flag\n",
(unsigned long long)oi->ip_blkno);
@@ -1145,9 +1154,7 @@ void ocfs2_clear_inode(struct inode *inode)
(unsigned long long)oi->ip_blkno, oi->ip_open_count);
/* Clear all other flags. */
- oi->ip_flags = OCFS2_INODE_CACHE_INLINE;
- oi->ip_created_trans = 0;
- oi->ip_last_trans = 0;
+ oi->ip_flags = 0;
oi->ip_dir_start_lookup = 0;
oi->ip_blkno = 0ULL;
@@ -1239,7 +1246,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
mlog_entry("(inode %llu)\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
- status = ocfs2_journal_access_di(handle, inode, bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1380,8 +1387,8 @@ int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh,
int rc;
struct buffer_head *tmp = *bh;
- rc = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, &tmp,
- flags, ocfs2_validate_inode_block);
+ rc = ocfs2_read_blocks(INODE_CACHE(inode), OCFS2_I(inode)->ip_blkno,
+ 1, &tmp, flags, ocfs2_validate_inode_block);
/* If ocfs2_read_blocks() got us a new bh, pass it up. */
if (!rc && !*bh)
@@ -1394,3 +1401,56 @@ int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh)
{
return ocfs2_read_inode_block_full(inode, bh, 0);
}
+
+
+static u64 ocfs2_inode_cache_owner(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
+
+ return oi->ip_blkno;
+}
+
+static struct super_block *ocfs2_inode_cache_get_super(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
+
+ return oi->vfs_inode.i_sb;
+}
+
+static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
+
+ spin_lock(&oi->ip_lock);
+}
+
+static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
+
+ spin_unlock(&oi->ip_lock);
+}
+
+static void ocfs2_inode_cache_io_lock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
+
+ mutex_lock(&oi->ip_io_mutex);
+}
+
+static void ocfs2_inode_cache_io_unlock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
+
+ mutex_unlock(&oi->ip_io_mutex);
+}
+
+const struct ocfs2_caching_operations ocfs2_inode_caching_ops = {
+ .co_owner = ocfs2_inode_cache_owner,
+ .co_get_super = ocfs2_inode_cache_get_super,
+ .co_cache_lock = ocfs2_inode_cache_lock,
+ .co_cache_unlock = ocfs2_inode_cache_unlock,
+ .co_io_lock = ocfs2_inode_cache_io_lock,
+ .co_io_unlock = ocfs2_inode_cache_io_unlock,
+};
+
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index ea71525aad4..ba4fe07b293 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -60,12 +60,6 @@ struct ocfs2_inode_info
u32 ip_dir_start_lookup;
- /* next two are protected by trans_inc_lock */
- /* which transaction were we created on? Zero if none. */
- unsigned long ip_created_trans;
- /* last transaction we were a part of. */
- unsigned long ip_last_trans;
-
struct ocfs2_caching_info ip_metadata_cache;
struct ocfs2_extent_map ip_extent_map;
@@ -106,8 +100,6 @@ struct ocfs2_inode_info
#define OCFS2_INODE_MAYBE_ORPHANED 0x00000020
/* Does someone have the file open O_DIRECT */
#define OCFS2_INODE_OPEN_DIRECT 0x00000040
-/* Indicates that the metadata cache should be used as an array. */
-#define OCFS2_INODE_CACHE_INLINE 0x00000080
static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
{
@@ -120,6 +112,12 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
extern struct kmem_cache *ocfs2_inode_cache;
extern const struct address_space_operations ocfs2_aops;
+extern const struct ocfs2_caching_operations ocfs2_inode_caching_ops;
+
+static inline struct ocfs2_caching_info *INODE_CACHE(struct inode *inode)
+{
+ return &OCFS2_I(inode)->ip_metadata_cache;
+}
void ocfs2_clear_inode(struct inode *inode);
void ocfs2_delete_inode(struct inode *inode);
@@ -172,4 +170,10 @@ int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh);
/* The same, but can be passed OCFS2_BH_* flags */
int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh,
int flags);
+
+static inline struct ocfs2_inode_info *cache_info_to_inode(struct ocfs2_caching_info *ci)
+{
+ return container_of(ci, struct ocfs2_inode_info, ip_metadata_cache);
+}
+
#endif /* OCFS2_INODE_H */
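
fs/ocfs2/uptodate.c and uptodate.h are not part of this excerpt, but the generic cache code is expected to dispatch through the ci_ops table that ocfs2_inode_caching_ops now fills in. A hedged sketch of that indirection (the dispatcher name here is hypothetical; ocfs2_metadata_cache_owner(), used earlier in buffer_head_io.c, presumably looks much like this):

static inline u64 example_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
        BUG_ON(!ci || !ci->ci_ops || !ci->ci_ops->co_owner);
        /* For an inode-backed cache this calls ocfs2_inode_cache_owner(). */
        return ci->ci_ops->co_owner(ci);
}
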
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 467b413bec2..31fbb061951 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -21,6 +21,7 @@
#include "ocfs2_fs.h"
#include "ioctl.h"
#include "resize.h"
+#include "refcounttree.h"
#include <linux/ext2_fs.h>
@@ -115,6 +116,9 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int status;
struct ocfs2_space_resv sr;
struct ocfs2_new_group_input input;
+ struct reflink_arguments args;
+ const char *old_path, *new_path;
+ bool preserve;
switch (cmd) {
case OCFS2_IOC_GETFLAGS:
@@ -160,6 +164,15 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT;
return ocfs2_group_add(inode, &input);
+ case OCFS2_IOC_REFLINK:
+ if (copy_from_user(&args, (struct reflink_arguments *)arg,
+ sizeof(args)))
+ return -EFAULT;
+ old_path = (const char *)(unsigned long)args.old_path;
+ new_path = (const char *)(unsigned long)args.new_path;
+ preserve = (args.preserve != 0);
+
+ return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
default:
return -ENOTTY;
}
@@ -182,6 +195,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case OCFS2_IOC_GROUP_EXTEND:
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
+ case OCFS2_IOC_REFLINK:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index c48b93ac6b6..54c16b66327 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -48,6 +48,7 @@
#include "slot_map.h"
#include "super.h"
#include "sysfile.h"
+#include "uptodate.h"
#include "quota.h"
#include "buffer_head_io.h"
@@ -554,6 +555,14 @@ static struct ocfs2_triggers eb_triggers = {
.ot_offset = offsetof(struct ocfs2_extent_block, h_check),
};
+static struct ocfs2_triggers rb_triggers = {
+ .ot_triggers = {
+ .t_commit = ocfs2_commit_trigger,
+ .t_abort = ocfs2_abort_trigger,
+ },
+ .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
+};
+
static struct ocfs2_triggers gd_triggers = {
.ot_triggers = {
.t_commit = ocfs2_commit_trigger,
@@ -601,14 +610,16 @@ static struct ocfs2_triggers dl_triggers = {
};
static int __ocfs2_journal_access(handle_t *handle,
- struct inode *inode,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh,
struct ocfs2_triggers *triggers,
int type)
{
int status;
+ struct ocfs2_super *osb =
+ OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
- BUG_ON(!inode);
+ BUG_ON(!ci || !ci->ci_ops);
BUG_ON(!handle);
BUG_ON(!bh);
@@ -627,15 +638,15 @@ static int __ocfs2_journal_access(handle_t *handle,
BUG();
}
- /* Set the current transaction information on the inode so
+ /* Set the current transaction information on the ci so
* that the locking code knows whether it can drop its locks
- * on this inode or not. We're protected from the commit
+ * on this ci or not. We're protected from the commit
* thread updating the current transaction id until
* ocfs2_commit_trans() because ocfs2_start_trans() took
* j_trans_barrier for us. */
- ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);
+ ocfs2_set_ci_lock_trans(osb->journal, ci);
- mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_lock(ci);
switch (type) {
case OCFS2_JOURNAL_ACCESS_CREATE:
case OCFS2_JOURNAL_ACCESS_WRITE:
@@ -650,9 +661,9 @@ static int __ocfs2_journal_access(handle_t *handle,
status = -EINVAL;
mlog(ML_ERROR, "Unknown access type!\n");
}
- if (!status && ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)) && triggers)
+ if (!status && ocfs2_meta_ecc(osb) && triggers)
jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
- mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
+ ocfs2_metadata_cache_io_unlock(ci);
if (status < 0)
mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
@@ -662,66 +673,65 @@ static int __ocfs2_journal_access(handle_t *handle,
return status;
}
-int ocfs2_journal_access_di(handle_t *handle, struct inode *inode,
- struct buffer_head *bh, int type)
+int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &di_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
}
-int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &eb_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
}
-int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &gd_triggers,
+ return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
type);
}
-int ocfs2_journal_access_db(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &db_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
}
-int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &xb_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
}
-int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &dq_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
}
-int ocfs2_journal_access_dr(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &dr_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
}
-int ocfs2_journal_access_dl(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, &dl_triggers,
- type);
+ return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
+}
+
+int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type)
+{
+ return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
}
-int ocfs2_journal_access(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, inode, bh, NULL, type);
+ return __ocfs2_journal_access(handle, ci, bh, NULL, type);
}
int ocfs2_journal_dirty(handle_t *handle,
@@ -898,7 +908,7 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
ocfs2_bump_recovery_generation(fe);
ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
- status = ocfs2_write_block(osb, bh, journal->j_inode);
+ status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
if (status < 0)
mlog_errno(status);
@@ -1642,7 +1652,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
ocfs2_get_recovery_generation(fe);
ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
- status = ocfs2_write_block(osb, bh, inode);
+ status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
if (status < 0)
mlog_errno(status);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 2c3222aec62..3f74e09b0d8 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -90,56 +90,66 @@ static inline unsigned long ocfs2_inc_trans_id(struct ocfs2_journal *j)
return old_id;
}
-static inline void ocfs2_set_inode_lock_trans(struct ocfs2_journal *journal,
- struct inode *inode)
+static inline void ocfs2_set_ci_lock_trans(struct ocfs2_journal *journal,
+ struct ocfs2_caching_info *ci)
{
spin_lock(&trans_inc_lock);
- OCFS2_I(inode)->ip_last_trans = journal->j_trans_id;
+ ci->ci_last_trans = journal->j_trans_id;
spin_unlock(&trans_inc_lock);
}
/* Used to figure out whether it's safe to drop a metadata lock on an
- * inode. Returns true if all the inodes changes have been
+ * cached object. Returns true if all the object's changes have been
* checkpointed to disk. You should be holding the spinlock on the
* metadata lock while calling this to be sure that nobody can take
* the lock and put it on another transaction. */
-static inline int ocfs2_inode_fully_checkpointed(struct inode *inode)
+static inline int ocfs2_ci_fully_checkpointed(struct ocfs2_caching_info *ci)
{
int ret;
- struct ocfs2_journal *journal = OCFS2_SB(inode->i_sb)->journal;
+ struct ocfs2_journal *journal =
+ OCFS2_SB(ocfs2_metadata_cache_get_super(ci))->journal;
spin_lock(&trans_inc_lock);
- ret = time_after(journal->j_trans_id, OCFS2_I(inode)->ip_last_trans);
+ ret = time_after(journal->j_trans_id, ci->ci_last_trans);
spin_unlock(&trans_inc_lock);
return ret;
}
-/* convenience function to check if an inode is still new (has never
- * hit disk) Will do you a favor and set created_trans = 0 when you've
- * been checkpointed. returns '1' if the inode is still new. */
-static inline int ocfs2_inode_is_new(struct inode *inode)
+/* convenience function to check if an object backed by struct
+ * ocfs2_caching_info is still new (has never hit disk). Will do you a
+ * favor and set ci_created_trans = 0 once you've been checkpointed.
+ * Returns '1' if the ci is still new. */
+static inline int ocfs2_ci_is_new(struct ocfs2_caching_info *ci)
{
int ret;
+ struct ocfs2_journal *journal =
+ OCFS2_SB(ocfs2_metadata_cache_get_super(ci))->journal;
+ spin_lock(&trans_inc_lock);
+ ret = !(time_after(journal->j_trans_id, ci->ci_created_trans));
+ if (!ret)
+ ci->ci_created_trans = 0;
+ spin_unlock(&trans_inc_lock);
+ return ret;
+}
+
+/* Wrapper for inodes so we can check system files */
+static inline int ocfs2_inode_is_new(struct inode *inode)
+{
/* System files are never "new" as they're written out by
* mkfs. This helps us early during mount, before we have the
* journal open and j_trans_id could be junk. */
if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
return 0;
- spin_lock(&trans_inc_lock);
- ret = !(time_after(OCFS2_SB(inode->i_sb)->journal->j_trans_id,
- OCFS2_I(inode)->ip_created_trans));
- if (!ret)
- OCFS2_I(inode)->ip_created_trans = 0;
- spin_unlock(&trans_inc_lock);
- return ret;
+
+ return ocfs2_ci_is_new(INODE_CACHE(inode));
}
-static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
- struct inode *inode)
+static inline void ocfs2_ci_set_new(struct ocfs2_super *osb,
+ struct ocfs2_caching_info *ci)
{
spin_lock(&trans_inc_lock);
- OCFS2_I(inode)->ip_created_trans = osb->journal->j_trans_id;
+ ci->ci_created_trans = osb->journal->j_trans_id;
spin_unlock(&trans_inc_lock);
}
@@ -200,7 +210,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
if (ocfs2_mount_local(osb))
return;
- if (!ocfs2_inode_fully_checkpointed(inode)) {
+ if (!ocfs2_ci_fully_checkpointed(INODE_CACHE(inode))) {
/* WARNING: This only kicks off a single
* checkpoint. If someone races you and adds more
* metadata to the journal, you won't know, and will
@@ -210,7 +220,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
ocfs2_start_checkpoint(osb);
wait_event(osb->journal->j_checkpointed,
- ocfs2_inode_fully_checkpointed(inode));
+ ocfs2_ci_fully_checkpointed(INODE_CACHE(inode)));
}
}
@@ -266,31 +276,34 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks);
/* ocfs2_inode */
-int ocfs2_journal_access_di(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* ocfs2_extent_block */
-int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
+ struct buffer_head *bh, int type);
+/* ocfs2_refcount_block */
+int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* ocfs2_group_desc */
-int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* ocfs2_xattr_block */
-int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* quota blocks */
-int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* dirblock */
-int ocfs2_journal_access_db(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* ocfs2_dx_root_block */
-int ocfs2_journal_access_dr(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* ocfs2_dx_leaf */
-int ocfs2_journal_access_dl(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/* Anything that has no ecc */
-int ocfs2_journal_access(handle_t *handle, struct inode *inode,
+int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
/*
@@ -477,6 +490,23 @@ static inline int ocfs2_calc_dxi_expand_credits(struct super_block *sb)
return credits;
}
+/* inode update, new refcount block and its allocation credits. */
+#define OCFS2_REFCOUNT_TREE_CREATE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1 \
+ + OCFS2_SUBALLOC_ALLOC)
+
+/* inode and the refcount block update. */
+#define OCFS2_REFCOUNT_TREE_SET_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)
+
+/*
+ * inode and the refcount block update.
+ * It doesn't include the credits for sub alloc change.
+ * So if we need to free the bit, OCFS2_SUBALLOC_FREE needs to be added.
+ */
+#define OCFS2_REFCOUNT_TREE_REMOVE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)
+
+/* 2 metadata alloc, 2 new blocks and root refcount block */
+#define OCFS2_EXPAND_REFCOUNT_TREE_CREDITS (OCFS2_SUBALLOC_ALLOC * 2 + 3)
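A hedged illustration of how these credit macros are consumed, mirroring what ocfs2_remove_refcount_tree() does later in this patch: the base credits cover the inode and refcount block updates, and OCFS2_SUBALLOC_FREE is added only when dropping the last reference also frees the suballocator bit. The osb and last_user names are placeholders for the caller's context.

	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
	handle_t *handle;

	if (last_user)			/* the suballoc bit will be freed too */
		credits += OCFS2_SUBALLOC_FREE;

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);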
+
/*
* Please note that the caller must make sure that root_el is the root
* of extent tree. So for an inode, it should be &fe->id2.i_list. Otherwise
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index bac7e6abaf4..ac10f83edb9 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -297,8 +297,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
}
memcpy(alloc_copy, alloc, bh->b_size);
- status = ocfs2_journal_access_di(handle, local_alloc_inode, bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
+ bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_commit;
@@ -392,7 +392,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
ocfs2_clear_local_alloc(alloc);
ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
- status = ocfs2_write_block(osb, alloc_bh, inode);
+ status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
if (status < 0)
mlog_errno(status);
@@ -678,7 +678,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
* delete bits from it! */
*num_bits = bits_wanted;
- status = ocfs2_journal_access_di(handle, local_alloc_inode,
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(local_alloc_inode),
osb->local_alloc_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
@@ -1156,7 +1157,8 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
}
memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);
- status = ocfs2_journal_access_di(handle, local_alloc_inode,
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(local_alloc_inode),
osb->local_alloc_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8601f934010..f010b22b1c4 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -69,7 +69,6 @@
static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode *dir,
struct inode *inode,
- struct dentry *dentry,
dev_t dev,
struct buffer_head **new_fe_bh,
struct buffer_head *parent_fe_bh,
@@ -78,7 +77,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
- struct inode *inode,
+ u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup);
@@ -358,8 +357,12 @@ static int ocfs2_mknod(struct inode *dir,
}
did_quota_inode = 1;
+ mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry,
+ inode->i_mode, (unsigned long)dev, dentry->d_name.len,
+ dentry->d_name.name);
+
/* do the real work now. */
- status = ocfs2_mknod_locked(osb, dir, inode, dentry, dev,
+ status = ocfs2_mknod_locked(osb, dir, inode, dev,
&new_fe_bh, parent_fe_bh, handle,
inode_ac);
if (status < 0) {
@@ -375,7 +378,8 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
- status = ocfs2_journal_access_di(handle, dir, parent_fe_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(dir),
+ parent_fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -465,7 +469,6 @@ leave:
static int ocfs2_mknod_locked(struct ocfs2_super *osb,
struct inode *dir,
struct inode *inode,
- struct dentry *dentry,
dev_t dev,
struct buffer_head **new_fe_bh,
struct buffer_head *parent_fe_bh,
@@ -479,10 +482,6 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
u16 suballoc_bit;
u16 feat;
- mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry,
- inode->i_mode, (unsigned long)dev, dentry->d_name.len,
- dentry->d_name.name);
-
*new_fe_bh = NULL;
status = ocfs2_claim_new_inode(osb, handle, dir, parent_fe_bh,
@@ -507,9 +506,10 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
mlog_errno(status);
goto leave;
}
- ocfs2_set_new_buffer_uptodate(inode, *new_fe_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), *new_fe_bh);
- status = ocfs2_journal_access_di(handle, inode, *new_fe_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+ *new_fe_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -565,7 +565,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
}
ocfs2_populate_inode(inode, fe, 1);
- ocfs2_inode_set_new(osb, inode);
+ ocfs2_ci_set_new(osb, INODE_CACHE(inode));
if (!ocfs2_mount_local(osb)) {
status = ocfs2_create_new_inode_locks(inode);
if (status < 0)
@@ -682,7 +682,7 @@ static int ocfs2_link(struct dentry *old_dentry,
goto out_unlock_inode;
}
- err = ocfs2_journal_access_di(handle, inode, fe_bh,
+ err = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (err < 0) {
mlog_errno(err);
@@ -850,7 +850,8 @@ static int ocfs2_unlink(struct inode *dir,
}
if (inode_is_unlinkable(inode)) {
- status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, inode,
+ status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
+ OCFS2_I(inode)->ip_blkno,
orphan_name, &orphan_insert);
if (status < 0) {
mlog_errno(status);
@@ -866,7 +867,7 @@ static int ocfs2_unlink(struct inode *dir,
goto leave;
}
- status = ocfs2_journal_access_di(handle, inode, fe_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1241,9 +1242,8 @@ static int ocfs2_rename(struct inode *old_dir,
if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) {
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
- new_inode,
- orphan_name,
- &orphan_insert);
+ OCFS2_I(new_inode)->ip_blkno,
+ orphan_name, &orphan_insert);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -1284,7 +1284,8 @@ static int ocfs2_rename(struct inode *old_dir,
goto bail;
}
}
- status = ocfs2_journal_access_di(handle, new_inode, newfe_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(new_inode),
+ newfe_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1331,7 +1332,8 @@ static int ocfs2_rename(struct inode *old_dir,
old_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(old_inode);
- status = ocfs2_journal_access_di(handle, old_inode, old_inode_bh,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(old_inode),
+ old_inode_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status >= 0) {
old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
@@ -1407,9 +1409,10 @@ static int ocfs2_rename(struct inode *old_dir,
(int)old_dir_nlink, old_dir->i_nlink);
} else {
struct ocfs2_dinode *fe;
- status = ocfs2_journal_access_di(handle, old_dir,
- old_dir_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(old_dir),
+ old_dir_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
fe = (struct ocfs2_dinode *) old_dir_bh->b_data;
ocfs2_set_links_count(fe, old_dir->i_nlink);
status = ocfs2_journal_dirty(handle, old_dir_bh);
@@ -1527,9 +1530,11 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
mlog_errno(status);
goto bail;
}
- ocfs2_set_new_buffer_uptodate(inode, bhs[virtual]);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode),
+ bhs[virtual]);
- status = ocfs2_journal_access(handle, inode, bhs[virtual],
+ status = ocfs2_journal_access(handle, INODE_CACHE(inode),
+ bhs[virtual],
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1692,7 +1697,11 @@ static int ocfs2_symlink(struct inode *dir,
}
did_quota_inode = 1;
- status = ocfs2_mknod_locked(osb, dir, inode, dentry,
+ mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry,
+ inode->i_mode, dentry->d_name.len,
+ dentry->d_name.name);
+
+ status = ocfs2_mknod_locked(osb, dir, inode,
0, &new_fe_bh, parent_fe_bh, handle,
inode_ac);
if (status < 0) {
@@ -1842,7 +1851,7 @@ bail:
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
- struct inode *inode,
+ u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup)
{
@@ -1850,7 +1859,7 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct buffer_head *orphan_dir_bh = NULL;
int status = 0;
- status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
+ status = ocfs2_blkno_stringify(blkno, name);
if (status < 0) {
mlog_errno(status);
return status;
@@ -1917,7 +1926,9 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
goto leave;
}
- status = ocfs2_journal_access_di(handle, orphan_dir_inode, orphan_dir_bh,
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(orphan_dir_inode),
+ orphan_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -2002,7 +2013,9 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
goto leave;
}
- status = ocfs2_journal_access_di(handle,orphan_dir_inode, orphan_dir_bh,
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(orphan_dir_inode),
+ orphan_dir_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -2028,6 +2041,274 @@ leave:
return status;
}
+int ocfs2_create_inode_in_orphan(struct inode *dir,
+ int mode,
+ struct inode **new_inode)
+{
+ int status, did_quota_inode = 0;
+ struct inode *inode = NULL;
+ struct inode *orphan_dir = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ struct ocfs2_dinode *di = NULL;
+ handle_t *handle = NULL;
+ char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
+ struct buffer_head *parent_di_bh = NULL;
+ struct buffer_head *new_di_bh = NULL;
+ struct ocfs2_alloc_context *inode_ac = NULL;
+ struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+
+ status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+
+ /*
+ * We give the orphan dir the root blkno to fake an orphan name,
+ * and allocate enough space for our insertion.
+ */
+ status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
+ osb->root_blkno,
+ orphan_name, &orphan_insert);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* reserve an inode spot */
+ status = ocfs2_reserve_new_inode(osb, &inode_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ inode = ocfs2_get_init_inode(dir, mode);
+ if (!inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, 0, 0));
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto leave;
+ }
+
+	/* We don't use the standard VFS wrapper because we don't want
+	 * vfs_dq_init() to be called. */
+ if (sb_any_quota_active(osb->sb) &&
+ osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
+ status = -EDQUOT;
+ goto leave;
+ }
+ did_quota_inode = 1;
+
+ /* do the real work now. */
+ status = ocfs2_mknod_locked(osb, dir, inode,
+ 0, &new_di_bh, parent_di_bh, handle,
+ inode_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ di = (struct ocfs2_dinode *)new_di_bh->b_data;
+ status = ocfs2_orphan_add(osb, handle, inode, di, orphan_name,
+ &orphan_insert, orphan_dir);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+	/* Take the open lock so that other nodes can't remove it
+	 * from the orphan dir. */
+ status = ocfs2_open_lock(inode);
+ if (status < 0)
+ mlog_errno(status);
+
+leave:
+ if (status < 0 && did_quota_inode)
+ vfs_dq_free_inode(inode);
+ if (handle)
+ ocfs2_commit_trans(osb, handle);
+
+ if (orphan_dir) {
+ /* This was locked for us in ocfs2_prepare_orphan_dir() */
+ ocfs2_inode_unlock(orphan_dir, 1);
+ mutex_unlock(&orphan_dir->i_mutex);
+ iput(orphan_dir);
+ }
+
+ if (status == -ENOSPC)
+ mlog(0, "Disk is full\n");
+
+ if ((status < 0) && inode) {
+ clear_nlink(inode);
+ iput(inode);
+ }
+
+ if (inode_ac)
+ ocfs2_free_alloc_context(inode_ac);
+
+ brelse(new_di_bh);
+
+ if (!status)
+ *new_inode = inode;
+
+ ocfs2_free_dir_lookup_result(&orphan_insert);
+
+ ocfs2_inode_unlock(dir, 1);
+ brelse(parent_di_bh);
+ return status;
+}
+
+int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
+ struct inode *inode,
+ struct dentry *dentry)
+{
+ int status = 0;
+ struct buffer_head *parent_di_bh = NULL;
+ handle_t *handle = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ struct ocfs2_dinode *dir_di, *di;
+ struct inode *orphan_dir_inode = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dir_lookup_result lookup = { NULL, };
+
+	mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
+ dentry->d_name.len, dentry->d_name.name);
+
+ status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+
+ dir_di = (struct ocfs2_dinode *) parent_di_bh->b_data;
+ if (!dir_di->i_links_count) {
+ /* can't make a file in a deleted directory. */
+ status = -ENOENT;
+ goto leave;
+ }
+
+ status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
+ dentry->d_name.len);
+ if (status)
+ goto leave;
+
+ /* get a spot inside the dir. */
+ status = ocfs2_prepare_dir_for_insert(osb, dir, parent_di_bh,
+ dentry->d_name.name,
+ dentry->d_name.len, &lookup);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ osb->slot_num);
+ if (!orphan_dir_inode) {
+ status = -EEXIST;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ mutex_lock(&orphan_dir_inode->i_mutex);
+
+ status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ iput(orphan_dir_inode);
+ goto leave;
+ }
+
+ status = ocfs2_read_inode_block(inode, &di_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto orphan_unlock;
+ }
+
+ handle = ocfs2_start_trans(osb, ocfs2_rename_credits(osb->sb));
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto orphan_unlock;
+ }
+
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+ di_bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_commit;
+ }
+
+ status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
+ orphan_dir_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_commit;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL);
+ di->i_orphaned_slot = 0;
+ ocfs2_journal_dirty(handle, di_bh);
+
+ status = ocfs2_add_entry(handle, dentry, inode,
+ OCFS2_I(inode)->ip_blkno, parent_di_bh,
+ &lookup);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_commit;
+ }
+
+ status = ocfs2_dentry_attach_lock(dentry, inode,
+ OCFS2_I(dir)->ip_blkno);
+ if (status) {
+ mlog_errno(status);
+ goto out_commit;
+ }
+
+ insert_inode_hash(inode);
+ dentry->d_op = &ocfs2_dentry_ops;
+ d_instantiate(dentry, inode);
+ status = 0;
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+orphan_unlock:
+ ocfs2_inode_unlock(orphan_dir_inode, 1);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ iput(orphan_dir_inode);
+leave:
+
+ ocfs2_inode_unlock(dir, 1);
+
+ brelse(di_bh);
+ brelse(parent_di_bh);
+ brelse(orphan_dir_bh);
+
+ ocfs2_free_dir_lookup_result(&lookup);
+
+ mlog_exit(status);
+
+ return status;
+}
+
const struct inode_operations ocfs2_dir_iops = {
.create = ocfs2_create,
.lookup = ocfs2_lookup,
diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h
index 688aef64c87..e5d059d4f11 100644
--- a/fs/ocfs2/namei.h
+++ b/fs/ocfs2/namei.h
@@ -35,5 +35,11 @@ int ocfs2_orphan_del(struct ocfs2_super *osb,
struct inode *orphan_dir_inode,
struct inode *inode,
struct buffer_head *orphan_dir_bh);
+int ocfs2_create_inode_in_orphan(struct inode *dir,
+ int mode,
+ struct inode **new_inode);
+int ocfs2_mv_orphaned_inode_to_new(struct inode *dir,
+ struct inode *new_inode,
+ struct dentry *new_dentry);
#endif /* OCFS2_NAMEI_H */
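A rough sketch of the calling sequence the two new namei helpers enable, alongside the reflink support this patch adds: the target inode is first created directly in the orphan directory, populated while it is invisible in the namespace, and only then moved to its final name. Error handling and the data-copy step are omitted; dir, mode and dentry are assumed from the caller.

	struct inode *new_inode = NULL;
	int ret;

	ret = ocfs2_create_inode_in_orphan(dir, mode, &new_inode);
	if (ret)
		return ret;

	/* ... populate new_inode (e.g. share refcounted extents) ... */

	ret = ocfs2_mv_orphaned_inode_to_new(dir, new_inode, dentry);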
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 39e1d5a3950..eae40460242 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -51,20 +51,51 @@
/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"
+
+/* Caching of metadata buffers */
+
/* Most user visible OCFS2 inodes will have very few pieces of
* metadata, but larger files (including bitmaps, etc) must be taken
* into account when designing an access scheme. We allow a small
* amount of inlined blocks to be stored on an array and grow the
* structure into a rb tree when necessary. */
-#define OCFS2_INODE_MAX_CACHE_ARRAY 2
+#define OCFS2_CACHE_INFO_MAX_ARRAY 2
+
+/* Flags for ocfs2_caching_info */
+
+enum ocfs2_caching_info_flags {
+ /* Indicates that the metadata cache is using the inline array */
+ OCFS2_CACHE_FL_INLINE = 1<<1,
+};
+struct ocfs2_caching_operations;
struct ocfs2_caching_info {
+ /*
+	 * The parent structure provides the locks, but because the
+	 * parent structure can differ, it supplies its own locking
+	 * operations to struct ocfs2_caching_info via ci_ops.
+ */
+ const struct ocfs2_caching_operations *ci_ops;
+
+ /* next two are protected by trans_inc_lock */
+ /* which transaction were we created on? Zero if none. */
+ unsigned long ci_created_trans;
+ /* last transaction we were a part of. */
+ unsigned long ci_last_trans;
+
+ /* Cache structures */
+ unsigned int ci_flags;
unsigned int ci_num_cached;
union {
- sector_t ci_array[OCFS2_INODE_MAX_CACHE_ARRAY];
+ sector_t ci_array[OCFS2_CACHE_INFO_MAX_ARRAY];
struct rb_root ci_tree;
} ci_cache;
};
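A sketch only, based on the fields above: consumers of the cache branch on OCFS2_CACHE_FL_INLINE to decide whether cached block numbers live in the small inline array or in the rb tree. The lookup helpers themselves (in the uptodate code) are not shown in this hunk.

static inline int example_cache_uses_array(struct ocfs2_caching_info *ci)
{
	/* When the flag is set, at most OCFS2_CACHE_INFO_MAX_ARRAY block
	 * numbers are kept in ci->ci_cache.ci_array; otherwise they are
	 * kept in the rb tree rooted at ci->ci_cache.ci_tree. */
	return !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
}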
+/*
+ * Need this prototype here instead of in uptodate.h because journal.h
+ * uses it.
+ */
+struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci);
/* this limits us to 256 nodes
* if we need more, we can do a kmalloc for the map */
@@ -377,12 +408,17 @@ struct ocfs2_super
/* the group we used to allocate inodes. */
u64 osb_inode_alloc_group;
+
+ /* rb tree root for refcount lock. */
+ struct rb_root osb_rf_lock_tree;
+ struct ocfs2_refcount_tree *osb_ref_tree_lru;
};
#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info)
/* Useful typedef for passing around journal access functions */
-typedef int (*ocfs2_journal_access_func)(handle_t *handle, struct inode *inode,
+typedef int (*ocfs2_journal_access_func)(handle_t *handle,
+ struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type);
static inline int ocfs2_should_order_data(struct inode *inode)
@@ -480,6 +516,13 @@ static inline void ocfs2_add_links_count(struct ocfs2_dinode *di, int n)
ocfs2_set_links_count(di, links);
}
+static inline int ocfs2_refcount_tree(struct ocfs2_super *osb)
+{
+ if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE)
+ return 1;
+ return 0;
+}
+
/* set / clear functions because cluster events can make these happen
* in parallel so we want the transitions to be atomic. this also
* means that any future flags osb_flags must be protected by spinlock
@@ -578,6 +621,9 @@ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
#define OCFS2_IS_VALID_DX_LEAF(ptr) \
(!strcmp((ptr)->dl_signature, OCFS2_DX_LEAF_SIGNATURE))
+#define OCFS2_IS_VALID_REFCOUNT_BLOCK(ptr) \
+ (!strcmp((ptr)->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE))
+
static inline unsigned long ino_from_blkno(struct super_block *sb,
u64 blkno)
{
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7ab6e9e5e77..e9431e4a5e7 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -68,6 +68,7 @@
#define OCFS2_DIR_TRAILER_SIGNATURE "DIRTRL1"
#define OCFS2_DX_ROOT_SIGNATURE "DXDIR01"
#define OCFS2_DX_LEAF_SIGNATURE "DXLEAF1"
+#define OCFS2_REFCOUNT_BLOCK_SIGNATURE "REFCNT1"
/* Compatibility flags */
#define OCFS2_HAS_COMPAT_FEATURE(sb,mask) \
@@ -98,7 +99,8 @@
| OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \
| OCFS2_FEATURE_INCOMPAT_XATTR \
| OCFS2_FEATURE_INCOMPAT_META_ECC \
- | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS)
+ | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \
+ | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE)
#define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \
| OCFS2_FEATURE_RO_COMPAT_USRQUOTA \
| OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)
@@ -160,6 +162,9 @@
/* Metadata checksum and error correction */
#define OCFS2_FEATURE_INCOMPAT_META_ECC 0x0800
+/* Refcount tree support */
+#define OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE 0x1000
+
/*
* backup superblock flag is used to indicate that this volume
* has backup superblocks.
@@ -223,6 +228,7 @@
#define OCFS2_HAS_XATTR_FL (0x0002)
#define OCFS2_INLINE_XATTR_FL (0x0004)
#define OCFS2_INDEXED_DIR_FL (0x0008)
+#define OCFS2_HAS_REFCOUNT_FL (0x0010)
/* Inode attributes, keep in sync with EXT2 */
#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */
@@ -241,8 +247,11 @@
/*
* Extent record flags (e_node.leaf.flags)
*/
-#define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but
- * unwritten */
+#define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but
+ * unwritten */
+#define OCFS2_EXT_REFCOUNTED (0x02) /* Extent is reference
+ * counted in an associated
+ * refcount tree */
/*
* ioctl commands
@@ -292,6 +301,15 @@ struct ocfs2_new_group_input {
#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input)
#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input)
+/* Used to pass 2 file names to reflink. */
+struct reflink_arguments {
+ __u64 old_path;
+ __u64 new_path;
+ __u64 preserve;
+};
+#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
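A hedged userspace sketch of driving this ioctl, assuming (not stated in this hunk) that old_path and new_path carry pointers to NUL-terminated path strings, that preserve is a boolean flag, and that fd is an open descriptor on the ocfs2 filesystem. The helper name is hypothetical, and the struct and ioctl number are taken to be available from the installed ocfs2_fs.h.

#include <stdint.h>
#include <sys/ioctl.h>

static int example_reflink(int fd, const char *oldp, const char *newp,
			   int preserve)
{
	struct reflink_arguments args = {
		.old_path = (uint64_t)(unsigned long)oldp,
		.new_path = (uint64_t)(unsigned long)newp,
		.preserve = preserve,
	};

	return ioctl(fd, OCFS2_IOC_REFLINK, &args);
}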
+
+
/*
* Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
*/
@@ -717,7 +735,8 @@ struct ocfs2_dinode {
__le64 i_xattr_loc;
/*80*/ struct ocfs2_block_check i_check; /* Error checking */
/*88*/ __le64 i_dx_root; /* Pointer to dir index root block */
- __le64 i_reserved2[5];
+/*90*/ __le64 i_refcount_loc;
+ __le64 i_reserved2[4];
/*B8*/ union {
__le64 i_pad1; /* Generic way to refer to this
64bit union */
@@ -901,6 +920,60 @@ struct ocfs2_group_desc
/*40*/ __u8 bg_bitmap[0];
};
+struct ocfs2_refcount_rec {
+/*00*/ __le64 r_cpos; /* Physical offset, in clusters */
+ __le32 r_clusters; /* Clusters covered by this extent */
+ __le32 r_refcount; /* Reference count of this extent */
+/*10*/
+};
+#define OCFS2_32BIT_POS_MASK (0xffffffffULL)
+
+#define OCFS2_REFCOUNT_LEAF_FL (0x00000001)
+#define OCFS2_REFCOUNT_TREE_FL (0x00000002)
+
+struct ocfs2_refcount_list {
+/*00*/ __le16 rl_count; /* Maximum number of entries possible
+ in rl_records */
+ __le16 rl_used; /* Current number of used records */
+ __le32 rl_reserved2;
+ __le64 rl_reserved1; /* Pad to sizeof(ocfs2_refcount_record) */
+/*10*/ struct ocfs2_refcount_rec rl_recs[0]; /* Refcount records */
+};
+
+
+struct ocfs2_refcount_block {
+/*00*/ __u8 rf_signature[8]; /* Signature for verification */
+ __le16 rf_suballoc_slot; /* Slot suballocator this block
+ belongs to */
+ __le16 rf_suballoc_bit; /* Bit offset in suballocator
+ block group */
+ __le32 rf_fs_generation; /* Must match superblock */
+/*10*/ __le64 rf_blkno; /* Offset on disk, in blocks */
+ __le64 rf_parent; /* Parent block, only valid if
+ OCFS2_REFCOUNT_LEAF_FL is set in
+ rf_flags */
+/*20*/ struct ocfs2_block_check rf_check; /* Error checking */
+ __le64 rf_last_eb_blk; /* Pointer to last extent block */
+/*30*/ __le32 rf_count; /* Number of inodes sharing this
+ refcount tree */
+ __le32 rf_flags; /* See the flags above */
+ __le32 rf_clusters; /* clusters covered by refcount tree. */
+ __le32 rf_cpos; /* cluster offset in refcount tree.*/
+/*40*/ __le32 rf_generation; /* generation number. all be the same
+ * for the same refcount tree. */
+ __le32 rf_reserved0;
+ __le64 rf_reserved1[7];
+/*80*/ union {
+ struct ocfs2_refcount_list rf_records; /* List of refcount
+ records */
+ struct ocfs2_extent_list rf_list; /* Extent record list,
+ only valid if
+ OCFS2_REFCOUNT_TREE_FL
+ is set in rf_flags */
+ };
+/* Actual on-disk size is one block */
+};
+
/*
* On disk extended attribute structure for OCFS2.
*/
@@ -1312,6 +1385,32 @@ static inline u16 ocfs2_xattr_recs_per_xb(struct super_block *sb)
return size / sizeof(struct ocfs2_extent_rec);
}
+
+static inline u16 ocfs2_extent_recs_per_rb(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_refcount_block, rf_list.l_recs);
+
+ return size / sizeof(struct ocfs2_extent_rec);
+}
+
+static inline u16 ocfs2_refcount_recs_per_rb(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_refcount_block, rf_records.rl_recs);
+
+ return size / sizeof(struct ocfs2_refcount_rec);
+}
+
+static inline u32
+ocfs2_get_ref_rec_low_cpos(const struct ocfs2_refcount_rec *rec)
+{
+ return le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
+}
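A worked example of the helper above, assuming a 4 KB block size and the on-disk layout shown earlier in this patch (the rf_records union starts at offset 0x80, its rl_recs array 0x10 further in, and each ocfs2_refcount_rec is 16 bytes):

/*
 *	ocfs2_refcount_recs_per_rb(sb) = (4096 - 0x90) / 16
 *	                               = (4096 - 144) / 16
 *	                               = 247
 *
 * so a single leaf refcount block holds 247 refcount records.
 */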
#else
static inline int ocfs2_fast_symlink_chars(int blocksize)
{
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index c212cf5a2bd..d277aabf5df 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -49,6 +49,7 @@ enum ocfs2_lock_type {
OCFS2_LOCK_TYPE_QINFO,
OCFS2_LOCK_TYPE_NFS_SYNC,
OCFS2_LOCK_TYPE_ORPHAN_SCAN,
+ OCFS2_LOCK_TYPE_REFCOUNT,
OCFS2_NUM_LOCK_TYPES
};
@@ -89,6 +90,9 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type)
case OCFS2_LOCK_TYPE_ORPHAN_SCAN:
c = 'P';
break;
+ case OCFS2_LOCK_TYPE_REFCOUNT:
+ c = 'T';
+ break;
default:
c = '\0';
}
@@ -110,6 +114,7 @@ static char *ocfs2_lock_type_strings[] = {
[OCFS2_LOCK_TYPE_QINFO] = "Quota",
[OCFS2_LOCK_TYPE_NFS_SYNC] = "NFSSync",
[OCFS2_LOCK_TYPE_ORPHAN_SCAN] = "OrphanScan",
+ [OCFS2_LOCK_TYPE_REFCOUNT] = "Refcount",
};
static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index 3fb96fcd4c8..e5df9d170b0 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -109,7 +109,7 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex);
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
struct buffer_head **bh);
-extern struct dquot_operations ocfs2_quota_operations;
+extern const struct dquot_operations ocfs2_quota_operations;
extern struct quota_format_type ocfs2_quota_format;
int ocfs2_quota_setup(void);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 44f2a5e1d04..b437dc0c4ca 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -154,7 +154,7 @@ static int ocfs2_get_quota_block(struct inode *inode, int block,
err = -EIO;
mlog_errno(err);
}
- return err;;
+ return err;
}
/* Read data from global quotafile - avoid pagecache and such because we cannot
@@ -253,8 +253,9 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
flush_dcache_page(bh->b_page);
set_buffer_uptodate(bh);
unlock_buffer(bh);
- ocfs2_set_buffer_uptodate(gqinode, bh);
- err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type);
+ ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
+ err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
+ ja_type);
if (err < 0) {
brelse(bh);
goto out;
@@ -849,7 +850,7 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
-struct dquot_operations ocfs2_quota_operations = {
+const struct dquot_operations ocfs2_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index bdb09cb6e1f..1a2c50a759f 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -108,7 +108,7 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh,
mlog_errno(status);
return status;
}
- status = ocfs2_journal_access_dq(handle, inode, bh,
+ status = ocfs2_journal_access_dq(handle, INODE_CACHE(inode), bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -510,7 +510,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
goto out_commit;
}
/* Release local quota file entry */
- status = ocfs2_journal_access_dq(handle, lqinode,
+ status = ocfs2_journal_access_dq(handle,
+ INODE_CACHE(lqinode),
qbh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -619,7 +620,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
mlog_errno(status);
goto out_bh;
}
- status = ocfs2_journal_access_dq(handle, lqinode, bh,
+ status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode),
+ bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -993,8 +995,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
goto out_trans;
}
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
- ocfs2_set_new_buffer_uptodate(lqinode, bh);
- status = ocfs2_journal_access_dq(handle, lqinode, bh,
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), bh);
+ status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1027,8 +1029,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
mlog_errno(status);
goto out_trans;
}
- ocfs2_set_new_buffer_uptodate(lqinode, dbh);
- status = ocfs2_journal_access_dq(handle, lqinode, dbh,
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), dbh);
+ status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), dbh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1131,7 +1133,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
mlog_errno(status);
goto out;
}
- ocfs2_set_new_buffer_uptodate(lqinode, bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), bh);
/* Local quota info, chunk header and the new block we initialize */
handle = ocfs2_start_trans(OCFS2_SB(sb),
@@ -1143,7 +1145,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
goto out;
}
/* Zero created block */
- status = ocfs2_journal_access_dq(handle, lqinode, bh,
+ status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
@@ -1158,7 +1160,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
goto out_trans;
}
/* Update chunk header */
- status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh,
+ status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode),
+ chunk->qc_headerbh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1292,7 +1295,8 @@ static int ocfs2_local_release_dquot(struct dquot *dquot)
goto out;
}
- status = ocfs2_journal_access_dq(handle, sb_dqopt(sb)->files[type],
+ status = ocfs2_journal_access_dq(handle,
+ INODE_CACHE(sb_dqopt(sb)->files[type]),
od->dq_chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
new file mode 100644
index 00000000000..60287fc56bc
--- /dev/null
+++ b/fs/ocfs2/refcounttree.c
@@ -0,0 +1,4313 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * refcounttree.c
+ *
+ * Copyright (C) 2009 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/sort.h>
+#define MLOG_MASK_PREFIX ML_REFCOUNT
+#include <cluster/masklog.h>
+#include "ocfs2.h"
+#include "inode.h"
+#include "alloc.h"
+#include "suballoc.h"
+#include "journal.h"
+#include "uptodate.h"
+#include "super.h"
+#include "buffer_head_io.h"
+#include "blockcheck.h"
+#include "refcounttree.h"
+#include "sysfile.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "aops.h"
+#include "xattr.h"
+#include "namei.h"
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include <linux/swap.h>
+#include <linux/security.h>
+#include <linux/fsnotify.h>
+#include <linux/quotaops.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+
+struct ocfs2_cow_context {
+ struct inode *inode;
+ u32 cow_start;
+ u32 cow_len;
+ struct ocfs2_extent_tree data_et;
+ struct ocfs2_refcount_tree *ref_tree;
+ struct buffer_head *ref_root_bh;
+ struct ocfs2_alloc_context *meta_ac;
+ struct ocfs2_alloc_context *data_ac;
+ struct ocfs2_cached_dealloc_ctxt dealloc;
+ void *cow_object;
+ struct ocfs2_post_refcount *post_refcount;
+ int extra_credits;
+ int (*get_clusters)(struct ocfs2_cow_context *context,
+ u32 v_cluster, u32 *p_cluster,
+ u32 *num_clusters,
+ unsigned int *extent_flags);
+ int (*cow_duplicate_clusters)(handle_t *handle,
+ struct ocfs2_cow_context *context,
+ u32 cpos, u32 old_cluster,
+ u32 new_cluster, u32 new_len);
+};
+
+static inline struct ocfs2_refcount_tree *
+cache_info_to_refcount(struct ocfs2_caching_info *ci)
+{
+ return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
+}
+
+static int ocfs2_validate_refcount_block(struct super_block *sb,
+ struct buffer_head *bh)
+{
+ int rc;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)bh->b_data;
+
+ mlog(0, "Validating refcount block %llu\n",
+ (unsigned long long)bh->b_blocknr);
+
+ BUG_ON(!buffer_uptodate(bh));
+
+ /*
+ * If the ecc fails, we return the error but otherwise
+ * leave the filesystem running. We know any error is
+ * local to this block.
+ */
+ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
+ if (rc) {
+ mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
+ (unsigned long long)bh->b_blocknr);
+ return rc;
+ }
+
+
+ if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
+ ocfs2_error(sb,
+ "Refcount block #%llu has bad signature %.*s",
+ (unsigned long long)bh->b_blocknr, 7,
+ rb->rf_signature);
+ return -EINVAL;
+ }
+
+ if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
+ ocfs2_error(sb,
+ "Refcount block #%llu has an invalid rf_blkno "
+ "of %llu",
+ (unsigned long long)bh->b_blocknr,
+ (unsigned long long)le64_to_cpu(rb->rf_blkno));
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
+ ocfs2_error(sb,
+ "Refcount block #%llu has an invalid "
+ "rf_fs_generation of #%u",
+ (unsigned long long)bh->b_blocknr,
+ le32_to_cpu(rb->rf_fs_generation));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
+ u64 rb_blkno,
+ struct buffer_head **bh)
+{
+ int rc;
+ struct buffer_head *tmp = *bh;
+
+ rc = ocfs2_read_block(ci, rb_blkno, &tmp,
+ ocfs2_validate_refcount_block);
+
+ /* If ocfs2_read_block() got us a new bh, pass it up. */
+ if (!rc && !*bh)
+ *bh = tmp;
+
+ return rc;
+}
+
+static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
+
+ return rf->rf_blkno;
+}
+
+static struct super_block *
+ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
+
+ return rf->rf_sb;
+}
+
+static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
+
+ spin_lock(&rf->rf_lock);
+}
+
+static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
+
+ spin_unlock(&rf->rf_lock);
+}
+
+static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
+
+ mutex_lock(&rf->rf_io_mutex);
+}
+
+static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
+{
+ struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
+
+ mutex_unlock(&rf->rf_io_mutex);
+}
+
+static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
+ .co_owner = ocfs2_refcount_cache_owner,
+ .co_get_super = ocfs2_refcount_cache_get_super,
+ .co_cache_lock = ocfs2_refcount_cache_lock,
+ .co_cache_unlock = ocfs2_refcount_cache_unlock,
+ .co_io_lock = ocfs2_refcount_cache_io_lock,
+ .co_io_unlock = ocfs2_refcount_cache_io_unlock,
+};
+
+static struct ocfs2_refcount_tree *
+ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
+{
+ struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
+ struct ocfs2_refcount_tree *tree = NULL;
+
+ while (n) {
+ tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
+
+ if (blkno < tree->rf_blkno)
+ n = n->rb_left;
+ else if (blkno > tree->rf_blkno)
+ n = n->rb_right;
+ else
+ return tree;
+ }
+
+ return NULL;
+}
+
+/* osb_lock is already locked. */
+static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *new)
+{
+ u64 rf_blkno = new->rf_blkno;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
+ struct ocfs2_refcount_tree *tmp;
+
+ while (*p) {
+ parent = *p;
+
+ tmp = rb_entry(parent, struct ocfs2_refcount_tree,
+ rf_node);
+
+ if (rf_blkno < tmp->rf_blkno)
+ p = &(*p)->rb_left;
+ else if (rf_blkno > tmp->rf_blkno)
+ p = &(*p)->rb_right;
+ else {
+ /* This should never happen! */
+ mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
+ (unsigned long long)rf_blkno);
+ BUG();
+ }
+ }
+
+ rb_link_node(&new->rf_node, parent, p);
+ rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
+}
+
+static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
+{
+ ocfs2_metadata_cache_exit(&tree->rf_ci);
+ ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
+ ocfs2_lock_res_free(&tree->rf_lockres);
+ kfree(tree);
+}
+
+static inline void
+ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *tree)
+{
+ rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
+ if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
+ osb->osb_ref_tree_lru = NULL;
+}
+
+static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *tree)
+{
+ spin_lock(&osb->osb_lock);
+ ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
+ spin_unlock(&osb->osb_lock);
+}
+
+void ocfs2_kref_remove_refcount_tree(struct kref *kref)
+{
+ struct ocfs2_refcount_tree *tree =
+ container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
+
+ ocfs2_free_refcount_tree(tree);
+}
+
+static inline void
+ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
+{
+ kref_get(&tree->rf_getcnt);
+}
+
+static inline void
+ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
+{
+ kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
+}
+
+static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
+ struct super_block *sb)
+{
+ ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
+ mutex_init(&new->rf_io_mutex);
+ new->rf_sb = sb;
+ spin_lock_init(&new->rf_lock);
+}
+
+static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *new,
+ u64 rf_blkno, u32 generation)
+{
+ init_rwsem(&new->rf_sem);
+ ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
+ rf_blkno, generation);
+}
+
+static struct ocfs2_refcount_tree*
+ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
+{
+ struct ocfs2_refcount_tree *new;
+
+ new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
+ if (!new)
+ return NULL;
+
+ new->rf_blkno = rf_blkno;
+ kref_init(&new->rf_getcnt);
+ ocfs2_init_refcount_tree_ci(new, osb->sb);
+
+ return new;
+}
+
+static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
+ struct ocfs2_refcount_tree **ret_tree)
+{
+ int ret = 0;
+ struct ocfs2_refcount_tree *tree, *new = NULL;
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_refcount_block *ref_rb;
+
+ spin_lock(&osb->osb_lock);
+ if (osb->osb_ref_tree_lru &&
+ osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
+ tree = osb->osb_ref_tree_lru;
+ else
+ tree = ocfs2_find_refcount_tree(osb, rf_blkno);
+ if (tree)
+ goto out;
+
+ spin_unlock(&osb->osb_lock);
+
+ new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
+ if (!new) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ return ret;
+ }
+ /*
+	 * We need the generation to create the refcount tree lock, and since
+	 * it isn't changed during tree modification, we are safe to read it
+	 * here without protection.
+	 * We also have to purge the cache after we create the lock, since the
+	 * refcount block may contain stale data. It can only be trusted once
+	 * we hold the refcount lock.
+ */
+ ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ ocfs2_metadata_cache_exit(&new->rf_ci);
+ kfree(new);
+ return ret;
+ }
+
+ ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
+ ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
+ new->rf_generation);
+ ocfs2_metadata_cache_purge(&new->rf_ci);
+
+ spin_lock(&osb->osb_lock);
+ tree = ocfs2_find_refcount_tree(osb, rf_blkno);
+ if (tree)
+ goto out;
+
+ ocfs2_insert_refcount_tree(osb, new);
+
+ tree = new;
+ new = NULL;
+
+out:
+ *ret_tree = tree;
+
+ osb->osb_ref_tree_lru = tree;
+
+ spin_unlock(&osb->osb_lock);
+
+ if (new)
+ ocfs2_free_refcount_tree(new);
+
+ brelse(ref_root_bh);
+ return ret;
+}
+
+static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
+{
+ int ret;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+
+ ret = ocfs2_read_inode_block(inode, &di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ *ref_blkno = le64_to_cpu(di->i_refcount_loc);
+ brelse(di_bh);
+out:
+ return ret;
+}
+
+static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *tree, int rw)
+{
+ int ret;
+
+ ret = ocfs2_refcount_lock(tree, rw);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (rw)
+ down_write(&tree->rf_sem);
+ else
+ down_read(&tree->rf_sem);
+
+out:
+ return ret;
+}
+
+/*
+ * Lock the refcount tree pointed to by ref_blkno and return the tree.
+ * In most cases, we lock the tree and then read the refcount block,
+ * so read it here if the caller really needs it.
+ *
+ * If the tree has been re-created by another node, free the old one
+ * and re-create it.
+ */
+int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
+ u64 ref_blkno, int rw,
+ struct ocfs2_refcount_tree **ret_tree,
+ struct buffer_head **ref_bh)
+{
+ int ret, delete_tree = 0;
+ struct ocfs2_refcount_tree *tree = NULL;
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_refcount_block *rb;
+
+again:
+ ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ocfs2_refcount_tree_get(tree);
+
+ ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
+ if (ret) {
+ mlog_errno(ret);
+ ocfs2_refcount_tree_put(tree);
+ goto out;
+ }
+
+ ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
+ &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ ocfs2_unlock_refcount_tree(osb, tree, rw);
+ ocfs2_refcount_tree_put(tree);
+ goto out;
+ }
+
+ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ /*
+ * If the refcount block has been freed and re-created, we may need
+ * to recreate the refcount tree also.
+ *
+ * Here we just remove the tree from the rb-tree, and the last
+ * kref holder will unlock and delete this refcount_tree.
+ * Then we goto "again" and ocfs2_get_refcount_tree will create
+ * the new refcount tree for us.
+ */
+ if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
+ if (!tree->rf_removed) {
+ ocfs2_erase_refcount_tree_from_list(osb, tree);
+ tree->rf_removed = 1;
+ delete_tree = 1;
+ }
+
+ ocfs2_unlock_refcount_tree(osb, tree, rw);
+ /*
+ * We get an extra reference when we create the refcount
+ * tree, so another put will destroy it.
+ */
+ if (delete_tree)
+ ocfs2_refcount_tree_put(tree);
+ brelse(ref_root_bh);
+ ref_root_bh = NULL;
+ goto again;
+ }
+
+ *ret_tree = tree;
+ if (ref_bh) {
+ *ref_bh = ref_root_bh;
+ ref_root_bh = NULL;
+ }
+out:
+ brelse(ref_root_bh);
+ return ret;
+}
+
+int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
+ struct ocfs2_refcount_tree **ret_tree,
+ struct buffer_head **ref_bh)
+{
+ int ret;
+ u64 ref_blkno;
+
+ ret = ocfs2_get_refcount_block(inode, &ref_blkno);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
+ rw, ret_tree, ref_bh);
+}
+
+void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *tree, int rw)
+{
+ if (rw)
+ up_write(&tree->rf_sem);
+ else
+ up_read(&tree->rf_sem);
+
+ ocfs2_refcount_unlock(tree, rw);
+ ocfs2_refcount_tree_put(tree);
+}
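A short usage sketch pairing the lock and unlock helpers above, using the by-inode variant; inode is assumed from the caller and error handling around the work itself is omitted.

	struct ocfs2_refcount_tree *tree;
	struct buffer_head *ref_root_bh = NULL;
	int ret;

	ret = ocfs2_lock_refcount_tree_by_inode(inode, 1, &tree, &ref_root_bh);
	if (ret)
		return ret;

	/* ... modify the refcount tree under the write lock ... */

	ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), tree, 1);
	brelse(ref_root_bh);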
+
+void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
+{
+ struct rb_node *node;
+ struct ocfs2_refcount_tree *tree;
+ struct rb_root *root = &osb->osb_rf_lock_tree;
+
+ while ((node = rb_last(root)) != NULL) {
+ tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
+
+ mlog(0, "Purge tree %llu\n",
+ (unsigned long long) tree->rf_blkno);
+
+ rb_erase(&tree->rf_node, root);
+ ocfs2_free_refcount_tree(tree);
+ }
+}
+
+/*
+ * Create a refcount tree for an inode.
+ * We assume that the inode is already locked.
+ */
+static int ocfs2_create_refcount_tree(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret;
+ handle_t *handle = NULL;
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *new_bh = NULL;
+ struct ocfs2_refcount_block *rb;
+ struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
+ u16 suballoc_bit_start;
+ u32 num_got;
+ u64 first_blkno;
+
+ BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
+
+ mlog(0, "create tree for inode %lu\n", inode->i_ino);
+
+ ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
+ &suballoc_bit_start, &num_got,
+ &first_blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
+ if (!new_tree) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ new_bh = sb_getblk(inode->i_sb, first_blkno);
+ ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
+
+ ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ /* Initialize ocfs2_refcount_block. */
+ rb = (struct ocfs2_refcount_block *)new_bh->b_data;
+ memset(rb, 0, inode->i_sb->s_blocksize);
+ strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
+ rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
+ rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
+ rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
+ rb->rf_blkno = cpu_to_le64(first_blkno);
+ rb->rf_count = cpu_to_le32(1);
+ rb->rf_records.rl_count =
+ cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
+ spin_lock(&osb->osb_lock);
+ rb->rf_generation = osb->s_next_generation++;
+ spin_unlock(&osb->osb_lock);
+
+ ocfs2_journal_dirty(handle, new_bh);
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ di->i_refcount_loc = cpu_to_le64(first_blkno);
+ spin_unlock(&oi->ip_lock);
+
+ mlog(0, "created tree for inode %lu, refblock %llu\n",
+ inode->i_ino, (unsigned long long)first_blkno);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+ /*
+ * We have to init the tree lock here since it will use
+ * the generation number to create it.
+ */
+ new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
+ ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
+ new_tree->rf_generation);
+
+ spin_lock(&osb->osb_lock);
+ tree = ocfs2_find_refcount_tree(osb, first_blkno);
+
+ /*
+ * We've just created a new refcount tree in this block. If
+ * we found a refcount tree on the ocfs2_super, it must be
+ * one we just deleted. We free the old tree before
+ * inserting the new tree.
+ */
+ BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
+ if (tree)
+ ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
+ ocfs2_insert_refcount_tree(osb, new_tree);
+ spin_unlock(&osb->osb_lock);
+ new_tree = NULL;
+ if (tree)
+ ocfs2_refcount_tree_put(tree);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ if (new_tree) {
+ ocfs2_metadata_cache_exit(&new_tree->rf_ci);
+ kfree(new_tree);
+ }
+
+ brelse(new_bh);
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+
+ return ret;
+}
+
+static int ocfs2_set_refcount_tree(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 refcount_loc)
+{
+ int ret;
+ handle_t *handle = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_refcount_block *rb;
+ struct ocfs2_refcount_tree *ref_tree;
+
+ BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
+
+ ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
+ &ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ le32_add_cpu(&rb->rf_count, 1);
+
+ ocfs2_journal_dirty(handle, ref_root_bh);
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ di->i_refcount_loc = cpu_to_le64(refcount_loc);
+ spin_unlock(&oi->ip_lock);
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+out:
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ brelse(ref_root_bh);
+
+ return ret;
+}
+
+int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
+{
+ int ret, delete_tree = 0;
+ handle_t *handle = NULL;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_refcount_block *rb;
+ struct inode *alloc_inode = NULL;
+ struct buffer_head *alloc_bh = NULL;
+ struct buffer_head *blk_bh = NULL;
+ struct ocfs2_refcount_tree *ref_tree;
+ int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
+ u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
+ u16 bit = 0;
+
+ if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
+ return 0;
+
+ BUG_ON(!ref_blkno);
+ ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
+
+ /*
+ * If we are the last user, we need to free the block.
+	 * So take the allocator lock ahead of time.
+ */
+ if (le32_to_cpu(rb->rf_count) == 1) {
+ blk = le64_to_cpu(rb->rf_blkno);
+ bit = le16_to_cpu(rb->rf_suballoc_bit);
+ bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+
+ alloc_inode = ocfs2_get_system_file_inode(osb,
+ EXTENT_ALLOC_SYSTEM_INODE,
+ le16_to_cpu(rb->rf_suballoc_slot));
+ if (!alloc_inode) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+ mutex_lock(&alloc_inode->i_mutex);
+
+ ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_mutex;
+ }
+
+ credits += OCFS2_SUBALLOC_FREE;
+ }
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ di->i_refcount_loc = 0;
+ spin_unlock(&oi->ip_lock);
+ ocfs2_journal_dirty(handle, di_bh);
+
+	le32_add_cpu(&rb->rf_count, -1);
+ ocfs2_journal_dirty(handle, blk_bh);
+
+ if (!rb->rf_count) {
+ delete_tree = 1;
+ ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
+ ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
+ alloc_bh, bit, bg_blkno, 1);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+out_unlock:
+ if (alloc_inode) {
+ ocfs2_inode_unlock(alloc_inode, 1);
+ brelse(alloc_bh);
+ }
+out_mutex:
+ if (alloc_inode) {
+ mutex_unlock(&alloc_inode->i_mutex);
+ iput(alloc_inode);
+ }
+out:
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ if (delete_tree)
+ ocfs2_refcount_tree_put(ref_tree);
+ brelse(blk_bh);
+
+ return ret;
+}
+
+static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_leaf_bh,
+ u64 cpos, unsigned int len,
+ struct ocfs2_refcount_rec *ret_rec,
+ int *index)
+{
+ int i = 0;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_refcount_rec *rec = NULL;
+
+ for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
+ rec = &rb->rf_records.rl_recs[i];
+
+ if (le64_to_cpu(rec->r_cpos) +
+ le32_to_cpu(rec->r_clusters) <= cpos)
+ continue;
+ else if (le64_to_cpu(rec->r_cpos) > cpos)
+ break;
+
+		/* ok, cpos falls in this rec. Just return. */
+ if (ret_rec)
+ *ret_rec = *rec;
+ goto out;
+ }
+
+ if (ret_rec) {
+		/* We hit a hole here, so fake the rec. */
+ ret_rec->r_cpos = cpu_to_le64(cpos);
+ ret_rec->r_refcount = 0;
+ if (i < le16_to_cpu(rb->rf_records.rl_used) &&
+ le64_to_cpu(rec->r_cpos) < cpos + len)
+ ret_rec->r_clusters =
+ cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
+ else
+ ret_rec->r_clusters = cpu_to_le32(len);
+ }
+
+out:
+ *index = i;
+}
+
+/*
+ * Try to remove the refcount tree. The mechanism is:
+ * 1) Check whether i_clusters == 0; if not, exit.
+ * 2) Check whether we have i_xattr_loc in the dinode; if so, exit.
+ * 3) Check whether we have inline xattrs stored outside; if so, exit.
+ * 4) Remove the tree.
+ */
+int ocfs2_try_remove_refcount_tree(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+
+ down_write(&oi->ip_xattr_sem);
+ down_write(&oi->ip_alloc_sem);
+
+ if (oi->ip_clusters)
+ goto out;
+
+ if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
+ goto out;
+
+ if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
+ ocfs2_has_inline_xattr_value_outside(inode, di))
+ goto out;
+
+ ret = ocfs2_remove_refcount_tree(inode, di_bh);
+ if (ret)
+ mlog_errno(ret);
+out:
+ up_write(&oi->ip_alloc_sem);
+ up_write(&oi->ip_xattr_sem);
+ return 0;
+}
+
+/*
+ * Given a cpos and len, try to find the refcount record which contains cpos.
+ * 1. If cpos can be found in one refcount record, return the record.
+ * 2. If cpos can't be found, return a fake record which starts from cpos
+ *    and ends at a small value between cpos+len and the start of the next record.
+ * This fake record has r_refcount = 0.
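+ *
+ * Example (hypothetical numbers): with records covering [100, 108) and
+ * [120, 124), looking up cpos 110 with len 20 returns a fake record for
+ * [110, 120) with r_refcount = 0.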
+ */
+static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ u64 cpos, unsigned int len,
+ struct ocfs2_refcount_rec *ret_rec,
+ int *index,
+ struct buffer_head **ret_bh)
+{
+ int ret = 0, i, found;
+ u32 low_cpos;
+ struct ocfs2_extent_list *el;
+ struct ocfs2_extent_rec *tmp, *rec = NULL;
+ struct ocfs2_extent_block *eb;
+ struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+
+ if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
+ ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
+ ret_rec, index);
+ *ret_bh = ref_root_bh;
+ get_bh(ref_root_bh);
+ return 0;
+ }
+
+ el = &rb->rf_list;
+ low_cpos = cpos & OCFS2_32BIT_POS_MASK;
+
+ if (el->l_tree_depth) {
+ ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ eb = (struct ocfs2_extent_block *) eb_bh->b_data;
+ el = &eb->h_list;
+
+ if (el->l_tree_depth) {
+ ocfs2_error(sb,
+ "refcount tree %llu has non zero tree "
+			    "depth in leaf btree block %llu\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)eb_bh->b_blocknr);
+ ret = -EROFS;
+ goto out;
+ }
+ }
+
+ found = 0;
+ for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
+ rec = &el->l_recs[i];
+
+ if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
+ found = 1;
+ break;
+ }
+ }
+
+ /* adjust len when we have ocfs2_extent_rec after it. */
+ if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
+ tmp = &el->l_recs[i+1];
+
+ if (le32_to_cpu(tmp->e_cpos) < cpos + len)
+ len = le32_to_cpu(tmp->e_cpos) - cpos;
+ }
+
+ ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
+ &ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
+ ret_rec, index);
+ *ret_bh = ref_leaf_bh;
+out:
+ brelse(eb_bh);
+ return ret;
+}
+
+enum ocfs2_ref_rec_contig {
+ REF_CONTIG_NONE = 0,
+ REF_CONTIG_LEFT,
+ REF_CONTIG_RIGHT,
+ REF_CONTIG_LEFTRIGHT,
+};
+
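+/*
+ * Two neighbouring refcount records can be merged when they carry the same
+ * refcount and the first record ends exactly where the second one begins.
+ */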
+static enum ocfs2_ref_rec_contig
+ ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
+ int index)
+{
+ if ((rb->rf_records.rl_recs[index].r_refcount ==
+ rb->rf_records.rl_recs[index + 1].r_refcount) &&
+ (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
+ le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
+ le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
+ return REF_CONTIG_RIGHT;
+
+ return REF_CONTIG_NONE;
+}
+
+static enum ocfs2_ref_rec_contig
+ ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
+ int index)
+{
+ enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
+
+ if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
+ ret = ocfs2_refcount_rec_adjacent(rb, index);
+
+ if (index > 0) {
+ enum ocfs2_ref_rec_contig tmp;
+
+ tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
+
+ if (tmp == REF_CONTIG_RIGHT) {
+ if (ret == REF_CONTIG_RIGHT)
+ ret = REF_CONTIG_LEFTRIGHT;
+ else
+ ret = REF_CONTIG_LEFT;
+ }
+ }
+
+ return ret;
+}
+
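+/*
+ * Fold rl_recs[index + 1] into rl_recs[index] and shift the remaining
+ * records one slot to the left, shrinking rl_used.
+ */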
+static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
+ int index)
+{
+ BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
+ rb->rf_records.rl_recs[index+1].r_refcount);
+
+ le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
+ le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
+
+ if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
+ memmove(&rb->rf_records.rl_recs[index + 1],
+ &rb->rf_records.rl_recs[index + 2],
+ sizeof(struct ocfs2_refcount_rec) *
+ (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
+
+ memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
+ 0, sizeof(struct ocfs2_refcount_rec));
+ le16_add_cpu(&rb->rf_records.rl_used, -1);
+}
+
+/*
+ * Merge the refcount rec if we are contiguous with the adjacent recs.
+ */
+static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
+ int index)
+{
+ enum ocfs2_ref_rec_contig contig =
+ ocfs2_refcount_rec_contig(rb, index);
+
+ if (contig == REF_CONTIG_NONE)
+ return;
+
+ if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
+ BUG_ON(index == 0);
+ index--;
+ }
+
+ ocfs2_rotate_refcount_rec_left(rb, index);
+
+ if (contig == REF_CONTIG_LEFTRIGHT)
+ ocfs2_rotate_refcount_rec_left(rb, index);
+}
+
+/*
+ * Change the refcount indexed by "index" in ref_bh.
+ * If refcount reaches 0, remove it.
+ */
+static int ocfs2_change_refcount_rec(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_leaf_bh,
+ int index, int merge, int change)
+{
+ int ret;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_refcount_list *rl = &rb->rf_records;
+ struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "change index %d, old count %u, change %d\n", index,
+ le32_to_cpu(rec->r_refcount), change);
+ le32_add_cpu(&rec->r_refcount, change);
+
+ if (!rec->r_refcount) {
+ if (index != le16_to_cpu(rl->rl_used) - 1) {
+ memmove(rec, rec + 1,
+ (le16_to_cpu(rl->rl_used) - index - 1) *
+ sizeof(struct ocfs2_refcount_rec));
+ memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
+ 0, sizeof(struct ocfs2_refcount_rec));
+ }
+
+ le16_add_cpu(&rl->rl_used, -1);
+ } else if (merge)
+ ocfs2_refcount_rec_merge(rb, index);
+
+ ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
+ if (ret)
+ mlog_errno(ret);
+out:
+ return ret;
+}
+
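+/*
+ * The root refcount block stores refcount records inline until it fills up.
+ * Expanding it copies those records into a newly allocated leaf block and
+ * turns the root into an extent list with a single record pointing at it.
+ */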
+static int ocfs2_expand_inline_ref_root(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head **ref_leaf_bh,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int ret;
+ u16 suballoc_bit_start;
+ u32 num_got;
+ u64 blkno;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct buffer_head *new_bh = NULL;
+ struct ocfs2_refcount_block *new_rb;
+ struct ocfs2_refcount_block *root_rb =
+ (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
+ &suballoc_bit_start, &num_got,
+ &blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ new_bh = sb_getblk(sb, blkno);
+ if (new_bh == NULL) {
+ ret = -EIO;
+ mlog_errno(ret);
+ goto out;
+ }
+ ocfs2_set_new_buffer_uptodate(ci, new_bh);
+
+ ret = ocfs2_journal_access_rb(handle, ci, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Initialize ocfs2_refcount_block.
+	 * It should contain the same information as the old root,
+	 * so just memcpy it and change the corresponding fields.
+ */
+ memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
+
+ new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
+ new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
+ new_rb->rf_blkno = cpu_to_le64(blkno);
+ new_rb->rf_cpos = cpu_to_le32(0);
+ new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
+ new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
+ ocfs2_journal_dirty(handle, new_bh);
+
+ /* Now change the root. */
+ memset(&root_rb->rf_list, 0, sb->s_blocksize -
+ offsetof(struct ocfs2_refcount_block, rf_list));
+ root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
+ root_rb->rf_clusters = cpu_to_le32(1);
+ root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
+ root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
+ root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
+ root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
+
+ ocfs2_journal_dirty(handle, ref_root_bh);
+
+ mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
+ le16_to_cpu(new_rb->rf_records.rl_used));
+
+ *ref_leaf_bh = new_bh;
+ new_bh = NULL;
+out:
+ brelse(new_bh);
+ return ret;
+}
+
+static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
+ struct ocfs2_refcount_rec *next)
+{
+ if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
+ ocfs2_get_ref_rec_low_cpos(next))
+ return 1;
+
+ return 0;
+}
+
+static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
+{
+ const struct ocfs2_refcount_rec *l = a, *r = b;
+ u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
+ u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
+
+ if (l_cpos > r_cpos)
+ return 1;
+ if (l_cpos < r_cpos)
+ return -1;
+ return 0;
+}
+
+static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
+{
+ const struct ocfs2_refcount_rec *l = a, *r = b;
+ u64 l_cpos = le64_to_cpu(l->r_cpos);
+ u64 r_cpos = le64_to_cpu(r->r_cpos);
+
+ if (l_cpos > r_cpos)
+ return 1;
+ if (l_cpos < r_cpos)
+ return -1;
+ return 0;
+}
+
+static void swap_refcount_rec(void *a, void *b, int size)
+{
+ struct ocfs2_refcount_rec *l = a, *r = b, tmp;
+
+ tmp = *(struct ocfs2_refcount_rec *)l;
+ *(struct ocfs2_refcount_rec *)l =
+ *(struct ocfs2_refcount_rec *)r;
+ *(struct ocfs2_refcount_rec *)r = tmp;
+}
+
+/*
+ * The refcount recs are ordered by their 64-bit cpos,
+ * but we will use the low 32 bits as the e_cpos in the b-tree,
+ * so we need to make sure that the split pos isn't intersected with others.
+ *
+ * Note: the refcount block is already sorted by its low 32-bit cpos,
+ * so just try the middle pos first; we will exit when we find
+ * a good position.
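+ *
+ * Example (hypothetical): with rl_used == 6 the candidate split indexes
+ * are probed in the order 3, 4, 2, 5, 1.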
+ */
+static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
+ u32 *split_pos, int *split_index)
+{
+ int num_used = le16_to_cpu(rl->rl_used);
+ int delta, middle = num_used / 2;
+
+ for (delta = 0; delta < middle; delta++) {
+ /* Let's check delta earlier than middle */
+ if (ocfs2_refcount_rec_no_intersect(
+ &rl->rl_recs[middle - delta - 1],
+ &rl->rl_recs[middle - delta])) {
+ *split_index = middle - delta;
+ break;
+ }
+
+ /* For even counts, don't walk off the end */
+ if ((middle + delta + 1) == num_used)
+ continue;
+
+ /* Now try delta past middle */
+ if (ocfs2_refcount_rec_no_intersect(
+ &rl->rl_recs[middle + delta],
+ &rl->rl_recs[middle + delta + 1])) {
+ *split_index = middle + delta + 1;
+ break;
+ }
+ }
+
+ if (delta >= middle)
+ return -ENOSPC;
+
+ *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
+ return 0;
+}
+
+static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
+ struct buffer_head *new_bh,
+ u32 *split_cpos)
+{
+ int split_index = 0, num_moved, ret;
+ u32 cpos = 0;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_refcount_list *rl = &rb->rf_records;
+ struct ocfs2_refcount_block *new_rb =
+ (struct ocfs2_refcount_block *)new_bh->b_data;
+ struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
+
+ mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
+ (unsigned long long)ref_leaf_bh->b_blocknr,
+	       le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
+
+ /*
+ * XXX: Improvement later.
+	 * If we know all the high 32 bits of the cpos are the same, no need to sort.
+ *
+ * In order to make the whole process safe, we do:
+ * 1. sort the entries by their low 32 bit cpos first so that we can
+ * find the split cpos easily.
+ * 2. call ocfs2_insert_extent to insert the new refcount block.
+ * 3. move the refcount rec to the new block.
+ * 4. sort the entries by their 64 bit cpos.
+ * 5. dirty the new_rb and rb.
+ */
+ sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
+ sizeof(struct ocfs2_refcount_rec),
+ cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
+
+ ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ new_rb->rf_cpos = cpu_to_le32(cpos);
+
+ /* move refcount records starting from split_index to the new block. */
+ num_moved = le16_to_cpu(rl->rl_used) - split_index;
+ memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
+ num_moved * sizeof(struct ocfs2_refcount_rec));
+
+	/* Ok, remove the entries we just moved over to the other block. */
+ memset(&rl->rl_recs[split_index], 0,
+ num_moved * sizeof(struct ocfs2_refcount_rec));
+
+ /* change old and new rl_used accordingly. */
+ le16_add_cpu(&rl->rl_used, -num_moved);
+	new_rl->rl_used = cpu_to_le16(num_moved);
+
+ sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
+ sizeof(struct ocfs2_refcount_rec),
+ cmp_refcount_rec_by_cpos, swap_refcount_rec);
+
+ sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
+ sizeof(struct ocfs2_refcount_rec),
+ cmp_refcount_rec_by_cpos, swap_refcount_rec);
+
+ *split_cpos = cpos;
+ return 0;
+}
+
+static int ocfs2_new_leaf_refcount_block(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int ret;
+ u16 suballoc_bit_start;
+ u32 num_got, new_cpos;
+ u64 blkno;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct ocfs2_refcount_block *root_rb =
+ (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ struct buffer_head *new_bh = NULL;
+ struct ocfs2_refcount_block *new_rb;
+ struct ocfs2_extent_tree ref_et;
+
+ BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
+ &suballoc_bit_start, &num_got,
+ &blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ new_bh = sb_getblk(sb, blkno);
+ if (new_bh == NULL) {
+ ret = -EIO;
+ mlog_errno(ret);
+ goto out;
+ }
+ ocfs2_set_new_buffer_uptodate(ci, new_bh);
+
+ ret = ocfs2_journal_access_rb(handle, ci, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* Initialize ocfs2_refcount_block. */
+ new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
+ memset(new_rb, 0, sb->s_blocksize);
+ strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
+ new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
+ new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
+ new_rb->rf_blkno = cpu_to_le64(blkno);
+ new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
+ new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
+ new_rb->rf_records.rl_count =
+ cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
+ new_rb->rf_generation = root_rb->rf_generation;
+
+ ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
+ ocfs2_journal_dirty(handle, new_bh);
+
+ ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
+
+ mlog(0, "insert new leaf block %llu at %u\n",
+ (unsigned long long)new_bh->b_blocknr, new_cpos);
+
+ /* Insert the new leaf block with the specific offset cpos. */
+ ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
+ 1, 0, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ brelse(new_bh);
+ return ret;
+}
+
+static int ocfs2_expand_refcount_tree(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int ret;
+ struct buffer_head *expand_bh = NULL;
+
+ if (ref_root_bh == ref_leaf_bh) {
+ /*
+ * the old root bh hasn't been expanded to a b-tree,
+ * so expand it first.
+ */
+ ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
+ &expand_bh, meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ } else {
+ expand_bh = ref_leaf_bh;
+ get_bh(expand_bh);
+ }
+
+	/* Now add a new refcount block into the tree. */
+ ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
+ expand_bh, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+out:
+ brelse(expand_bh);
+ return ret;
+}
+
+/*
+ * Adjust the extent rec in b-tree representing ref_leaf_bh.
+ *
+ * Only called when we have inserted a new refcount rec at index 0
+ * which means ocfs2_extent_rec.e_cpos may need some change.
+ */
+static int ocfs2_adjust_refcount_rec(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ struct ocfs2_refcount_rec *rec)
+{
+ int ret = 0, i;
+ u32 new_cpos, old_cpos;
+ struct ocfs2_path *path = NULL;
+ struct ocfs2_extent_tree et;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ struct ocfs2_extent_list *el;
+
+ if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
+ goto out;
+
+ rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ old_cpos = le32_to_cpu(rb->rf_cpos);
+ new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
+ if (old_cpos <= new_cpos)
+ goto out;
+
+ ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
+
+ path = ocfs2_new_path_from_et(&et);
+ if (!path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_path(ci, path, old_cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * 2 more credits, one for the leaf refcount block, one for
+	 * the extent block that contains the extent rec.
+ */
+ ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* change the leaf extent block first. */
+ el = path_leaf_el(path);
+
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
+ if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
+ break;
+
+ BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
+
+ el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
+
+ /* change the r_cpos in the leaf block. */
+ rb->rf_cpos = cpu_to_le32(new_cpos);
+
+ ocfs2_journal_dirty(handle, path_leaf_bh(path));
+ ocfs2_journal_dirty(handle, ref_leaf_bh);
+
+out:
+ ocfs2_free_path(path);
+ return ret;
+}
+
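+/*
+ * Insert a new refcount record at "index" in ref_leaf_bh, expanding the
+ * tree first if the leaf is already full. If the record ends up at index 0,
+ * the extent record pointing at this leaf may need its e_cpos adjusted.
+ */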
+static int ocfs2_insert_refcount_rec(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ struct ocfs2_refcount_rec *rec,
+ int index, int merge,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int ret;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_refcount_list *rf_list = &rb->rf_records;
+ struct buffer_head *new_bh = NULL;
+
+ BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
+
+ if (rf_list->rl_used == rf_list->rl_count) {
+ u64 cpos = le64_to_cpu(rec->r_cpos);
+ u32 len = le32_to_cpu(rec->r_clusters);
+
+ ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
+ ref_leaf_bh, meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
+ cpos, len, NULL, &index,
+ &new_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ref_leaf_bh = new_bh;
+ rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ rf_list = &rb->rf_records;
+ }
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (index < le16_to_cpu(rf_list->rl_used))
+ memmove(&rf_list->rl_recs[index + 1],
+ &rf_list->rl_recs[index],
+ (le16_to_cpu(rf_list->rl_used) - index) *
+ sizeof(struct ocfs2_refcount_rec));
+
+ mlog(0, "insert refcount record start %llu, len %u, count %u "
+ "to leaf block %llu at index %d\n",
+ (unsigned long long)le64_to_cpu(rec->r_cpos),
+ le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
+ (unsigned long long)ref_leaf_bh->b_blocknr, index);
+
+ rf_list->rl_recs[index] = *rec;
+
+ le16_add_cpu(&rf_list->rl_used, 1);
+
+ if (merge)
+ ocfs2_refcount_rec_merge(rb, index);
+
+ ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (index == 0) {
+ ret = ocfs2_adjust_refcount_rec(handle, ci,
+ ref_root_bh,
+ ref_leaf_bh, rec);
+ if (ret)
+ mlog_errno(ret);
+ }
+out:
+ brelse(new_bh);
+ return ret;
+}
+
+/*
+ * Split the refcount_rec indexed by "index" in ref_leaf_bh.
+ * This is much simpler than our b-tree code.
+ * split_rec is the new refcount rec we want to insert.
+ * If split_rec->r_refcount > 0, we are changing the refcount (in case we
+ * increase the refcount or decrease a refcount to non-zero).
+ * If split_rec->r_refcount == 0, we are punching a hole in the current
+ * refcount rec (in case we decrease a refcount to zero).
+ */
+static int ocfs2_split_refcount_rec(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ struct ocfs2_refcount_rec *split_rec,
+ int index, int merge,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret, recs_need;
+ u32 len;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_refcount_list *rf_list = &rb->rf_records;
+ struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
+ struct ocfs2_refcount_rec *tail_rec = NULL;
+ struct buffer_head *new_bh = NULL;
+
+ BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
+
+ mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
+ le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
+ le64_to_cpu(split_rec->r_cpos),
+ le32_to_cpu(split_rec->r_clusters));
+
+ /*
+ * If we just need to split the header or tail clusters,
+	 * no more recs are needed; just splitting is OK.
+	 * Otherwise we need at least one new rec.
+ */
+ if (!split_rec->r_refcount &&
+ (split_rec->r_cpos == orig_rec->r_cpos ||
+ le64_to_cpu(split_rec->r_cpos) +
+ le32_to_cpu(split_rec->r_clusters) ==
+ le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
+ recs_need = 0;
+ else
+ recs_need = 1;
+
+ /*
+	 * We need one more rec if we split in the middle and the new rec has
+ * some refcount in it.
+ */
+ if (split_rec->r_refcount &&
+ (split_rec->r_cpos != orig_rec->r_cpos &&
+ le64_to_cpu(split_rec->r_cpos) +
+ le32_to_cpu(split_rec->r_clusters) !=
+ le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
+ recs_need++;
+
+	/* If the leaf block doesn't have enough records, expand it. */
+	if (le16_to_cpu(rf_list->rl_used) + recs_need >
+	    le16_to_cpu(rf_list->rl_count)) {
+ struct ocfs2_refcount_rec tmp_rec;
+ u64 cpos = le64_to_cpu(orig_rec->r_cpos);
+ len = le32_to_cpu(orig_rec->r_clusters);
+ ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
+ ref_leaf_bh, meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * We have to re-get it since now cpos may be moved to
+ * another leaf block.
+ */
+ ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
+ cpos, len, &tmp_rec, &index,
+ &new_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ref_leaf_bh = new_bh;
+ rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ rf_list = &rb->rf_records;
+ orig_rec = &rf_list->rl_recs[index];
+ }
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+	 * We have calculated how many new records we need and stored
+	 * the number in recs_need, so make enough space first by moving
+	 * the records after "index" to the end.
+ */
+ if (index != le16_to_cpu(rf_list->rl_used) - 1)
+ memmove(&rf_list->rl_recs[index + 1 + recs_need],
+ &rf_list->rl_recs[index + 1],
+ (le16_to_cpu(rf_list->rl_used) - index - 1) *
+ sizeof(struct ocfs2_refcount_rec));
+
+ len = (le64_to_cpu(orig_rec->r_cpos) +
+ le32_to_cpu(orig_rec->r_clusters)) -
+ (le64_to_cpu(split_rec->r_cpos) +
+ le32_to_cpu(split_rec->r_clusters));
+
+ /*
+	 * If we have "len", then we will split in the tail and move it
+ * to the end of the space we have just spared.
+ */
+ if (len) {
+ tail_rec = &rf_list->rl_recs[index + recs_need];
+
+ memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
+ le64_add_cpu(&tail_rec->r_cpos,
+ le32_to_cpu(tail_rec->r_clusters) - len);
+		tail_rec->r_clusters = cpu_to_le32(len);
+ }
+
+ /*
+ * If the split pos isn't the same as the original one, we need to
+ * split in the head.
+ *
+ * Note: We have the chance that split_rec.r_refcount = 0,
+ * recs_need = 0 and len > 0, which means we just cut the head from
+	 * the orig_rec and have already modified orig_rec above, so the
+	 * r_cpos check alone is no longer reliable; hence the extra
+	 * tail_rec != orig_rec check.
+ */
+ if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
+ len = le64_to_cpu(split_rec->r_cpos) -
+ le64_to_cpu(orig_rec->r_cpos);
+ orig_rec->r_clusters = cpu_to_le32(len);
+ index++;
+ }
+
+ le16_add_cpu(&rf_list->rl_used, recs_need);
+
+ if (split_rec->r_refcount) {
+ rf_list->rl_recs[index] = *split_rec;
+ mlog(0, "insert refcount record start %llu, len %u, count %u "
+ "to leaf block %llu at index %d\n",
+ (unsigned long long)le64_to_cpu(split_rec->r_cpos),
+ le32_to_cpu(split_rec->r_clusters),
+ le32_to_cpu(split_rec->r_refcount),
+ (unsigned long long)ref_leaf_bh->b_blocknr, index);
+
+ if (merge)
+ ocfs2_refcount_rec_merge(rb, index);
+ }
+
+ ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ brelse(new_bh);
+ return ret;
+}
+
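+/*
+ * Raise the refcount of every cluster in [cpos, cpos + len) by one, walking
+ * the tree record by record and inserting or splitting records as needed.
+ */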
+static int __ocfs2_increase_refcount(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ u64 cpos, u32 len, int merge,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret = 0, index;
+ struct buffer_head *ref_leaf_bh = NULL;
+ struct ocfs2_refcount_rec rec;
+ unsigned int set_len = 0;
+
+ mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)cpos, len);
+
+ while (len) {
+ ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
+ cpos, len, &rec, &index,
+ &ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ set_len = le32_to_cpu(rec.r_clusters);
+
+ /*
+ * Here we may meet with 3 situations:
+ *
+ * 1. If we find an already existing record, and the length
+ * is the same, cool, we just need to increase the r_refcount
+ * and it is OK.
+ * 2. If we find a hole, just insert it with r_refcount = 1.
+ * 3. If we are in the middle of one extent record, split
+ * it.
+ */
+ if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
+ set_len <= len) {
+ mlog(0, "increase refcount rec, start %llu, len %u, "
+ "count %u\n", (unsigned long long)cpos, set_len,
+ le32_to_cpu(rec.r_refcount));
+ ret = ocfs2_change_refcount_rec(handle, ci,
+ ref_leaf_bh, index,
+ merge, 1);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ } else if (!rec.r_refcount) {
+ rec.r_refcount = cpu_to_le32(1);
+
+ mlog(0, "insert refcount rec, start %llu, len %u\n",
+ (unsigned long long)le64_to_cpu(rec.r_cpos),
+ set_len);
+ ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
+ ref_leaf_bh,
+ &rec, index,
+ merge, meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ } else {
+ set_len = min((u64)(cpos + len),
+ le64_to_cpu(rec.r_cpos) + set_len) - cpos;
+ rec.r_cpos = cpu_to_le64(cpos);
+ rec.r_clusters = cpu_to_le32(set_len);
+ le32_add_cpu(&rec.r_refcount, 1);
+
+ mlog(0, "split refcount rec, start %llu, "
+ "len %u, count %u\n",
+ (unsigned long long)le64_to_cpu(rec.r_cpos),
+ set_len, le32_to_cpu(rec.r_refcount));
+ ret = ocfs2_split_refcount_rec(handle, ci,
+ ref_root_bh, ref_leaf_bh,
+ &rec, index, merge,
+ meta_ac, dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ cpos += set_len;
+ len -= set_len;
+ brelse(ref_leaf_bh);
+ ref_leaf_bh = NULL;
+ }
+
+out:
+ brelse(ref_leaf_bh);
+ return ret;
+}
+
+static int ocfs2_remove_refcount_extent(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_extent_tree et;
+
+ BUG_ON(rb->rf_records.rl_used);
+
+ ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
+ ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
+ 1, meta_ac, dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_remove_from_cache(ci, ref_leaf_bh);
+
+ /*
+ * add the freed block to the dealloc so that it will be freed
+ * when we run dealloc.
+ */
+ ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
+ le16_to_cpu(rb->rf_suballoc_slot),
+ le64_to_cpu(rb->rf_blkno),
+ le16_to_cpu(rb->rf_suballoc_bit));
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+
+ le32_add_cpu(&rb->rf_clusters, -1);
+
+ /*
+	 * Check whether we need to restore the root refcount block if
+	 * there is no leaf extent block at all.
+ */
+ if (!rb->rf_list.l_next_free_rec) {
+ BUG_ON(rb->rf_clusters);
+
+ mlog(0, "reset refcount tree root %llu to be a record block.\n",
+ (unsigned long long)ref_root_bh->b_blocknr);
+
+ rb->rf_flags = 0;
+ rb->rf_parent = 0;
+ rb->rf_cpos = 0;
+ memset(&rb->rf_records, 0, sb->s_blocksize -
+ offsetof(struct ocfs2_refcount_block, rf_records));
+ rb->rf_records.rl_count =
+ cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
+ }
+
+ ocfs2_journal_dirty(handle, ref_root_bh);
+
+out:
+ return ret;
+}
+
+int ocfs2_increase_refcount(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ u64 cpos, u32 len,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
+ cpos, len, 1,
+ meta_ac, dealloc);
+}
+
+static int ocfs2_decrease_refcount_rec(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ struct buffer_head *ref_leaf_bh,
+ int index, u64 cpos, unsigned int len,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+ struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
+
+ BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
+ BUG_ON(cpos + len >
+ le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
+
+ if (cpos == le64_to_cpu(rec->r_cpos) &&
+ len == le32_to_cpu(rec->r_clusters))
+ ret = ocfs2_change_refcount_rec(handle, ci,
+ ref_leaf_bh, index, 1, -1);
+ else {
+ struct ocfs2_refcount_rec split = *rec;
+ split.r_cpos = cpu_to_le64(cpos);
+ split.r_clusters = cpu_to_le32(len);
+
+ le32_add_cpu(&split.r_refcount, -1);
+
+ mlog(0, "split refcount rec, start %llu, "
+ "len %u, count %u, original start %llu, len %u\n",
+ (unsigned long long)le64_to_cpu(split.r_cpos),
+ len, le32_to_cpu(split.r_refcount),
+ (unsigned long long)le64_to_cpu(rec->r_cpos),
+ le32_to_cpu(rec->r_clusters));
+ ret = ocfs2_split_refcount_rec(handle, ci,
+ ref_root_bh, ref_leaf_bh,
+ &split, index, 1,
+ meta_ac, dealloc);
+ }
+
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* Remove the leaf refcount block if it contains no refcount record. */
+ if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
+ ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
+ ref_leaf_bh, meta_ac,
+ dealloc);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+out:
+ return ret;
+}
+
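+/*
+ * Drop the refcount of [cpos, cpos + len). When "delete" is set, clusters
+ * whose refcount drops to zero are also queued in "dealloc" so the space
+ * is released once the dealloc context is run.
+ */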
+static int __ocfs2_decrease_refcount(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ u64 cpos, u32 len,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ int delete)
+{
+ int ret = 0, index = 0;
+ struct ocfs2_refcount_rec rec;
+ unsigned int r_count = 0, r_len;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct buffer_head *ref_leaf_bh = NULL;
+
+ mlog(0, "Tree owner %llu, decrease refcount start %llu, "
+ "len %u, delete %u\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ (unsigned long long)cpos, len, delete);
+
+ while (len) {
+ ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
+ cpos, len, &rec, &index,
+ &ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ r_count = le32_to_cpu(rec.r_refcount);
+ BUG_ON(r_count == 0);
+ if (!delete)
+ BUG_ON(r_count > 1);
+
+ r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters)) - cpos;
+
+ ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
+ ref_leaf_bh, index,
+ cpos, r_len,
+ meta_ac, dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
+ ret = ocfs2_cache_cluster_dealloc(dealloc,
+ ocfs2_clusters_to_blocks(sb, cpos),
+ r_len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ cpos += r_len;
+ len -= r_len;
+ brelse(ref_leaf_bh);
+ ref_leaf_bh = NULL;
+ }
+
+out:
+ brelse(ref_leaf_bh);
+ return ret;
+}
+
+/* Caller must hold refcount tree lock. */
+int ocfs2_decrease_refcount(struct inode *inode,
+ handle_t *handle, u32 cpos, u32 len,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ int delete)
+{
+ int ret;
+ u64 ref_blkno;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_refcount_tree *tree;
+
+ BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+
+ ret = ocfs2_get_refcount_block(inode, &ref_blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
+ &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
+ cpos, len, meta_ac, dealloc, delete);
+ if (ret)
+ mlog_errno(ret);
+out:
+ brelse(ref_root_bh);
+ return ret;
+}
+
+/*
+ * Mark the already-existing extent at cpos as refcounted for len clusters.
+ * This adds the refcount extent flag.
+ *
+ * If the existing extent is larger than the request, initiate a
+ * split. An attempt will be made at merging with adjacent extents.
+ *
+ * The caller is responsible for passing down meta_ac if we'll need it.
+ */
+static int ocfs2_mark_extent_refcounted(struct inode *inode,
+ struct ocfs2_extent_tree *et,
+ handle_t *handle, u32 cpos,
+ u32 len, u32 phys,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret;
+
+ mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
+ inode->i_ino, cpos, len, phys);
+
+ if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
+		ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
+ "tree, but the feature bit is not set in the "
+ "super block.", inode->i_ino);
+ ret = -EROFS;
+ goto out;
+ }
+
+ ret = ocfs2_change_extent_flag(handle, et, cpos,
+ len, phys, meta_ac, dealloc,
+ OCFS2_EXT_REFCOUNTED, 0);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
+/*
+ * Given some contiguous physical clusters, calculate what we need
+ * for modifying their refcount.
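+ * The number of extra metadata blocks is accumulated in "meta_add" and the
+ * required journal credits in "credits".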
+ */
+static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ u64 start_cpos,
+ u32 clusters,
+ int *meta_add,
+ int *credits)
+{
+ int ret = 0, index, ref_blocks = 0, recs_add = 0;
+ u64 cpos = start_cpos;
+ struct ocfs2_refcount_block *rb;
+ struct ocfs2_refcount_rec rec;
+ struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
+ u32 len;
+
+ mlog(0, "start_cpos %llu, clusters %u\n",
+ (unsigned long long)start_cpos, clusters);
+ while (clusters) {
+ ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
+ cpos, clusters, &rec,
+ &index, &ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (ref_leaf_bh != prev_bh) {
+ /*
+ * Now we encounter a new leaf block, so calculate
+ * whether we need to extend the old leaf.
+ */
+ if (prev_bh) {
+ rb = (struct ocfs2_refcount_block *)
+ prev_bh->b_data;
+
+				if (le16_to_cpu(rb->rf_records.rl_used) +
+ recs_add >
+ le16_to_cpu(rb->rf_records.rl_count))
+ ref_blocks++;
+ }
+
+ recs_add = 0;
+ *credits += 1;
+ brelse(prev_bh);
+ prev_bh = ref_leaf_bh;
+ get_bh(prev_bh);
+ }
+
+ rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+
+ mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
+ "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
+ recs_add, (unsigned long long)cpos, clusters,
+ (unsigned long long)le64_to_cpu(rec.r_cpos),
+ le32_to_cpu(rec.r_clusters),
+ le32_to_cpu(rec.r_refcount), index);
+
+ len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters)) - cpos;
+ /*
+		 * If the refcount rec already exists, cool. We just need
+		 * to check whether there is a split. Otherwise we just need
+		 * to increase the refcount.
+		 * If we will insert one, increase recs_add.
+ *
+ * We record all the records which will be inserted to the
+ * same refcount block, so that we can tell exactly whether
+ * we need a new refcount block or not.
+ */
+ if (rec.r_refcount) {
+ /* Check whether we need a split at the beginning. */
+ if (cpos == start_cpos &&
+ cpos != le64_to_cpu(rec.r_cpos))
+ recs_add++;
+
+ /* Check whether we need a split in the end. */
+ if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters))
+ recs_add++;
+ } else
+ recs_add++;
+
+ brelse(ref_leaf_bh);
+ ref_leaf_bh = NULL;
+ clusters -= len;
+ cpos += len;
+ }
+
+ if (prev_bh) {
+ rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
+
+		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
+ le16_to_cpu(rb->rf_records.rl_count))
+ ref_blocks++;
+
+ *credits += 1;
+ }
+
+ if (!ref_blocks)
+ goto out;
+
+ mlog(0, "we need ref_blocks %d\n", ref_blocks);
+ *meta_add += ref_blocks;
+ *credits += ref_blocks;
+
+ /*
+ * So we may need ref_blocks to insert into the tree.
+ * That also means we need to change the b-tree and add that number
+ * of records since we never merge them.
+	 * We need one more block for expansion since the newly created leaf
+	 * block is also full and needs to be split.
+ */
+ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
+ struct ocfs2_extent_tree et;
+
+ ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
+ *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
+ *credits += ocfs2_calc_extend_credits(sb,
+ et.et_root_el,
+ ref_blocks);
+ } else {
+ *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
+ *meta_add += 1;
+ }
+
+out:
+ brelse(ref_leaf_bh);
+ brelse(prev_bh);
+ return ret;
+}
+
+/*
+ * For a refcount tree, we will decrease the refcount of some contiguous
+ * clusters, so just go through the tree to see how many blocks we are
+ * going to touch and whether we need to create new blocks.
+ *
+ * Normally the refcount blocks storing these refcounts should be
+ * contiguous as well, so we can get the number easily.
+ * As for meta_ac, we will at most split 2 refcount records and add
+ * 2 more refcount blocks, so just check it in a rough way.
+ *
+ * Caller must hold refcount tree lock.
+ */
+int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 phys_blkno,
+ u32 clusters,
+ int *credits,
+ struct ocfs2_alloc_context **meta_ac)
+{
+ int ret, ref_blocks = 0;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_refcount_tree *tree;
+ u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
+
+ if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
+		ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
+ "tree, but the feature bit is not set in the "
+ "super block.", inode->i_ino);
+ ret = -EROFS;
+ goto out;
+ }
+
+ BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+
+ ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
+ le64_to_cpu(di->i_refcount_loc), &tree);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_read_refcount_block(&tree->rf_ci,
+ le64_to_cpu(di->i_refcount_loc),
+ &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
+ &tree->rf_ci,
+ ref_root_bh,
+ start_cpos, clusters,
+ &ref_blocks, credits);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "reserve new metadata %d, credits = %d\n",
+ ref_blocks, *credits);
+
+ if (ref_blocks) {
+ ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
+ ref_blocks, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+out:
+ brelse(ref_root_bh);
+ return ret;
+}
+
+#define MAX_CONTIG_BYTES 1048576
+
+static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
+{
+ return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
+}
+
+static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
+{
+ return ~(ocfs2_cow_contig_clusters(sb) - 1);
+}
+
+/*
+ * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
+ * find an offset (start + (n * contig_clusters)) that is closest to cpos
+ * while still being less than or equal to it.
+ *
+ * The goal is to break the extent at a multiple of contig_clusters.
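+ *
+ * Example (hypothetical numbers, with contig_clusters == 256): start = 10
+ * and cpos = 300 give 10 + ((300 - 10) & ~255) = 266.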
+ */
+static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
+ unsigned int start,
+ unsigned int cpos)
+{
+ BUG_ON(start > cpos);
+
+ return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
+}
+
+/*
+ * Given a cluster count of len, pad it out so that it is a multiple
+ * of contig_clusters.
+ */
+static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
+ unsigned int len)
+{
+ unsigned int padded =
+ (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
+ ocfs2_cow_contig_mask(sb);
+
+ /* Did we wrap? */
+ if (padded < len)
+ padded = UINT_MAX;
+
+ return padded;
+}
+
+/*
+ * Calculate the start and number of virtual clusters we need to CoW.
+ *
+ * cpos is the virtual start cluster position at which we want to do CoW
+ * in a file and write_len is the cluster length.
+ * max_cpos is the place where we want to stop CoW intentionally.
+ *
+ * Normally we start CoW from the beginning of the extent record containing cpos.
+ * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
+ * get good I/O from the resulting extent tree.
+ */
+static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
+ struct ocfs2_extent_list *el,
+ u32 cpos,
+ u32 write_len,
+ u32 max_cpos,
+ u32 *cow_start,
+ u32 *cow_len)
+{
+ int ret = 0;
+ int tree_height = le16_to_cpu(el->l_tree_depth), i;
+ struct buffer_head *eb_bh = NULL;
+ struct ocfs2_extent_block *eb = NULL;
+ struct ocfs2_extent_rec *rec;
+ unsigned int want_clusters, rec_end = 0;
+ int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
+ int leaf_clusters;
+
+ BUG_ON(cpos + write_len > max_cpos);
+
+ if (tree_height > 0) {
+ ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ eb = (struct ocfs2_extent_block *) eb_bh->b_data;
+ el = &eb->h_list;
+
+ if (el->l_tree_depth) {
+ ocfs2_error(inode->i_sb,
+ "Inode %lu has non zero tree depth in "
+ "leaf block %llu\n", inode->i_ino,
+ (unsigned long long)eb_bh->b_blocknr);
+ ret = -EROFS;
+ goto out;
+ }
+ }
+
+ *cow_len = 0;
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
+ rec = &el->l_recs[i];
+
+ if (ocfs2_is_empty_extent(rec)) {
+ mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
+ "index %d\n", inode->i_ino, i);
+ continue;
+ }
+
+ if (le32_to_cpu(rec->e_cpos) +
+ le16_to_cpu(rec->e_leaf_clusters) <= cpos)
+ continue;
+
+ if (*cow_len == 0) {
+ /*
+ * We should find a refcounted record in the
+ * first pass.
+ */
+ BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
+ *cow_start = le32_to_cpu(rec->e_cpos);
+ }
+
+ /*
+ * If we encounter a hole, a non-refcounted record or
+ * pass the max_cpos, stop the search.
+ */
+ if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
+ (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
+ (max_cpos <= le32_to_cpu(rec->e_cpos)))
+ break;
+
+ leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
+ rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
+ if (rec_end > max_cpos) {
+ rec_end = max_cpos;
+ leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
+ }
+
+ /*
+ * How many clusters do we actually need from
+ * this extent? First we see how many we actually
+ * need to complete the write. If that's smaller
+ * than contig_clusters, we try for contig_clusters.
+ */
+ if (!*cow_len)
+ want_clusters = write_len;
+ else
+ want_clusters = (cpos + write_len) -
+ (*cow_start + *cow_len);
+ if (want_clusters < contig_clusters)
+ want_clusters = contig_clusters;
+
+ /*
+ * If the write does not cover the whole extent, we
+ * need to calculate how we're going to split the extent.
+ * We try to do it on contig_clusters boundaries.
+ *
+ * Any extent smaller than contig_clusters will be
+ * CoWed in its entirety.
+ */
+ if (leaf_clusters <= contig_clusters)
+ *cow_len += leaf_clusters;
+ else if (*cow_len || (*cow_start == cpos)) {
+ /*
+ * This extent needs to be CoW'd from its
+ * beginning, so all we have to do is compute
+ * how many clusters to grab. We align
+ * want_clusters to the edge of contig_clusters
+ * to get better I/O.
+ */
+ want_clusters = ocfs2_cow_align_length(inode->i_sb,
+ want_clusters);
+
+ if (leaf_clusters < want_clusters)
+ *cow_len += leaf_clusters;
+ else
+ *cow_len += want_clusters;
+ } else if ((*cow_start + contig_clusters) >=
+ (cpos + write_len)) {
+ /*
+ * Breaking off contig_clusters at the front
+ * of the extent will cover our write. That's
+ * easy.
+ */
+ *cow_len = contig_clusters;
+ } else if ((rec_end - cpos) <= contig_clusters) {
+ /*
+ * Breaking off contig_clusters at the tail of
+ * this extent will cover cpos.
+ */
+ *cow_start = rec_end - contig_clusters;
+ *cow_len = contig_clusters;
+ } else if ((rec_end - cpos) <= want_clusters) {
+ /*
+ * While we can't fit the entire write in this
+ * extent, we know that the write goes from cpos
+ * to the end of the extent. Break that off.
+ * We try to break it at some multiple of
+ * contig_clusters from the front of the extent.
+ * Failing that (ie, cpos is within
+ * contig_clusters of the front), we'll CoW the
+ * entire extent.
+ */
+ *cow_start = ocfs2_cow_align_start(inode->i_sb,
+ *cow_start, cpos);
+ *cow_len = rec_end - *cow_start;
+ } else {
+ /*
+ * Ok, the entire write lives in the middle of
+ * this extent. Let's try to slice the extent up
+ * nicely. Optimally, our CoW region starts at
+ * m*contig_clusters from the beginning of the
+ * extent and goes for n*contig_clusters,
+ * covering the entire write.
+ */
+ *cow_start = ocfs2_cow_align_start(inode->i_sb,
+ *cow_start, cpos);
+
+ want_clusters = (cpos + write_len) - *cow_start;
+ want_clusters = ocfs2_cow_align_length(inode->i_sb,
+ want_clusters);
+ if (*cow_start + want_clusters <= rec_end)
+ *cow_len = want_clusters;
+ else
+ *cow_len = rec_end - *cow_start;
+ }
+
+ /* Have we covered our entire write yet? */
+ if ((*cow_start + *cow_len) >= (cpos + write_len))
+ break;
+
+ /*
+ * If we reach the end of the extent block and don't get enough
+ * clusters, continue with the next extent block if possible.
+ */
+ if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
+ eb && eb->h_next_leaf_blk) {
+ brelse(eb_bh);
+ eb_bh = NULL;
+
+ ret = ocfs2_read_extent_block(INODE_CACHE(inode),
+ le64_to_cpu(eb->h_next_leaf_blk),
+ &eb_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ eb = (struct ocfs2_extent_block *) eb_bh->b_data;
+ el = &eb->h_list;
+ i = -1;
+ }
+ }
+
+out:
+ brelse(eb_bh);
+ return ret;
+}
+
+/*
+ * Prepare meta_ac, data_ac and calculate credits when we want to add some
+ * num_clusters in data_tree "et" and change the refcount for the old
+ * clusters (starting from p_cluster) in the refcount tree.
+ *
+ * Note:
+ * 1. Since we may split the old tree, we will need at most num_clusters + 2
+ * more new leaf records.
+ * 2. In some cases, we may not need to reserve new clusters (e.g. reflink), so
+ * just pass data_ac = NULL.
+ */
+static int ocfs2_lock_refcount_allocators(struct super_block *sb,
+ u32 p_cluster, u32 num_clusters,
+ struct ocfs2_extent_tree *et,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_alloc_context **meta_ac,
+ struct ocfs2_alloc_context **data_ac,
+ int *credits)
+{
+ int ret = 0, meta_add = 0;
+ int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
+
+ if (num_free_extents < 0) {
+ ret = num_free_extents;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (num_free_extents < num_clusters + 2)
+ meta_add =
+ ocfs2_extend_meta_needed(et->et_root_el);
+
+ *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
+ num_clusters + 2);
+
+ ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
+ p_cluster, num_clusters,
+ &meta_add, credits);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
+ meta_add, num_clusters, *credits);
+ ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
+ meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (data_ac) {
+ ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
+ data_ac);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+out:
+ if (ret) {
+ if (*meta_ac) {
+ ocfs2_free_alloc_context(*meta_ac);
+ *meta_ac = NULL;
+ }
+ }
+
+ return ret;
+}
+
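+/*
+ * Callback for walk_page_buffers() during CoW by page: the buffer must not
+ * be dirty yet, and clearing its mapped state lets ocfs2_map_and_dirty_page()
+ * below map it to the newly allocated cluster.
+ */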
+static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
+{
+ BUG_ON(buffer_dirty(bh));
+
+ clear_buffer_mapped(bh);
+
+ return 0;
+}
+
+static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+ struct ocfs2_cow_context *context,
+ u32 cpos, u32 old_cluster,
+ u32 new_cluster, u32 new_len)
+{
+ int ret = 0, partial;
+ struct ocfs2_caching_info *ci = context->data_et.et_ci;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
+ struct page *page;
+ pgoff_t page_index;
+ unsigned int from, to;
+ loff_t offset, end, map_end;
+ struct address_space *mapping = context->inode->i_mapping;
+
+ mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
+ new_cluster, new_len, cpos);
+
+ offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
+ end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
+
+ while (offset < end) {
+ page_index = offset >> PAGE_CACHE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+ if (map_end > end)
+ map_end = end;
+
+ /* from, to is the offset within the page. */
+ from = offset & (PAGE_CACHE_SIZE - 1);
+ to = PAGE_CACHE_SIZE;
+ if (map_end & (PAGE_CACHE_SIZE - 1))
+ to = map_end & (PAGE_CACHE_SIZE - 1);
+
+ page = grab_cache_page(mapping, page_index);
+
+ /* This page can't be dirtied before we CoW it out. */
+ BUG_ON(PageDirty(page));
+
+ if (!PageUptodate(page)) {
+ ret = block_read_full_page(page, ocfs2_get_block);
+ if (ret) {
+ mlog_errno(ret);
+ goto unlock;
+ }
+ lock_page(page);
+ }
+
+ if (page_has_buffers(page)) {
+ ret = walk_page_buffers(handle, page_buffers(page),
+ from, to, &partial,
+ ocfs2_clear_cow_buffer);
+ if (ret) {
+ mlog_errno(ret);
+ goto unlock;
+ }
+ }
+
+ ocfs2_map_and_dirty_page(context->inode,
+ handle, from, to,
+ page, 0, &new_block);
+ mark_page_accessed(page);
+unlock:
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
+ offset = map_end;
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
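+/*
+ * Copy the old clusters to the new ones block by block through the journal
+ * (jbd) rather than through the page cache.
+ */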
+static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
+ struct ocfs2_cow_context *context,
+ u32 cpos, u32 old_cluster,
+ u32 new_cluster, u32 new_len)
+{
+ int ret = 0;
+ struct super_block *sb = context->inode->i_sb;
+ struct ocfs2_caching_info *ci = context->data_et.et_ci;
+ int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
+ u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
+ u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+ struct buffer_head *old_bh = NULL;
+ struct buffer_head *new_bh = NULL;
+
+ mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster,
+ new_cluster, new_len);
+
+ for (i = 0; i < blocks; i++, old_block++, new_block++) {
+ new_bh = sb_getblk(osb->sb, new_block);
+ if (new_bh == NULL) {
+ ret = -EIO;
+ mlog_errno(ret);
+ break;
+ }
+
+ ocfs2_set_new_buffer_uptodate(ci, new_bh);
+
+ ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ ret = ocfs2_journal_access(handle, ci, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
+ ret = ocfs2_journal_dirty(handle, new_bh);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ brelse(new_bh);
+ brelse(old_bh);
+ new_bh = NULL;
+ old_bh = NULL;
+ }
+
+ brelse(new_bh);
+ brelse(old_bh);
+ return ret;
+}
+
+static int ocfs2_clear_ext_refcount(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ u32 cpos, u32 p_cluster, u32 len,
+ unsigned int ext_flags,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret, index;
+ struct ocfs2_extent_rec replace_rec;
+ struct ocfs2_path *path = NULL;
+ struct ocfs2_extent_list *el;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
+ u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
+
+ mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
+ (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
+
+ memset(&replace_rec, 0, sizeof(replace_rec));
+ replace_rec.e_cpos = cpu_to_le32(cpos);
+ replace_rec.e_leaf_clusters = cpu_to_le16(len);
+ replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
+ p_cluster));
+ replace_rec.e_flags = ext_flags;
+ replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
+
+ path = ocfs2_new_path_from_et(et);
+ if (!path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_path(et->et_ci, path, cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ el = path_leaf_el(path);
+
+ index = ocfs2_search_extent_list(el, cpos);
+ if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
+ ocfs2_error(sb,
+ "Inode %llu has an extent at cpos %u which can no "
+ "longer be found.\n",
+ (unsigned long long)ino, cpos);
+ ret = -EROFS;
+ goto out;
+ }
+
+ ret = ocfs2_split_extent(handle, et, path, index,
+ &replace_rec, meta_ac, dealloc);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ ocfs2_free_path(path);
+ return ret;
+}
+
+static int ocfs2_replace_clusters(handle_t *handle,
+ struct ocfs2_cow_context *context,
+ u32 cpos, u32 old,
+ u32 new, u32 len,
+ unsigned int ext_flags)
+{
+ int ret;
+ struct ocfs2_caching_info *ci = context->data_et.et_ci;
+ u64 ino = ocfs2_metadata_cache_owner(ci);
+
+ mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
+ (unsigned long long)ino, cpos, old, new, len, ext_flags);
+
+	/* If the old clusters are unwritten, there is no need to duplicate. */
+ if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+ ret = context->cow_duplicate_clusters(handle, context, cpos,
+ old, new, len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
+ cpos, new, len, ext_flags,
+ context->meta_ac, &context->dealloc);
+ if (ret)
+ mlog_errno(ret);
+out:
+ return ret;
+}
+
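+/*
+ * Write back and wait on the CoWed range so the copied data is on disk.
+ * In ordered data mode this is skipped and the journal ordering is relied
+ * upon instead.
+ */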
+static int ocfs2_cow_sync_writeback(struct super_block *sb,
+ struct ocfs2_cow_context *context,
+ u32 cpos, u32 num_clusters)
+{
+ int ret = 0;
+ loff_t offset, end, map_end;
+ pgoff_t page_index;
+ struct page *page;
+
+ if (ocfs2_should_order_data(context->inode))
+ return 0;
+
+ offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
+ end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
+
+ ret = filemap_fdatawrite_range(context->inode->i_mapping,
+ offset, end - 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
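+ /*
+ * Walk the range page by page, waiting for the writeback started
+ * above to finish and checking each page for I/O errors.
+ */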
+ while (offset < end) {
+ page_index = offset >> PAGE_CACHE_SHIFT;
+ map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ if (map_end > end)
+ map_end = end;
+
+ page = grab_cache_page(context->inode->i_mapping, page_index);
+ BUG_ON(!page);
+
+ wait_on_page_writeback(page);
+ if (PageError(page)) {
+ ret = -EIO;
+ mlog_errno(ret);
+ } else
+ mark_page_accessed(page);
+
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
+ offset = map_end;
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
+ u32 v_cluster, u32 *p_cluster,
+ u32 *num_clusters,
+ unsigned int *extent_flags)
+{
+ return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
+ num_clusters, extent_flags);
+}
+
+static int ocfs2_make_clusters_writable(struct super_block *sb,
+ struct ocfs2_cow_context *context,
+ u32 cpos, u32 p_cluster,
+ u32 num_clusters, unsigned int e_flags)
+{
+ int ret, delete, index, credits = 0;
+ u32 new_bit, new_len;
+ unsigned int set_len;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+ handle_t *handle;
+ struct buffer_head *ref_leaf_bh = NULL;
+ struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
+ struct ocfs2_refcount_rec rec;
+
+ mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
+ cpos, p_cluster, num_clusters, e_flags);
+
+ ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
+ &context->data_et,
+ ref_ci,
+ context->ref_root_bh,
+ &context->meta_ac,
+ &context->data_ac, &credits);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ if (context->post_refcount)
+ credits += context->post_refcount->credits;
+
+ credits += context->extra_credits;
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
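+ /*
+ * All of the work below runs inside the single transaction started
+ * above; walk the physical range one refcount record at a time.
+ */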
+ while (num_clusters) {
+ ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
+ p_cluster, num_clusters,
+ &rec, &index, &ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ BUG_ON(!rec.r_refcount);
+ set_len = min((u64)p_cluster + num_clusters,
+ le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters)) - p_cluster;
+
+ /*
+ * There are several different situations here.
+ * 1. If refcount == 1, remove the flag and don't CoW.
+ * 2. If refcount > 1, allocate clusters.
+ * We may not be able to allocate r_len clusters in one pass, so
+ * continue until we have handled num_clusters.
+ */
+ if (le32_to_cpu(rec.r_refcount) == 1) {
+ delete = 0;
+ ret = ocfs2_clear_ext_refcount(handle,
+ &context->data_et,
+ cpos, p_cluster,
+ set_len, e_flags,
+ context->meta_ac,
+ &context->dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+ } else {
+ delete = 1;
+
+ ret = __ocfs2_claim_clusters(osb, handle,
+ context->data_ac,
+ 1, set_len,
+ &new_bit, &new_len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = ocfs2_replace_clusters(handle, context,
+ cpos, p_cluster, new_bit,
+ new_len, e_flags);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+ set_len = new_len;
+ }
+
+ ret = __ocfs2_decrease_refcount(handle, ref_ci,
+ context->ref_root_bh,
+ p_cluster, set_len,
+ context->meta_ac,
+ &context->dealloc, delete);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ cpos += set_len;
+ p_cluster += set_len;
+ num_clusters -= set_len;
+ brelse(ref_leaf_bh);
+ ref_leaf_bh = NULL;
+ }
+
+ /* handle any post_cow action. */
+ if (context->post_refcount && context->post_refcount->func) {
+ ret = context->post_refcount->func(context->inode, handle,
+ context->post_refcount->para);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+ }
+
+ /*
+ * Write the new pages out first if we are in write-back mode.
+ */
+ if (context->get_clusters == ocfs2_di_get_clusters) {
+ ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ if (context->data_ac) {
+ ocfs2_free_alloc_context(context->data_ac);
+ context->data_ac = NULL;
+ }
+ if (context->meta_ac) {
+ ocfs2_free_alloc_context(context->meta_ac);
+ context->meta_ac = NULL;
+ }
+ brelse(ref_leaf_bh);
+
+ return ret;
+}
+
+static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
+{
+ int ret = 0;
+ struct inode *inode = context->inode;
+ u32 cow_start = context->cow_start, cow_len = context->cow_len;
+ u32 p_cluster, num_clusters;
+ unsigned int ext_flags;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
+ ocfs2_error(inode->i_sb, "Inode %lu wants to use a refcount "
+ "tree, but the feature bit is not set in the "
+ "superblock.", inode->i_ino);
+ return -EROFS;
+ }
+
+ ocfs2_init_dealloc_ctxt(&context->dealloc);
+
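+ /*
+ * CoW the range one extent at a time: look up the physical extent
+ * backing cow_start and make those clusters writable before moving
+ * on to the next extent.
+ */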
+ while (cow_len) {
+ ret = context->get_clusters(context, cow_start, &p_cluster,
+ &num_clusters, &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
+
+ if (cow_len < num_clusters)
+ num_clusters = cow_len;
+
+ ret = ocfs2_make_clusters_writable(inode->i_sb, context,
+ cow_start, p_cluster,
+ num_clusters, ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ cow_len -= num_clusters;
+ cow_start += num_clusters;
+ }
+
+ if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
+ ocfs2_schedule_truncate_log_flush(osb, 1);
+ ocfs2_run_deallocs(osb, &context->dealloc);
+ }
+
+ return ret;
+}
+
+/*
+ * Starting at cpos, try to CoW write_len clusters. Don't CoW
+ * past max_cpos. This will stop when it runs into a hole or an
+ * unrefcounted extent.
+ */
+static int ocfs2_refcount_cow_hunk(struct inode *inode,
+ struct buffer_head *di_bh,
+ u32 cpos, u32 write_len, u32 max_cpos)
+{
+ int ret;
+ u32 cow_start = 0, cow_len = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_refcount_tree *ref_tree;
+ struct ocfs2_cow_context *context = NULL;
+
+ BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+
+ ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
+ cpos, write_len, max_cpos,
+ &cow_start, &cow_len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
+ "cow_len %u\n", inode->i_ino,
+ cpos, write_len, cow_start, cow_len);
+
+ BUG_ON(cow_len == 0);
+
+ context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
+ if (!context) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
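+ /*
+ * Hold the refcount tree write lock across the whole CoW operation.
+ */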
+ ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
+ 1, &ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ context->inode = inode;
+ context->cow_start = cow_start;
+ context->cow_len = cow_len;
+ context->ref_tree = ref_tree;
+ context->ref_root_bh = ref_root_bh;
+ context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
+ context->get_clusters = ocfs2_di_get_clusters;
+
+ ocfs2_init_dinode_extent_tree(&context->data_et,
+ INODE_CACHE(inode), di_bh);
+
+ ret = ocfs2_replace_cow(context);
+ if (ret)
+ mlog_errno(ret);
+
+ /*
+ * Truncate the extent map here: whether or not we hit an error
+ * above, we should no longer trust the cached extent map.
+ */
+ ocfs2_extent_map_trunc(inode, cow_start);
+
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ brelse(ref_root_bh);
+out:
+ kfree(context);
+ return ret;
+}
+
+/*
+ * CoW any and all clusters between cpos and cpos+write_len.
+ * Don't CoW past max_cpos. If this returns successfully, all
+ * clusters between cpos and cpos+write_len are safe to modify.
+ */
+int ocfs2_refcount_cow(struct inode *inode,
+ struct buffer_head *di_bh,
+ u32 cpos, u32 write_len, u32 max_cpos)
+{
+ int ret = 0;
+ u32 p_cluster, num_clusters;
+ unsigned int ext_flags;
+
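+ /*
+ * Walk the logical range extent by extent and only CoW the pieces
+ * that are actually marked refcounted; other extents are skipped.
+ */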
+ while (write_len) {
+ ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
+ &num_clusters, &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ if (write_len < num_clusters)
+ num_clusters = write_len;
+
+ if (ext_flags & OCFS2_EXT_REFCOUNTED) {
+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
+ num_clusters, max_cpos);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+ }
+
+ write_len -= num_clusters;
+ cpos += num_clusters;
+ }
+
+ return ret;
+}
+
+static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
+ u32 v_cluster, u32 *p_cluster,
+ u32 *num_clusters,
+ unsigned int *extent_flags)
+{
+ struct inode *inode = context->inode;
+ struct ocfs2_xattr_value_root *xv = context->cow_object;
+
+ return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
+ num_clusters, &xv->xr_list,
+ extent_flags);
+}
+
+/*
+ * Given an xattr value root, calculate the maximum amount of metadata
+ * and journal credits we need for the refcount tree change if we
+ * truncate it to 0.
+ */
+int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_xattr_value_root *xv,
+ int *meta_add, int *credits)
+{
+ int ret = 0, index, ref_blocks = 0;
+ u32 p_cluster, num_clusters;
+ u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
+ struct ocfs2_refcount_block *rb;
+ struct ocfs2_refcount_rec rec;
+ struct buffer_head *ref_leaf_bh = NULL;
+
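+ /*
+ * Walk every allocated cluster of the xattr value and, for each
+ * refcount record covering it, account for a possible record split
+ * and the journal credit needed to update that leaf.
+ */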
+ while (cpos < clusters) {
+ ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
+ &num_clusters, &xv->xr_list,
+ NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ cpos += num_clusters;
+
+ while (num_clusters) {
+ ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
+ p_cluster, num_clusters,
+ &rec, &index,
+ &ref_leaf_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ BUG_ON(!rec.r_refcount);
+
+ rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
+
+ /*
+ * We really don't know whether the other clusters are in this
+ * refcount block or not, so just take the worst case: assume all
+ * the clusters are in this block and that each one will split a
+ * refcount rec, so in total we need clusters * 2 new refcount recs.
+ */
+ if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
+ le16_to_cpu(rb->rf_records.rl_count))
+ ref_blocks++;
+
+ *credits += 1;
+ brelse(ref_leaf_bh);
+ ref_leaf_bh = NULL;
+
+ if (num_clusters <= le32_to_cpu(rec.r_clusters))
+ break;
+ else
+ num_clusters -= le32_to_cpu(rec.r_clusters);
+ p_cluster += num_clusters;
+ }
+ }
+
+ *meta_add += ref_blocks;
+ if (!ref_blocks)
+ goto out;
+
+ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+ if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
+ *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
+ else {
+ struct ocfs2_extent_tree et;
+
+ ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
+ *credits += ocfs2_calc_extend_credits(inode->i_sb,
+ et.et_root_el,
+ ref_blocks);
+ }
+
+out:
+ brelse(ref_leaf_bh);
+ return ret;
+}
+
+/*
+ * Do CoW for xattr.
+ */
+int ocfs2_refcount_cow_xattr(struct inode *inode,
+ struct ocfs2_dinode *di,
+ struct ocfs2_xattr_value_buf *vb,
+ struct ocfs2_refcount_tree *ref_tree,
+ struct buffer_head *ref_root_bh,
+ u32 cpos, u32 write_len,
+ struct ocfs2_post_refcount *post)
+{
+ int ret;
+ struct ocfs2_xattr_value_root *xv = vb->vb_xv;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_cow_context *context = NULL;
+ u32 cow_start, cow_len;
+
+ BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+
+ ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
+ cpos, write_len, UINT_MAX,
+ &cow_start, &cow_len);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ BUG_ON(cow_len == 0);
+
+ context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
+ if (!context) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ context->inode = inode;
+ context->cow_start = cow_start;
+ context->cow_len = cow_len;
+ context->ref_tree = ref_tree;
+ context->ref_root_bh = ref_root_bh;
+ context->cow_object = xv;
+
+ context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
+ /* We need the extra credits for duplicate_clusters by jbd. */
+ context->extra_credits =
+ ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
+ context->get_clusters = ocfs2_xattr_value_get_clusters;
+ context->post_refcount = post;
+
+ ocfs2_init_xattr_value_extent_tree(&context->data_et,
+ INODE_CACHE(inode), vb);
+
+ ret = ocfs2_replace_cow(context);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ kfree(context);
+ return ret;
+}
+
+/*
+ * Insert a new extent into the refcount tree and mark an extent rec
+ * as refcounted in the dinode tree.
+ */
+int ocfs2_add_refcount_flag(struct inode *inode,
+ struct ocfs2_extent_tree *data_et,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ u32 cpos, u32 p_cluster, u32 num_clusters,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ struct ocfs2_post_refcount *post)
+{
+ int ret;
+ handle_t *handle;
+ int credits = 1, ref_blocks = 0;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_alloc_context *meta_ac = NULL;
+
+ ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
+ ref_ci, ref_root_bh,
+ p_cluster, num_clusters,
+ &ref_blocks, &credits);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "reserve new metadata %d, credits = %d\n",
+ ref_blocks, credits);
+
+ if (ref_blocks) {
+ ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
+ ref_blocks, &meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ if (post)
+ credits += post->credits;
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
+ cpos, num_clusters, p_cluster,
+ meta_ac, dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
+ p_cluster, num_clusters, 0,
+ meta_ac, dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ if (post && post->func) {
+ ret = post->func(inode, handle, post->para);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+out:
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+ return ret;
+}
+
+static int ocfs2_change_ctime(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret;
+ handle_t *handle;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+
+ handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
+ OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ inode->i_ctime = CURRENT_TIME;
+ di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out:
+ return ret;
+}
+
+static int ocfs2_attach_refcount_tree(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int ret, data_changed = 0;
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_refcount_tree *ref_tree;
+ unsigned int ext_flags;
+ loff_t size;
+ u32 cpos, num_clusters, clusters, p_cluster;
+ struct ocfs2_cached_dealloc_ctxt dealloc;
+ struct ocfs2_extent_tree di_et;
+
+ ocfs2_init_dealloc_ctxt(&dealloc);
+
+ if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
+ ret = ocfs2_create_refcount_tree(inode, di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ BUG_ON(!di->i_refcount_loc);
+ ret = ocfs2_lock_refcount_tree(osb,
+ le64_to_cpu(di->i_refcount_loc), 1,
+ &ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
+
+ size = i_size_read(inode);
+ clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
+
+ cpos = 0;
+ while (cpos < clusters) {
+ ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
+ &num_clusters, &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ goto unlock;
+ }
+
+ if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
+ ret = ocfs2_add_refcount_flag(inode, &di_et,
+ &ref_tree->rf_ci,
+ ref_root_bh, cpos,
+ p_cluster, num_clusters,
+ &dealloc, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto unlock;
+ }
+
+ data_changed = 1;
+ }
+ cpos += num_clusters;
+ }
+
+ if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
+ ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
+ &ref_tree->rf_ci,
+ ref_root_bh,
+ &dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto unlock;
+ }
+ }
+
+ if (data_changed) {
+ ret = ocfs2_change_ctime(inode, di_bh);
+ if (ret)
+ mlog_errno(ret);
+ }
+
+unlock:
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ brelse(ref_root_bh);
+
+ if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
+ ocfs2_schedule_truncate_log_flush(osb, 1);
+ ocfs2_run_deallocs(osb, &dealloc);
+ }
+out:
+ /*
+ * Empty the extent map so that we may get the right extent
+ * record from the disk.
+ */
+ ocfs2_extent_map_trunc(inode, 0);
+
+ return ret;
+}
+
+static int ocfs2_add_refcounted_extent(struct inode *inode,
+ struct ocfs2_extent_tree *et,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ u32 cpos, u32 p_cluster, u32 num_clusters,
+ unsigned int ext_flags,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret;
+ handle_t *handle;
+ int credits = 0;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_alloc_context *meta_ac = NULL;
+
+ ret = ocfs2_lock_refcount_allocators(inode->i_sb,
+ p_cluster, num_clusters,
+ et, ref_ci,
+ ref_root_bh, &meta_ac,
+ NULL, &credits);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_insert_extent(handle, et, cpos,
+ cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
+ p_cluster)),
+ num_clusters, ext_flags, meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
+ p_cluster, num_clusters,
+ meta_ac, dealloc);
+ if (ret)
+ mlog_errno(ret);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+out:
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+ return ret;
+}
+
+static int ocfs2_duplicate_extent_list(struct inode *s_inode,
+ struct inode *t_inode,
+ struct buffer_head *t_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret = 0;
+ u32 p_cluster, num_clusters, clusters, cpos;
+ loff_t size;
+ unsigned int ext_flags;
+ struct ocfs2_extent_tree et;
+
+ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
+
+ size = i_size_read(s_inode);
+ clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
+
+ cpos = 0;
+ while (cpos < clusters) {
+ ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
+ &num_clusters, &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (p_cluster) {
+ ret = ocfs2_add_refcounted_extent(t_inode, &et,
+ ref_ci, ref_root_bh,
+ cpos, p_cluster,
+ num_clusters,
+ ext_flags,
+ dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ cpos += num_clusters;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Change the new file's attributes to match the source.
+ *
+ * reflink creates a snapshot of a file, which means the attributes
+ * must be identical except for three exceptions - nlink, ino, and ctime.
+ */
+static int ocfs2_complete_reflink(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ struct inode *t_inode,
+ struct buffer_head *t_bh,
+ bool preserve)
+{
+ int ret;
+ handle_t *handle;
+ struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
+ loff_t size = i_size_read(s_inode);
+
+ handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
+ OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
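+ /*
+ * Copy the in-memory inode state first, then mirror the same
+ * fields into the on-disk dinode under the journal.
+ */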
+ spin_lock(&OCFS2_I(t_inode)->ip_lock);
+ OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
+ OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
+ OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
+ spin_unlock(&OCFS2_I(t_inode)->ip_lock);
+ i_size_write(t_inode, size);
+
+ di->i_xattr_inline_size = s_di->i_xattr_inline_size;
+ di->i_clusters = s_di->i_clusters;
+ di->i_size = s_di->i_size;
+ di->i_dyn_features = s_di->i_dyn_features;
+ di->i_attr = s_di->i_attr;
+
+ if (preserve) {
+ di->i_uid = s_di->i_uid;
+ di->i_gid = s_di->i_gid;
+ di->i_mode = s_di->i_mode;
+
+ /*
+ * Update times: we want mtime to appear identical to the
+ * source, while ctime is updated.
+ */
+ t_inode->i_ctime = CURRENT_TIME;
+
+ di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
+
+ t_inode->i_mtime = s_inode->i_mtime;
+ di->i_mtime = s_di->i_mtime;
+ di->i_mtime_nsec = s_di->i_mtime_nsec;
+ }
+
+ ocfs2_journal_dirty(handle, t_bh);
+
+out_commit:
+ ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
+ return ret;
+}
+
+static int ocfs2_create_reflink_node(struct inode *s_inode,
+ struct buffer_head *s_bh,
+ struct inode *t_inode,
+ struct buffer_head *t_bh,
+ bool preserve)
+{
+ int ret;
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_cached_dealloc_ctxt dealloc;
+ struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
+ struct ocfs2_refcount_block *rb;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
+ struct ocfs2_refcount_tree *ref_tree;
+
+ ocfs2_init_dealloc_ctxt(&dealloc);
+
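+ /*
+ * Point the target inode at the source's refcount tree before
+ * sharing any extents, so both files reference the same tree.
+ */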
+ ret = ocfs2_set_refcount_tree(t_inode, t_bh,
+ le64_to_cpu(di->i_refcount_loc));
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
+ 1, &ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+
+ ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
+ &ref_tree->rf_ci, ref_root_bh,
+ &dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock_refcount;
+ }
+
+ ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
+ if (ret)
+ mlog_errno(ret);
+
+out_unlock_refcount:
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ brelse(ref_root_bh);
+out:
+ if (ocfs2_dealloc_has_cluster(&dealloc)) {
+ ocfs2_schedule_truncate_log_flush(osb, 1);
+ ocfs2_run_deallocs(osb, &dealloc);
+ }
+
+ return ret;
+}
+
+static int __ocfs2_reflink(struct dentry *old_dentry,
+ struct buffer_head *old_bh,
+ struct inode *new_inode,
+ bool preserve)
+{
+ int ret;
+ struct inode *inode = old_dentry->d_inode;
+ struct buffer_head *new_bh = NULL;
+
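+ /*
+ * Start write-out of the source's dirty data before sharing its
+ * extents; we wait for it to complete at the end of this function.
+ */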
+ ret = filemap_fdatawrite(inode->i_mapping);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_attach_refcount_tree(inode, old_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mutex_lock(&new_inode->i_mutex);
+ ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ ret = ocfs2_create_reflink_node(inode, old_bh,
+ new_inode, new_bh, preserve);
+ if (ret) {
+ mlog_errno(ret);
+ goto inode_unlock;
+ }
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
+ ret = ocfs2_reflink_xattrs(inode, old_bh,
+ new_inode, new_bh,
+ preserve);
+ if (ret)
+ mlog_errno(ret);
+ }
+inode_unlock:
+ ocfs2_inode_unlock(new_inode, 1);
+ brelse(new_bh);
+out_unlock:
+ mutex_unlock(&new_inode->i_mutex);
+out:
+ if (!ret) {
+ ret = filemap_fdatawait(inode->i_mapping);
+ if (ret)
+ mlog_errno(ret);
+ }
+ return ret;
+}
+
+static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry, bool preserve)
+{
+ int error;
+ struct inode *inode = old_dentry->d_inode;
+ struct buffer_head *old_bh = NULL;
+ struct inode *new_orphan_inode = NULL;
+
+ if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ return -EOPNOTSUPP;
+
+ error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
+ &new_orphan_inode);
+ if (error) {
+ mlog_errno(error);
+ goto out;
+ }
+
+ error = ocfs2_inode_lock(inode, &old_bh, 1);
+ if (error) {
+ mlog_errno(error);
+ goto out;
+ }
+
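+ /*
+ * Hold off xattr and allocation changes on the source while we
+ * build the reflinked copy in the orphan directory.
+ */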
+ down_write(&OCFS2_I(inode)->ip_xattr_sem);
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ error = __ocfs2_reflink(old_dentry, old_bh,
+ new_orphan_inode, preserve);
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ up_write(&OCFS2_I(inode)->ip_xattr_sem);
+
+ ocfs2_inode_unlock(inode, 1);
+ brelse(old_bh);
+
+ if (error) {
+ mlog_errno(error);
+ goto out;
+ }
+
+ /* If the security attributes aren't preserved, we need to re-initialize them. */
+ if (!preserve) {
+ error = ocfs2_init_security_and_acl(dir, new_orphan_inode);
+ if (error)
+ mlog_errno(error);
+ }
+out:
+ if (!error) {
+ error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
+ new_dentry);
+ if (error)
+ mlog_errno(error);
+ }
+
+ if (new_orphan_inode) {
+ /*
+ * We need to open_unlock the inode whether we succeeded or
+ * not, so that other nodes can delete it later.
+ */
+ ocfs2_open_unlock(new_orphan_inode);
+ if (error)
+ iput(new_orphan_inode);
+ }
+
+ return error;
+}
+
+/*
+ * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
+ * sys_reflink(). This will go away when vfs_reflink() exists in
+ * fs/namei.c.
+ */
+
+/* copied from may_create in VFS. */
+static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
+{
+ if (child->d_inode)
+ return -EEXIST;
+ if (IS_DEADDIR(dir))
+ return -ENOENT;
+ return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+}
+
+/* copied from user_path_parent. */
+static int ocfs2_user_path_parent(const char __user *path,
+ struct nameidata *nd, char **name)
+{
+ char *s = getname(path);
+ int error;
+
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ error = path_lookup(s, LOOKUP_PARENT, nd);
+ if (error)
+ putname(s);
+ else
+ *name = s;
+
+ return error;
+}
+
+/**
+ * ocfs2_vfs_reflink - Create a reference-counted link
+ *
+ * @old_dentry: source dentry + inode
+ * @dir: directory to create the target in
+ * @new_dentry: target dentry
+ * @preserve: if true, preserve all file attributes
+ */
+int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry, bool preserve)
+{
+ struct inode *inode = old_dentry->d_inode;
+ int error;
+
+ if (!inode)
+ return -ENOENT;
+
+ error = ocfs2_may_create(dir, new_dentry);
+ if (error)
+ return error;
+
+ if (dir->i_sb != inode->i_sb)
+ return -EXDEV;
+
+ /*
+ * A reflink to an append-only or immutable file cannot be created.
+ */
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ /* Only regular files can be reflinked. */
+ if (!S_ISREG(inode->i_mode))
+ return -EPERM;
+
+ /*
+ * If the caller wants to preserve ownership, they must have the
+ * rights to do so.
+ */
+ if (preserve) {
+ if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN))
+ return -EPERM;
+ if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
+ return -EPERM;
+ }
+
+ /*
+ * If the caller is modifying any aspect of the attributes, they
+ * are not creating a snapshot. They need read permission on the
+ * file.
+ */
+ if (!preserve) {
+ error = inode_permission(inode, MAY_READ);
+ if (error)
+ return error;
+ }
+
+ mutex_lock(&inode->i_mutex);
+ vfs_dq_init(dir);
+ error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
+ mutex_unlock(&inode->i_mutex);
+ if (!error)
+ fsnotify_create(dir, new_dentry);
+ return error;
+}
+
+/*
+ * Most of this code is copied from sys_linkat().
+ */
+int ocfs2_reflink_ioctl(struct inode *inode,
+ const char __user *oldname,
+ const char __user *newname,
+ bool preserve)
+{
+ struct dentry *new_dentry;
+ struct nameidata nd;
+ struct path old_path;
+ int error;
+ char *to = NULL;
+
+ if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ return -EOPNOTSUPP;
+
+ error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
+ if (error) {
+ mlog_errno(error);
+ return error;
+ }
+
+ error = ocfs2_user_path_parent(newname, &nd, &to);
+ if (error) {
+ mlog_errno(error);
+ goto out;
+ }
+
+ error = -EXDEV;
+ if (old_path.mnt != nd.path.mnt)
+ goto out_release;
+ new_dentry = lookup_create(&nd, 0);
+ error = PTR_ERR(new_dentry);
+ if (IS_ERR(new_dentry)) {
+ mlog_errno(error);
+ goto out_unlock;
+ }
+
+ error = mnt_want_write(nd.path.mnt);
+ if (error) {
+ mlog_errno(error);
+ goto out_dput;
+ }
+
+ error = ocfs2_vfs_reflink(old_path.dentry,
+ nd.path.dentry->d_inode,
+ new_dentry, preserve);
+ mnt_drop_write(nd.path.mnt);
+out_dput:
+ dput(new_dentry);
+out_unlock:
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+out_release:
+ path_put(&nd.path);
+ putname(to);
+out:
+ path_put(&old_path);
+
+ return error;
+}
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
new file mode 100644
index 00000000000..c1d19b1d3ec
--- /dev/null
+++ b/fs/ocfs2/refcounttree.h
@@ -0,0 +1,106 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * refcounttree.h
+ *
+ * Copyright (C) 2009 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef OCFS2_REFCOUNTTREE_H
+#define OCFS2_REFCOUNTTREE_H
+
+struct ocfs2_refcount_tree {
+ struct rb_node rf_node;
+ u64 rf_blkno;
+ u32 rf_generation;
+ struct rw_semaphore rf_sem;
+ struct ocfs2_lock_res rf_lockres;
+ struct kref rf_getcnt;
+ int rf_removed;
+
+ /* the following 4 fields are used by caching_info. */
+ struct ocfs2_caching_info rf_ci;
+ spinlock_t rf_lock;
+ struct mutex rf_io_mutex;
+ struct super_block *rf_sb;
+};
+
+void ocfs2_purge_refcount_trees(struct ocfs2_super *osb);
+int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, u64 ref_blkno, int rw,
+ struct ocfs2_refcount_tree **tree,
+ struct buffer_head **ref_bh);
+void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
+ struct ocfs2_refcount_tree *tree,
+ int rw);
+
+int ocfs2_decrease_refcount(struct inode *inode,
+ handle_t *handle, u32 cpos, u32 len,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ int delete);
+int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 phys_blkno,
+ u32 clusters,
+ int *credits,
+ struct ocfs2_alloc_context **meta_ac);
+int ocfs2_refcount_cow(struct inode *inode, struct buffer_head *di_bh,
+ u32 cpos, u32 write_len, u32 max_cpos);
+
+typedef int (ocfs2_post_refcount_func)(struct inode *inode,
+ handle_t *handle,
+ void *para);
+/*
+ * Some refcount callers need to do more work after we modify the data b-tree
+ * during a refcount operation (including CoW and adding the refcount flag),
+ * but before the transaction completes. Such callers must give us this
+ * structure so that we can do the extra work within our transaction.
+ *
+ */
+struct ocfs2_post_refcount {
+ int credits; /* credits it needs for the journal. */
+ ocfs2_post_refcount_func *func; /* real function. */
+ void *para;
+};
+
+int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_xattr_value_root *xv,
+ int *meta_add, int *credits);
+int ocfs2_refcount_cow_xattr(struct inode *inode,
+ struct ocfs2_dinode *di,
+ struct ocfs2_xattr_value_buf *vb,
+ struct ocfs2_refcount_tree *ref_tree,
+ struct buffer_head *ref_root_bh,
+ u32 cpos, u32 write_len,
+ struct ocfs2_post_refcount *post);
+int ocfs2_add_refcount_flag(struct inode *inode,
+ struct ocfs2_extent_tree *data_et,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ u32 cpos, u32 p_cluster, u32 num_clusters,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ struct ocfs2_post_refcount *post);
+int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh);
+int ocfs2_try_remove_refcount_tree(struct inode *inode,
+ struct buffer_head *di_bh);
+int ocfs2_increase_refcount(handle_t *handle,
+ struct ocfs2_caching_info *ci,
+ struct buffer_head *ref_root_bh,
+ u64 cpos, u32 len,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc);
+int ocfs2_reflink_ioctl(struct inode *inode,
+ const char __user *oldname,
+ const char __user *newname,
+ bool preserve);
+#endif /* OCFS2_REFCOUNTTREE_H */
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index 424adaa5f90..3c3d673a4d2 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -106,8 +106,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n",
new_clusters, first_new_cluster);
- ret = ocfs2_journal_access_gd(handle, bm_inode, group_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
+ group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -141,7 +141,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
}
/* update the inode accordingly. */
- ret = ocfs2_journal_access_di(handle, bm_inode, bm_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
@@ -514,7 +514,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
goto out_unlock;
}
- ocfs2_set_new_buffer_uptodate(inode, group_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);
ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
if (ret) {
@@ -536,8 +536,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
cl = &fe->id2.i_chain;
cr = &cl->cl_recs[input->chain];
- ret = ocfs2_journal_access_gd(handle, main_bm_inode, group_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
+ group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out_commit;
@@ -552,8 +552,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
goto out_commit;
}
- ret = ocfs2_journal_access_di(handle, main_bm_inode, main_bm_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
+ main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out_commit;
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 40661e7824e..bfbd7e9e949 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -150,8 +150,8 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
* be !NULL. Thus, ocfs2_read_blocks() will ignore blocknr. If
* this is not true, the read of -1 (UINT64_MAX) will fail.
*/
- ret = ocfs2_read_blocks(si->si_inode, -1, si->si_blocks, si->si_bh,
- OCFS2_BH_IGNORE_CACHE, NULL);
+ ret = ocfs2_read_blocks(INODE_CACHE(si->si_inode), -1, si->si_blocks,
+ si->si_bh, OCFS2_BH_IGNORE_CACHE, NULL);
if (ret == 0) {
spin_lock(&osb->osb_lock);
ocfs2_update_slot_info(si);
@@ -213,7 +213,7 @@ static int ocfs2_update_disk_slot(struct ocfs2_super *osb,
ocfs2_update_disk_slot_old(si, slot_num, &bh);
spin_unlock(&osb->osb_lock);
- status = ocfs2_write_block(osb, bh, si->si_inode);
+ status = ocfs2_write_block(osb, bh, INODE_CACHE(si->si_inode));
if (status < 0)
mlog_errno(status);
@@ -404,8 +404,8 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb,
(unsigned long long)blkno);
bh = NULL; /* Acquire a fresh bh */
- status = ocfs2_read_blocks(si->si_inode, blkno, 1, &bh,
- OCFS2_BH_IGNORE_CACHE, NULL);
+ status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno,
+ 1, &bh, OCFS2_BH_IGNORE_CACHE, NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 73a16d4666d..c30b644d957 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -310,7 +310,7 @@ int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di,
int rc;
struct buffer_head *tmp = *bh;
- rc = ocfs2_read_block(inode, gd_blkno, &tmp,
+ rc = ocfs2_read_block(INODE_CACHE(inode), gd_blkno, &tmp,
ocfs2_validate_group_descriptor);
if (rc)
goto out;
@@ -352,7 +352,7 @@ static int ocfs2_block_group_fill(handle_t *handle,
}
status = ocfs2_journal_access_gd(handle,
- alloc_inode,
+ INODE_CACHE(alloc_inode),
bg_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
@@ -476,7 +476,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
mlog_errno(status);
goto bail;
}
- ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
status = ocfs2_block_group_fill(handle,
alloc_inode,
@@ -491,7 +491,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
bg = (struct ocfs2_group_desc *) bg_bh->b_data;
- status = ocfs2_journal_access_di(handle, alloc_inode,
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1033,7 +1033,7 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
status = ocfs2_journal_access_gd(handle,
- alloc_inode,
+ INODE_CACHE(alloc_inode),
group_bh,
journal_type);
if (status < 0) {
@@ -1106,7 +1106,8 @@ static int ocfs2_relink_block_group(handle_t *handle,
bg_ptr = le64_to_cpu(bg->bg_next_group);
prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
- status = ocfs2_journal_access_gd(handle, alloc_inode, prev_bg_bh,
+ status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
+ prev_bg_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -1121,8 +1122,8 @@ static int ocfs2_relink_block_group(handle_t *handle,
goto out_rollback;
}
- status = ocfs2_journal_access_gd(handle, alloc_inode, bg_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
+ bg_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_rollback;
@@ -1136,8 +1137,8 @@ static int ocfs2_relink_block_group(handle_t *handle,
goto out_rollback;
}
- status = ocfs2_journal_access_di(handle, alloc_inode, fe_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
+ fe_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto out_rollback;
@@ -1288,7 +1289,7 @@ static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
- ret = ocfs2_journal_access_di(handle, inode, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
@@ -1461,7 +1462,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
/* Ok, claim our bits now: set the info on dinode, chainlist
* and then the group */
status = ocfs2_journal_access_di(handle,
- alloc_inode,
+ INODE_CACHE(alloc_inode),
ac->ac_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
@@ -1907,8 +1908,8 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle,
if (ocfs2_is_cluster_bitmap(alloc_inode))
journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
- status = ocfs2_journal_access_gd(handle, alloc_inode, group_bh,
- journal_type);
+ status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
+ group_bh, journal_type);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -1993,8 +1994,8 @@ int ocfs2_free_suballoc_bits(handle_t *handle,
goto bail;
}
- status = ocfs2_journal_access_di(handle, alloc_inode, alloc_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
+ status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
+ alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -2151,7 +2152,7 @@ int ocfs2_lock_allocators(struct inode *inode,
BUG_ON(clusters_to_add != 0 && data_ac == NULL);
- num_free_extents = ocfs2_num_free_extents(osb, inode, et);
+ num_free_extents = ocfs2_num_free_extents(osb, et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index a3f8871d21f..4cc3c890a2c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -28,7 +28,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
-#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/statfs.h>
@@ -69,6 +68,7 @@
#include "ver.h"
#include "xattr.h"
#include "quota.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
@@ -965,7 +965,7 @@ static int ocfs2_quota_off(struct super_block *sb, int type, int remount)
return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED);
}
-static struct quotactl_ops ocfs2_quotactl_ops = {
+static const struct quotactl_ops ocfs2_quotactl_ops = {
.quota_on = ocfs2_quota_on,
.quota_off = ocfs2_quota_off,
.quota_sync = vfs_quota_sync,
@@ -1668,8 +1668,6 @@ static void ocfs2_inode_init_once(void *data)
spin_lock_init(&oi->ip_lock);
ocfs2_extent_map_init(&oi->vfs_inode);
INIT_LIST_HEAD(&oi->ip_io_markers);
- oi->ip_created_trans = 0;
- oi->ip_last_trans = 0;
oi->ip_dir_start_lookup = 0;
init_rwsem(&oi->ip_alloc_sem);
@@ -1683,7 +1681,8 @@ static void ocfs2_inode_init_once(void *data)
ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
ocfs2_lock_res_init_once(&oi->ip_open_lockres);
- ocfs2_metadata_cache_init(&oi->vfs_inode);
+ ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode),
+ &ocfs2_inode_caching_ops);
inode_init_once(&oi->vfs_inode);
}
@@ -1859,6 +1858,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
ocfs2_sync_blockdev(sb);
+ ocfs2_purge_refcount_trees(osb);
+
/* No cluster connection means we've failed during mount, so skip
* all the steps which depended on that to complete. */
if (osb->cconn) {
@@ -2065,6 +2066,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
goto bail;
}
+ osb->osb_rf_lock_tree = RB_ROOT;
+
osb->s_feature_compat =
le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_compat);
osb->s_feature_ro_compat =
@@ -2490,7 +2493,8 @@ void __ocfs2_abort(struct super_block* sb,
/* Force a panic(). This stinks, but it's better than letting
* things continue without having a proper hard readonly
* here. */
- OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
+ if (!ocfs2_mount_local(OCFS2_SB(sb)))
+ OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
ocfs2_handle_error(sb);
}
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 579dd1b1110..e3421030a69 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -38,7 +38,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/utsname.h>
#include <linux/namei.h>
#define MLOG_MASK_PREFIX ML_NAMEI
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 187b99ff036..b6284f235d2 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -75,15 +75,77 @@ struct ocfs2_meta_cache_item {
static struct kmem_cache *ocfs2_uptodate_cachep = NULL;
-void ocfs2_metadata_cache_init(struct inode *inode)
+u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+ BUG_ON(!ci || !ci->ci_ops);
- oi->ip_flags |= OCFS2_INODE_CACHE_INLINE;
+ return ci->ci_ops->co_owner(ci);
+}
+
+struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci)
+{
+ BUG_ON(!ci || !ci->ci_ops);
+
+ return ci->ci_ops->co_get_super(ci);
+}
+
+static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
+{
+ BUG_ON(!ci || !ci->ci_ops);
+
+ ci->ci_ops->co_cache_lock(ci);
+}
+
+static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
+{
+ BUG_ON(!ci || !ci->ci_ops);
+
+ ci->ci_ops->co_cache_unlock(ci);
+}
+
+void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
+{
+ BUG_ON(!ci || !ci->ci_ops);
+
+ ci->ci_ops->co_io_lock(ci);
+}
+
+void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
+{
+ BUG_ON(!ci || !ci->ci_ops);
+
+ ci->ci_ops->co_io_unlock(ci);
+}
+
+
+static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci,
+ int clear)
+{
+ ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
ci->ci_num_cached = 0;
+
+ if (clear) {
+ ci->ci_created_trans = 0;
+ ci->ci_last_trans = 0;
+ }
+}
+
+void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
+ const struct ocfs2_caching_operations *ops)
+{
+ BUG_ON(!ops);
+
+ ci->ci_ops = ops;
+ ocfs2_metadata_cache_reset(ci, 1);
}
+void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci)
+{
+ ocfs2_metadata_cache_purge(ci);
+ ocfs2_metadata_cache_reset(ci, 1);
+}
+
+
/* No lock taken here as 'root' is not expected to be visible to other
* processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
@@ -112,19 +174,20 @@ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
* This function is a few more lines longer than necessary due to some
* accounting done here, but I think it's worth tracking down those
* bugs sooner -- Mark */
-void ocfs2_metadata_cache_purge(struct inode *inode)
+void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
{
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
unsigned int tree, to_purge, purged;
- struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
struct rb_root root = RB_ROOT;
- spin_lock(&oi->ip_lock);
- tree = !(oi->ip_flags & OCFS2_INODE_CACHE_INLINE);
+ BUG_ON(!ci || !ci->ci_ops);
+
+ ocfs2_metadata_cache_lock(ci);
+ tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
to_purge = ci->ci_num_cached;
- mlog(0, "Purge %u %s items from Inode %llu\n", to_purge,
- tree ? "array" : "tree", (unsigned long long)oi->ip_blkno);
+ mlog(0, "Purge %u %s items from Owner %llu\n", to_purge,
+ tree ? "array" : "tree",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci));
/* If we're a tree, save off the root so that we can safely
* initialize the cache. We do the work to free tree members
@@ -132,16 +195,17 @@ void ocfs2_metadata_cache_purge(struct inode *inode)
if (tree)
root = ci->ci_cache.ci_tree;
- ocfs2_metadata_cache_init(inode);
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_reset(ci, 0);
+ ocfs2_metadata_cache_unlock(ci);
purged = ocfs2_purge_copied_metadata_tree(&root);
/* If possible, track the number wiped so that we can more
* easily detect counting errors. Unfortunately, this is only
* meaningful for trees. */
if (tree && purged != to_purge)
- mlog(ML_ERROR, "Inode %llu, count = %u, purged = %u\n",
- (unsigned long long)oi->ip_blkno, to_purge, purged);
+ mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ to_purge, purged);
}
/* Returns the index in the cache array, -1 if not found.
@@ -182,27 +246,25 @@ ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
return NULL;
}
-static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
+static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
int index = -1;
struct ocfs2_meta_cache_item *item = NULL;
- spin_lock(&oi->ip_lock);
+ ocfs2_metadata_cache_lock(ci);
- mlog(0, "Inode %llu, query block %llu (inline = %u)\n",
- (unsigned long long)oi->ip_blkno,
+ mlog(0, "Owner %llu, query block %llu (inline = %u)\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long) bh->b_blocknr,
- !!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE));
+ !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));
- if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE)
- index = ocfs2_search_cache_array(&oi->ip_metadata_cache,
- bh->b_blocknr);
+ if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
+ index = ocfs2_search_cache_array(ci, bh->b_blocknr);
else
- item = ocfs2_search_cache_tree(&oi->ip_metadata_cache,
- bh->b_blocknr);
+ item = ocfs2_search_cache_tree(ci, bh->b_blocknr);
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_unlock(ci);
mlog(0, "index = %d, item = %p\n", index, item);
@@ -214,7 +276,7 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
*
* This can be called under lock_buffer()
*/
-int ocfs2_buffer_uptodate(struct inode *inode,
+int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
/* Doesn't matter if the bh is in our cache or not -- if it's
@@ -230,24 +292,24 @@ int ocfs2_buffer_uptodate(struct inode *inode,
/* Ok, locally the buffer is marked as up to date, now search
* our cache to see if we can trust that. */
- return ocfs2_buffer_cached(OCFS2_I(inode), bh);
+ return ocfs2_buffer_cached(ci, bh);
}
-/*
+/*
* Determine whether a buffer is currently out on a read-ahead request.
- * ip_io_sem should be held to serialize submitters with the logic here.
+ * ci_io_sem should be held to serialize submitters with the logic here.
*/
-int ocfs2_buffer_read_ahead(struct inode *inode,
+int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
- return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh);
+ return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh);
}
/* Requires ip_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
sector_t block)
{
- BUG_ON(ci->ci_num_cached >= OCFS2_INODE_MAX_CACHE_ARRAY);
+ BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);
mlog(0, "block %llu takes position %u\n", (unsigned long long) block,
ci->ci_num_cached);
@@ -292,66 +354,64 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
ci->ci_num_cached++;
}
-static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi,
- struct ocfs2_caching_info *ci)
+/* co_cache_lock() must be held */
+static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci)
{
- assert_spin_locked(&oi->ip_lock);
-
- return (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) &&
- (ci->ci_num_cached < OCFS2_INODE_MAX_CACHE_ARRAY);
+ return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
+ (ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}
-/* tree should be exactly OCFS2_INODE_MAX_CACHE_ARRAY wide. NULL the
+/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
* pointers in tree after we use them - this allows caller to detect
- * when to free in case of error. */
-static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
+ * when to free in case of error.
+ *
+ * The co_cache_lock() must be held. */
+static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
struct ocfs2_meta_cache_item **tree)
{
int i;
- struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
- mlog_bug_on_msg(ci->ci_num_cached != OCFS2_INODE_MAX_CACHE_ARRAY,
- "Inode %llu, num cached = %u, should be %u\n",
- (unsigned long long)oi->ip_blkno, ci->ci_num_cached,
- OCFS2_INODE_MAX_CACHE_ARRAY);
- mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE),
- "Inode %llu not marked as inline anymore!\n",
- (unsigned long long)oi->ip_blkno);
- assert_spin_locked(&oi->ip_lock);
+ mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
+ "Owner %llu, num cached = %u, should be %u\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
+ mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
+ "Owner %llu not marked as inline anymore!\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci));
/* Be careful to initialize the tree members *first* because
* once the ci_tree is used, the array is junk... */
- for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++)
+ for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
tree[i]->c_block = ci->ci_cache.ci_array[i];
- oi->ip_flags &= ~OCFS2_INODE_CACHE_INLINE;
+ ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
ci->ci_cache.ci_tree = RB_ROOT;
/* this will be set again by __ocfs2_insert_cache_tree */
ci->ci_num_cached = 0;
- for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
+ for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
__ocfs2_insert_cache_tree(ci, tree[i]);
tree[i] = NULL;
}
mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n",
- (unsigned long long)oi->ip_blkno, oi->ip_flags, ci->ci_num_cached);
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ ci->ci_flags, ci->ci_num_cached);
}
/* Slow path function - memory allocation is necessary. See the
* comment above ocfs2_set_buffer_uptodate for more information. */
-static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
+static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
sector_t block,
int expand_tree)
{
int i;
- struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
struct ocfs2_meta_cache_item *new = NULL;
- struct ocfs2_meta_cache_item *tree[OCFS2_INODE_MAX_CACHE_ARRAY] =
+ struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
{ NULL, };
- mlog(0, "Inode %llu, block %llu, expand = %d\n",
- (unsigned long long)oi->ip_blkno,
+ mlog(0, "Owner %llu, block %llu, expand = %d\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)block, expand_tree);
new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
@@ -364,7 +424,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
if (expand_tree) {
/* Do *not* allocate an array here - the removal code
* has no way of tracking that. */
- for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
+ for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
GFP_NOFS);
if (!tree[i]) {
@@ -376,21 +436,21 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
}
}
- spin_lock(&oi->ip_lock);
- if (ocfs2_insert_can_use_array(oi, ci)) {
+ ocfs2_metadata_cache_lock(ci);
+ if (ocfs2_insert_can_use_array(ci)) {
mlog(0, "Someone cleared the tree underneath us\n");
/* Ok, items were removed from the cache in between
* locks. Detect this and revert back to the fast path */
ocfs2_append_cache_array(ci, block);
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_unlock(ci);
goto out_free;
}
if (expand_tree)
- ocfs2_expand_cache(oi, tree);
+ ocfs2_expand_cache(ci, tree);
__ocfs2_insert_cache_tree(ci, new);
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_unlock(ci);
new = NULL;
out_free:
@@ -400,14 +460,14 @@ out_free:
/* If these were used, then ocfs2_expand_cache re-set them to
* NULL for us. */
if (tree[0]) {
- for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++)
+ for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
if (tree[i])
kmem_cache_free(ocfs2_uptodate_cachep,
tree[i]);
}
}
-/* Item insertion is guarded by ip_io_mutex, so the insertion path takes
+/* Item insertion is guarded by co_io_lock(), so the insertion path takes
* advantage of this by not rechecking for a duplicate insert during
* the slow case. Additionally, if the cache needs to be bumped up to
* a tree, the code will not recheck after acquiring the lock --
@@ -425,59 +485,55 @@ out_free:
* Readahead buffers can be passed in here before the I/O request is
* completed.
*/
-void ocfs2_set_buffer_uptodate(struct inode *inode,
+void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
int expand;
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
/* The block may very well exist in our cache already, so avoid
* doing any more work in that case. */
- if (ocfs2_buffer_cached(oi, bh))
+ if (ocfs2_buffer_cached(ci, bh))
return;
- mlog(0, "Inode %llu, inserting block %llu\n",
- (unsigned long long)oi->ip_blkno,
+ mlog(0, "Owner %llu, inserting block %llu\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long)bh->b_blocknr);
/* No need to recheck under spinlock - insertion is guarded by
- * ip_io_mutex */
- spin_lock(&oi->ip_lock);
- if (ocfs2_insert_can_use_array(oi, ci)) {
+ * co_io_lock() */
+ ocfs2_metadata_cache_lock(ci);
+ if (ocfs2_insert_can_use_array(ci)) {
/* Fast case - it's an array and there's a free
* spot. */
ocfs2_append_cache_array(ci, bh->b_blocknr);
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_unlock(ci);
return;
}
expand = 0;
- if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) {
+ if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
/* We need to bump things up to a tree. */
expand = 1;
}
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_unlock(ci);
- __ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand);
+ __ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand);
}
/* Called against a newly allocated buffer. Most likely nobody should
* be able to read this sort of metadata while it's still being
- * allocated, but this is careful to take ip_io_mutex anyway. */
-void ocfs2_set_new_buffer_uptodate(struct inode *inode,
+ * allocated, but this is careful to take co_io_lock() anyway. */
+void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
-
/* This should definitely *not* exist in our cache */
- BUG_ON(ocfs2_buffer_cached(oi, bh));
+ BUG_ON(ocfs2_buffer_cached(ci, bh));
set_buffer_uptodate(bh);
- mutex_lock(&oi->ip_io_mutex);
- ocfs2_set_buffer_uptodate(inode, bh);
- mutex_unlock(&oi->ip_io_mutex);
+ ocfs2_metadata_cache_io_lock(ci);
+ ocfs2_set_buffer_uptodate(ci, bh);
+ ocfs2_metadata_cache_io_unlock(ci);
}
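
For callers elsewhere in the tree, the conversion driven by this change is mechanical: the inode argument to the uptodate helpers becomes the inode's caching info, obtained through INODE_CACHE() (presumably a small helper returning what used to be &oi->ip_metadata_cache, as the removed lines above suggest). A before/after sketch, mirroring the xattr.c hunks later in this patch:

	/* Before this series (inode-based API): */
	if (!ocfs2_buffer_uptodate(inode, bh))
		ocfs2_set_new_buffer_uptodate(inode, bh);

	/* After (caching-info API): */
	if (!ocfs2_buffer_uptodate(INODE_CACHE(inode), bh))
		ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), bh);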
/* Requires ip_lock. */
@@ -487,7 +543,7 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
sector_t *array = ci->ci_cache.ci_array;
int bytes;
- BUG_ON(index < 0 || index >= OCFS2_INODE_MAX_CACHE_ARRAY);
+ BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
BUG_ON(index >= ci->ci_num_cached);
BUG_ON(!ci->ci_num_cached);
@@ -515,21 +571,19 @@ static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
ci->ci_num_cached--;
}
-static void ocfs2_remove_block_from_cache(struct inode *inode,
+static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
sector_t block)
{
int index;
struct ocfs2_meta_cache_item *item = NULL;
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
- spin_lock(&oi->ip_lock);
- mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n",
- (unsigned long long)oi->ip_blkno,
+ ocfs2_metadata_cache_lock(ci);
+ mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
(unsigned long long) block, ci->ci_num_cached,
- oi->ip_flags & OCFS2_INODE_CACHE_INLINE);
+ ci->ci_flags & OCFS2_CACHE_FL_INLINE);
- if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) {
+ if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
index = ocfs2_search_cache_array(ci, block);
if (index != -1)
ocfs2_remove_metadata_array(ci, index);
@@ -538,7 +592,7 @@ static void ocfs2_remove_block_from_cache(struct inode *inode,
if (item)
ocfs2_remove_metadata_tree(ci, item);
}
- spin_unlock(&oi->ip_lock);
+ ocfs2_metadata_cache_unlock(ci);
if (item)
kmem_cache_free(ocfs2_uptodate_cachep, item);
@@ -549,23 +603,24 @@ static void ocfs2_remove_block_from_cache(struct inode *inode,
* bother reverting things to an inlined array in the case of a remove
* which moves us back under the limit.
*/
-void ocfs2_remove_from_cache(struct inode *inode,
+void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
struct buffer_head *bh)
{
sector_t block = bh->b_blocknr;
- ocfs2_remove_block_from_cache(inode, block);
+ ocfs2_remove_block_from_cache(ci, block);
}
/* Called when we remove xattr clusters from an inode. */
-void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode,
+void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
sector_t block,
u32 c_len)
{
- unsigned int i, b_len = ocfs2_clusters_to_blocks(inode->i_sb, 1) * c_len;
+ struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len;
for (i = 0; i < b_len; i++, block++)
- ocfs2_remove_block_from_cache(inode, block);
+ ocfs2_remove_block_from_cache(ci, block);
}
int __init init_ocfs2_uptodate_cache(void)
@@ -577,7 +632,7 @@ int __init init_ocfs2_uptodate_cache(void)
return -ENOMEM;
mlog(0, "%u inlined cache items per inode.\n",
- OCFS2_INODE_MAX_CACHE_ARRAY);
+ OCFS2_CACHE_INFO_MAX_ARRAY);
return 0;
}
diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h
index 531b4b3a0c4..0d826fe2da0 100644
--- a/fs/ocfs2/uptodate.h
+++ b/fs/ocfs2/uptodate.h
@@ -26,24 +26,59 @@
#ifndef OCFS2_UPTODATE_H
#define OCFS2_UPTODATE_H
+/*
+ * The caching code relies on locking provided by the user of
+ * struct ocfs2_caching_info. These operations connect that up.
+ */
+struct ocfs2_caching_operations {
+ /*
+ * A u64 representing the owning structure. Usually this
+ * is the block number (i_blkno or whatnot). This is used so
+ * that caching log messages can identify the owning structure.
+ */
+ u64 (*co_owner)(struct ocfs2_caching_info *ci);
+
+ /* The superblock is needed during I/O. */
+ struct super_block *(*co_get_super)(struct ocfs2_caching_info *ci);
+ /*
+ * Lock and unlock the caching data. These will not sleep, and
+ * should probably be spinlocks.
+ */
+ void (*co_cache_lock)(struct ocfs2_caching_info *ci);
+ void (*co_cache_unlock)(struct ocfs2_caching_info *ci);
+
+ /*
+ * Lock and unlock for disk I/O. These will sleep, and should
+ * be mutexes.
+ */
+ void (*co_io_lock)(struct ocfs2_caching_info *ci);
+ void (*co_io_unlock)(struct ocfs2_caching_info *ci);
+};
+
int __init init_ocfs2_uptodate_cache(void);
void exit_ocfs2_uptodate_cache(void);
-void ocfs2_metadata_cache_init(struct inode *inode);
-void ocfs2_metadata_cache_purge(struct inode *inode);
+void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
+ const struct ocfs2_caching_operations *ops);
+void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci);
+void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci);
+
+u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci);
+void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci);
+void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci);
-int ocfs2_buffer_uptodate(struct inode *inode,
+int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh);
-void ocfs2_set_buffer_uptodate(struct inode *inode,
+void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh);
-void ocfs2_set_new_buffer_uptodate(struct inode *inode,
+void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
struct buffer_head *bh);
-void ocfs2_remove_from_cache(struct inode *inode,
+void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
struct buffer_head *bh);
-void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode,
+void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
sector_t block,
u32 c_len);
-int ocfs2_buffer_read_ahead(struct inode *inode,
+int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
struct buffer_head *bh);
#endif /* OCFS2_UPTODATE_H */
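
To make the contract above concrete, here is a minimal sketch of how an owning structure might wire itself up to the caching code. Every my_/mo_ name is hypothetical (the real implementations live with each owner, e.g. the inode code, and are not part of this hunk), and the usual spin_lock_init()/mutex_init() calls and header includes are assumed:

struct my_owner {
	u64				mo_blkno;
	struct super_block		*mo_sb;
	spinlock_t			mo_lock;	/* protects the metadata cache */
	struct mutex			mo_io_mutex;	/* serializes metadata I/O */
	struct ocfs2_caching_info	mo_caching;
};

static inline struct my_owner *MY_OWNER(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct my_owner, mo_caching);
}

static u64 my_owner_co_owner(struct ocfs2_caching_info *ci)
{
	return MY_OWNER(ci)->mo_blkno;
}

static struct super_block *my_owner_co_get_super(struct ocfs2_caching_info *ci)
{
	return MY_OWNER(ci)->mo_sb;
}

static void my_owner_co_cache_lock(struct ocfs2_caching_info *ci)
{
	spin_lock(&MY_OWNER(ci)->mo_lock);
}

static void my_owner_co_cache_unlock(struct ocfs2_caching_info *ci)
{
	spin_unlock(&MY_OWNER(ci)->mo_lock);
}

static void my_owner_co_io_lock(struct ocfs2_caching_info *ci)
{
	mutex_lock(&MY_OWNER(ci)->mo_io_mutex);
}

static void my_owner_co_io_unlock(struct ocfs2_caching_info *ci)
{
	mutex_unlock(&MY_OWNER(ci)->mo_io_mutex);
}

static const struct ocfs2_caching_operations my_owner_caching_ops = {
	.co_owner	 = my_owner_co_owner,
	.co_get_super	 = my_owner_co_get_super,
	.co_cache_lock	 = my_owner_co_cache_lock,
	.co_cache_unlock = my_owner_co_cache_unlock,
	.co_io_lock	 = my_owner_co_io_lock,
	.co_io_unlock	 = my_owner_co_io_unlock,
};

/* At setup time: ocfs2_metadata_cache_init(&owner->mo_caching, &my_owner_caching_ops); */

The cache lock is a spinlock and the I/O lock a mutex, matching the sleeping constraints documented in the operation comments above.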
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d1a27cda984..fe3419068df 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -55,7 +55,8 @@
#include "buffer_head_io.h"
#include "super.h"
#include "xattr.h"
-
+#include "refcounttree.h"
+#include "acl.h"
struct ocfs2_xattr_def_value_root {
struct ocfs2_xattr_value_root xv;
@@ -140,7 +141,7 @@ struct ocfs2_xattr_search {
int not_found;
};
-static int ocfs2_xattr_bucket_get_name_value(struct inode *inode,
+static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
int *block_off,
@@ -157,7 +158,7 @@ static int ocfs2_xattr_index_block_find(struct inode *inode,
struct ocfs2_xattr_search *xs);
static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
- struct ocfs2_xattr_tree_root *xt,
+ struct buffer_head *blk_bh,
char *buffer,
size_t buffer_size);
@@ -170,12 +171,42 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt);
-static int ocfs2_delete_xattr_index_block(struct inode *inode,
- struct buffer_head *xb_bh);
+typedef int (xattr_tree_rec_func)(struct inode *inode,
+ struct buffer_head *root_bh,
+ u64 blkno, u32 cpos, u32 len, void *para);
+static int ocfs2_iterate_xattr_index_block(struct inode *inode,
+ struct buffer_head *root_bh,
+ xattr_tree_rec_func *rec_func,
+ void *para);
+static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
+ struct ocfs2_xattr_bucket *bucket,
+ void *para);
+static int ocfs2_rm_xattr_cluster(struct inode *inode,
+ struct buffer_head *root_bh,
+ u64 blkno,
+ u32 cpos,
+ u32 len,
+ void *para);
+
static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle,
u64 src_blk, u64 last_blk, u64 to_blk,
unsigned int start_bucket,
u32 *first_hash);
+static int ocfs2_prepare_refcount_xattr(struct inode *inode,
+ struct ocfs2_dinode *di,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xis,
+ struct ocfs2_xattr_search *xbs,
+ struct ocfs2_refcount_tree **ref_tree,
+ int *meta_need,
+ int *credits);
+static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
+ struct ocfs2_xattr_bucket *bucket,
+ int offset,
+ struct ocfs2_xattr_value_root **xv,
+ struct buffer_head **bh);
+static int ocfs2_xattr_security_set(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags);
static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
@@ -254,9 +285,9 @@ static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
break;
}
- if (!ocfs2_buffer_uptodate(bucket->bu_inode,
+ if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i]))
- ocfs2_set_new_buffer_uptodate(bucket->bu_inode,
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i]);
}
@@ -271,7 +302,7 @@ static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
{
int rc;
- rc = ocfs2_read_blocks(bucket->bu_inode, xb_blkno,
+ rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
bucket->bu_blocks, bucket->bu_bhs, 0,
NULL);
if (!rc) {
@@ -297,7 +328,8 @@ static int ocfs2_xattr_bucket_journal_access(handle_t *handle,
int i, rc = 0;
for (i = 0; i < bucket->bu_blocks; i++) {
- rc = ocfs2_journal_access(handle, bucket->bu_inode,
+ rc = ocfs2_journal_access(handle,
+ INODE_CACHE(bucket->bu_inode),
bucket->bu_bhs[i], type);
if (rc) {
mlog_errno(rc);
@@ -399,7 +431,7 @@ static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
int rc;
struct buffer_head *tmp = *bh;
- rc = ocfs2_read_block(inode, xb_blkno, &tmp,
+ rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp,
ocfs2_validate_xattr_block);
/* If ocfs2_read_block() got us a new bh, pass it up. */
@@ -596,15 +628,14 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
int status = 0;
handle_t *handle = ctxt->handle;
enum ocfs2_alloc_restarted why;
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
struct ocfs2_extent_tree et;
mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);
- ocfs2_init_xattr_value_extent_tree(&et, inode, vb);
+ ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
- status = vb->vb_access(handle, inode, vb->vb_bh,
+ status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
@@ -612,13 +643,11 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
}
prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
- status = ocfs2_add_clusters_in_btree(osb,
- inode,
+ status = ocfs2_add_clusters_in_btree(handle,
+ &et,
&logical_start,
clusters_to_add,
0,
- &et,
- handle,
ctxt->data_ac,
ctxt->meta_ac,
&why);
@@ -649,6 +678,7 @@ leave:
static int __ocfs2_remove_xattr_range(struct inode *inode,
struct ocfs2_xattr_value_buf *vb,
u32 cpos, u32 phys_cpos, u32 len,
+ unsigned int ext_flags,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret;
@@ -656,16 +686,16 @@ static int __ocfs2_remove_xattr_range(struct inode *inode,
handle_t *handle = ctxt->handle;
struct ocfs2_extent_tree et;
- ocfs2_init_xattr_value_extent_tree(&et, inode, vb);
+ ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
- ret = vb->vb_access(handle, inode, vb->vb_bh,
+ ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, ctxt->meta_ac,
+ ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
&ctxt->dealloc);
if (ret) {
mlog_errno(ret);
@@ -680,7 +710,14 @@ static int __ocfs2_remove_xattr_range(struct inode *inode,
goto out;
}
- ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, phys_blkno, len);
+ if (ext_flags & OCFS2_EXT_REFCOUNTED)
+ ret = ocfs2_decrease_refcount(inode, handle,
+ ocfs2_blocks_to_clusters(inode->i_sb,
+ phys_blkno),
+ len, ctxt->meta_ac, &ctxt->dealloc, 1);
+ else
+ ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
+ phys_blkno, len);
if (ret)
mlog_errno(ret);
@@ -695,6 +732,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode,
struct ocfs2_xattr_set_ctxt *ctxt)
{
int ret = 0;
+ unsigned int ext_flags;
u32 trunc_len, cpos, phys_cpos, alloc_size;
u64 block;
@@ -706,7 +744,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode,
while (trunc_len) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
&alloc_size,
- &vb->vb_xv->xr_list);
+ &vb->vb_xv->xr_list, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
@@ -717,15 +755,15 @@ static int ocfs2_xattr_shrink_size(struct inode *inode,
ret = __ocfs2_remove_xattr_range(inode, vb, cpos,
phys_cpos, alloc_size,
- ctxt);
+ ext_flags, ctxt);
if (ret) {
mlog_errno(ret);
goto out;
}
block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
- ocfs2_remove_xattr_clusters_from_cache(inode, block,
- alloc_size);
+ ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode),
+ block, alloc_size);
cpos += alloc_size;
trunc_len -= alloc_size;
}
@@ -810,6 +848,23 @@ static int ocfs2_xattr_list_entries(struct inode *inode,
return result;
}
+int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
+ struct ocfs2_dinode *di)
+{
+ struct ocfs2_xattr_header *xh;
+ int i;
+
+ xh = (struct ocfs2_xattr_header *)
+ ((void *)di + inode->i_sb->s_blocksize -
+ le16_to_cpu(di->i_xattr_inline_size));
+
+ for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
+ if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
+ return 1;
+
+ return 0;
+}
+
static int ocfs2_xattr_ibody_list(struct inode *inode,
struct ocfs2_dinode *di,
char *buffer,
@@ -855,11 +910,9 @@ static int ocfs2_xattr_block_list(struct inode *inode,
struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
ret = ocfs2_xattr_list_entries(inode, header,
buffer, buffer_size);
- } else {
- struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
- ret = ocfs2_xattr_tree_list_index_block(inode, xt,
+ } else
+ ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh,
buffer, buffer_size);
- }
brelse(blk_bh);
@@ -961,7 +1014,7 @@ static int ocfs2_xattr_get_value_outside(struct inode *inode,
cpos = 0;
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
- &num_clusters, el);
+ &num_clusters, el, NULL);
if (ret) {
mlog_errno(ret);
goto out;
@@ -970,7 +1023,8 @@ static int ocfs2_xattr_get_value_outside(struct inode *inode,
blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
/* Copy ocfs2_xattr_value */
for (i = 0; i < num_clusters * bpc; i++, blkno++) {
- ret = ocfs2_read_block(inode, blkno, &bh, NULL);
+ ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
+ &bh, NULL);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1085,7 +1139,7 @@ static int ocfs2_xattr_block_get(struct inode *inode,
i = xs->here - xs->header->xh_entries;
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
- ret = ocfs2_xattr_bucket_get_name_value(inode,
+ ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(xs->bucket),
i,
&block_off,
@@ -1183,7 +1237,7 @@ static int ocfs2_xattr_get(struct inode *inode,
static int __ocfs2_xattr_set_value_outside(struct inode *inode,
handle_t *handle,
- struct ocfs2_xattr_value_root *xv,
+ struct ocfs2_xattr_value_buf *vb,
const void *value,
int value_len)
{
@@ -1194,28 +1248,34 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode,
u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len);
u64 blkno;
struct buffer_head *bh = NULL;
+ unsigned int ext_flags;
+ struct ocfs2_xattr_value_root *xv = vb->vb_xv;
BUG_ON(clusters > le32_to_cpu(xv->xr_clusters));
while (cpos < clusters) {
ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
- &num_clusters, &xv->xr_list);
+ &num_clusters, &xv->xr_list,
+ &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
+ BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+
blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
for (i = 0; i < num_clusters * bpc; i++, blkno++) {
- ret = ocfs2_read_block(inode, blkno, &bh, NULL);
+ ret = ocfs2_read_block(INODE_CACHE(inode), blkno,
+ &bh, NULL);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access(handle,
- inode,
+ INODE_CACHE(inode),
bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
@@ -1266,7 +1326,7 @@ static int ocfs2_xattr_cleanup(struct inode *inode,
void *val = xs->base + offs;
size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- ret = vb->vb_access(handle, inode, vb->vb_bh,
+ ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1294,7 +1354,7 @@ static int ocfs2_xattr_update_entry(struct inode *inode,
{
int ret;
- ret = vb->vb_access(handle, inode, vb->vb_bh,
+ ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1355,7 +1415,7 @@ static int ocfs2_xattr_set_value_outside(struct inode *inode,
mlog_errno(ret);
return ret;
}
- ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb->vb_xv,
+ ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb,
xi->value, xi->value_len);
if (ret < 0)
mlog_errno(ret);
@@ -1594,7 +1654,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode,
ret = __ocfs2_xattr_set_value_outside(inode,
handle,
- vb.vb_xv,
+ &vb,
xi->value,
xi->value_len);
if (ret < 0)
@@ -1615,7 +1675,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode,
}
}
- ret = ocfs2_journal_access_di(handle, inode, xs->inode_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1623,7 +1683,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode,
}
if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = vb.vb_access(handle, inode, vb.vb_bh,
+ ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1700,51 +1760,112 @@ out:
return ret;
}
+/*
+ * In xattr remove, if the value is stored outside and refcounted, we may
+ * have to split the refcount tree, so we need the allocators.
+ */
+static int ocfs2_lock_xattr_remove_allocators(struct inode *inode,
+ struct ocfs2_xattr_value_root *xv,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_alloc_context **meta_ac,
+ int *ref_credits)
+{
+ int ret, meta_add = 0;
+ u32 p_cluster, num_clusters;
+ unsigned int ext_flags;
+
+ *ref_credits = 0;
+ ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
+ &num_clusters,
+ &xv->xr_list,
+ &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
+ goto out;
+
+ ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci,
+ ref_root_bh, xv,
+ &meta_add, ref_credits);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
+ meta_add, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
static int ocfs2_remove_value_outside(struct inode*inode,
struct ocfs2_xattr_value_buf *vb,
- struct ocfs2_xattr_header *header)
+ struct ocfs2_xattr_header *header,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh)
{
- int ret = 0, i;
+ int ret = 0, i, ref_credits;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
+ void *val;
ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
- ctxt.handle = ocfs2_start_trans(osb,
- ocfs2_remove_extent_credits(osb->sb));
- if (IS_ERR(ctxt.handle)) {
- ret = PTR_ERR(ctxt.handle);
- mlog_errno(ret);
- goto out;
- }
-
for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
struct ocfs2_xattr_entry *entry = &header->xh_entries[i];
- if (!ocfs2_xattr_is_local(entry)) {
- void *val;
+ if (ocfs2_xattr_is_local(entry))
+ continue;
- val = (void *)header +
- le16_to_cpu(entry->xe_name_offset);
- vb->vb_xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(entry->xe_name_len));
- ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- break;
- }
+ val = (void *)header +
+ le16_to_cpu(entry->xe_name_offset);
+ vb->vb_xv = (struct ocfs2_xattr_value_root *)
+ (val + OCFS2_XATTR_SIZE(entry->xe_name_len));
+
+ ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv,
+ ref_ci, ref_root_bh,
+ &ctxt.meta_ac,
+ &ref_credits);
+
+ ctxt.handle = ocfs2_start_trans(osb, ref_credits +
+ ocfs2_remove_extent_credits(osb->sb));
+ if (IS_ERR(ctxt.handle)) {
+ ret = PTR_ERR(ctxt.handle);
+ mlog_errno(ret);
+ break;
+ }
+
+ ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
+
+ ocfs2_commit_trans(osb, ctxt.handle);
+ if (ctxt.meta_ac) {
+ ocfs2_free_alloc_context(ctxt.meta_ac);
+ ctxt.meta_ac = NULL;
}
}
- ocfs2_commit_trans(osb, ctxt.handle);
+ if (ctxt.meta_ac)
+ ocfs2_free_alloc_context(ctxt.meta_ac);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &ctxt.dealloc);
-out:
return ret;
}
static int ocfs2_xattr_ibody_remove(struct inode *inode,
- struct buffer_head *di_bh)
+ struct buffer_head *di_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
@@ -1759,13 +1880,21 @@ static int ocfs2_xattr_ibody_remove(struct inode *inode,
((void *)di + inode->i_sb->s_blocksize -
le16_to_cpu(di->i_xattr_inline_size));
- ret = ocfs2_remove_value_outside(inode, &vb, header);
+ ret = ocfs2_remove_value_outside(inode, &vb, header,
+ ref_ci, ref_root_bh);
return ret;
}
+struct ocfs2_rm_xattr_bucket_para {
+ struct ocfs2_caching_info *ref_ci;
+ struct buffer_head *ref_root_bh;
+};
+
static int ocfs2_xattr_block_remove(struct inode *inode,
- struct buffer_head *blk_bh)
+ struct buffer_head *blk_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh)
{
struct ocfs2_xattr_block *xb;
int ret = 0;
@@ -1773,19 +1902,29 @@ static int ocfs2_xattr_block_remove(struct inode *inode,
.vb_bh = blk_bh,
.vb_access = ocfs2_journal_access_xb,
};
+ struct ocfs2_rm_xattr_bucket_para args = {
+ .ref_ci = ref_ci,
+ .ref_root_bh = ref_root_bh,
+ };
xb = (struct ocfs2_xattr_block *)blk_bh->b_data;
if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header);
- ret = ocfs2_remove_value_outside(inode, &vb, header);
+ ret = ocfs2_remove_value_outside(inode, &vb, header,
+ ref_ci, ref_root_bh);
} else
- ret = ocfs2_delete_xattr_index_block(inode, blk_bh);
+ ret = ocfs2_iterate_xattr_index_block(inode,
+ blk_bh,
+ ocfs2_rm_xattr_cluster,
+ &args);
return ret;
}
static int ocfs2_xattr_free_block(struct inode *inode,
- u64 block)
+ u64 block,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh)
{
struct inode *xb_alloc_inode;
struct buffer_head *xb_alloc_bh = NULL;
@@ -1803,7 +1942,7 @@ static int ocfs2_xattr_free_block(struct inode *inode,
goto out;
}
- ret = ocfs2_xattr_block_remove(inode, blk_bh);
+ ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -1863,6 +2002,9 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_refcount_tree *ref_tree = NULL;
+ struct buffer_head *ref_root_bh = NULL;
+ struct ocfs2_caching_info *ref_ci = NULL;
handle_t *handle;
int ret;
@@ -1872,8 +2014,21 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL))
return 0;
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
+ ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
+ le64_to_cpu(di->i_refcount_loc),
+ 1, &ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ ref_ci = &ref_tree->rf_ci;
+
+ }
+
if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
- ret = ocfs2_xattr_ibody_remove(inode, di_bh);
+ ret = ocfs2_xattr_ibody_remove(inode, di_bh,
+ ref_ci, ref_root_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -1882,7 +2037,8 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
if (di->i_xattr_loc) {
ret = ocfs2_xattr_free_block(inode,
- le64_to_cpu(di->i_xattr_loc));
+ le64_to_cpu(di->i_xattr_loc),
+ ref_ci, ref_root_bh);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -1896,7 +2052,7 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
mlog_errno(ret);
goto out;
}
- ret = ocfs2_journal_access_di(handle, inode, di_bh,
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -1916,6 +2072,9 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh)
out_commit:
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
+ brelse(ref_root_bh);
return ret;
}
@@ -2083,6 +2242,84 @@ cleanup:
return ret;
}
+static int ocfs2_create_xattr_block(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *inode_bh,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **ret_bh,
+ int indexed)
+{
+ int ret;
+ u16 suballoc_bit_start;
+ u32 num_got;
+ u64 first_blkno;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *new_bh = NULL;
+ struct ocfs2_xattr_block *xblk;
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto end;
+ }
+
+ ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
+ &suballoc_bit_start, &num_got,
+ &first_blkno);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto end;
+ }
+
+ new_bh = sb_getblk(inode->i_sb, first_blkno);
+ ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
+
+ ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode),
+ new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto end;
+ }
+
+ /* Initialize ocfs2_xattr_block */
+ xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
+ memset(xblk, 0, inode->i_sb->s_blocksize);
+ strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
+ xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
+ xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
+ xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
+ xblk->xb_blkno = cpu_to_le64(first_blkno);
+
+ if (indexed) {
+ struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
+ xr->xt_clusters = cpu_to_le32(1);
+ xr->xt_last_eb_blk = 0;
+ xr->xt_list.l_tree_depth = 0;
+ xr->xt_list.l_count = cpu_to_le16(
+ ocfs2_xattr_recs_per_xb(inode->i_sb));
+ xr->xt_list.l_next_free_rec = cpu_to_le16(1);
+ xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
+ }
+
+ ret = ocfs2_journal_dirty(handle, new_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto end;
+ }
+ di->i_xattr_loc = cpu_to_le64(first_blkno);
+ ocfs2_journal_dirty(handle, inode_bh);
+
+ *ret_bh = new_bh;
+ new_bh = NULL;
+
+end:
+ brelse(new_bh);
+ return ret;
+}
+
/*
* ocfs2_xattr_block_set()
*
@@ -2095,63 +2332,24 @@ static int ocfs2_xattr_block_set(struct inode *inode,
struct ocfs2_xattr_set_ctxt *ctxt)
{
struct buffer_head *new_bh = NULL;
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
handle_t *handle = ctxt->handle;
struct ocfs2_xattr_block *xblk = NULL;
- u16 suballoc_bit_start;
- u32 num_got;
- u64 first_blkno;
int ret;
if (!xs->xattr_bh) {
- ret = ocfs2_journal_access_di(handle, inode, xs->inode_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
- if (ret < 0) {
- mlog_errno(ret);
- goto end;
- }
-
- ret = ocfs2_claim_metadata(osb, handle, ctxt->meta_ac, 1,
- &suballoc_bit_start, &num_got,
- &first_blkno);
- if (ret < 0) {
- mlog_errno(ret);
- goto end;
- }
-
- new_bh = sb_getblk(inode->i_sb, first_blkno);
- ocfs2_set_new_buffer_uptodate(inode, new_bh);
-
- ret = ocfs2_journal_access_xb(handle, inode, new_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
- if (ret < 0) {
+ ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh,
+ ctxt->meta_ac, &new_bh, 0);
+ if (ret) {
mlog_errno(ret);
goto end;
}
- /* Initialize ocfs2_xattr_block */
xs->xattr_bh = new_bh;
- xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
- memset(xblk, 0, inode->i_sb->s_blocksize);
- strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
- xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
- xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
- xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
- xblk->xb_blkno = cpu_to_le64(first_blkno);
-
+ xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
xs->header = &xblk->xb_attrs.xb_header;
xs->base = (void *)xs->header;
xs->end = (void *)xblk + inode->i_sb->s_blocksize;
xs->here = xs->header->xh_entries;
-
- ret = ocfs2_journal_dirty(handle, new_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto end;
- }
- di->i_xattr_loc = cpu_to_le64(first_blkno);
- ocfs2_journal_dirty(handle, xs->inode_bh);
} else
xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
@@ -2273,7 +2471,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
old_in_xb = 1;
if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
- ret = ocfs2_xattr_bucket_get_name_value(inode,
+ ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(xbs->bucket),
i, &block_off,
&name_offset);
@@ -2428,6 +2626,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
struct ocfs2_xattr_search *xis,
struct ocfs2_xattr_search *xbs,
struct ocfs2_xattr_set_ctxt *ctxt,
+ int extra_meta,
int *credits)
{
int clusters_add, meta_add, ret;
@@ -2444,6 +2643,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
return ret;
}
+ meta_add += extra_meta;
mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
"credits = %d\n", xi->name, meta_add, clusters_add, *credits);
@@ -2598,7 +2798,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
if (!ret) {
/* Update inode ctime. */
- ret = ocfs2_journal_access_di(ctxt->handle, inode,
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
xis->inode_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
@@ -2711,10 +2911,11 @@ int ocfs2_xattr_set(struct inode *inode,
{
struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di;
- int ret, credits;
+ int ret, credits, ref_meta = 0, ref_credits = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
+ struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_xattr_info xi = {
.name_index = name_index,
@@ -2779,6 +2980,17 @@ int ocfs2_xattr_set(struct inode *inode,
goto cleanup;
}
+	/* Check whether the value is refcounted and do some preparation. */
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL &&
+ (!xis.not_found || !xbs.not_found)) {
+ ret = ocfs2_prepare_refcount_xattr(inode, di, &xi,
+ &xis, &xbs, &ref_tree,
+ &ref_meta, &ref_credits);
+ if (ret) {
+ mlog_errno(ret);
+ goto cleanup;
+ }
+ }
mutex_lock(&tl_inode->i_mutex);
@@ -2793,7 +3005,7 @@ int ocfs2_xattr_set(struct inode *inode,
mutex_unlock(&tl_inode->i_mutex);
ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis,
- &xbs, &ctxt, &credits);
+ &xbs, &ctxt, ref_meta, &credits);
if (ret) {
mlog_errno(ret);
goto cleanup;
@@ -2801,7 +3013,7 @@ int ocfs2_xattr_set(struct inode *inode,
/* we need to update inode's ctime field, so add credit for it. */
credits += OCFS2_INODE_UPDATE_CREDITS;
- ctxt.handle = ocfs2_start_trans(osb, credits);
+ ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
@@ -2819,8 +3031,16 @@ int ocfs2_xattr_set(struct inode *inode,
if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &ctxt.dealloc);
+
cleanup:
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
up_write(&OCFS2_I(inode)->ip_xattr_sem);
+ if (!value && !ret) {
+ ret = ocfs2_try_remove_refcount_tree(inode, di_bh);
+ if (ret)
+ mlog_errno(ret);
+ }
ocfs2_inode_unlock(inode, 1);
cleanup_nolock:
brelse(di_bh);
@@ -2849,7 +3069,8 @@ static int ocfs2_xattr_get_rec(struct inode *inode,
u64 e_blkno = 0;
if (el->l_tree_depth) {
- ret = ocfs2_find_leaf(inode, el, name_hash, &eb_bh);
+ ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash,
+ &eb_bh);
if (ret) {
mlog_errno(ret);
goto out;
@@ -2931,7 +3152,7 @@ static int ocfs2_find_xe_in_bucket(struct inode *inode,
if (cmp)
continue;
- ret = ocfs2_xattr_bucket_get_name_value(inode,
+ ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
xh,
i,
&block_off,
@@ -3175,7 +3396,7 @@ struct ocfs2_xattr_tree_list {
size_t result;
};
-static int ocfs2_xattr_bucket_get_name_value(struct inode *inode,
+static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
int *block_off,
@@ -3188,8 +3409,8 @@ static int ocfs2_xattr_bucket_get_name_value(struct inode *inode,
name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
- *block_off = name_offset >> inode->i_sb->s_blocksize_bits;
- *new_offset = name_offset % inode->i_sb->s_blocksize;
+ *block_off = name_offset >> sb->s_blocksize_bits;
+ *new_offset = name_offset % sb->s_blocksize;
return 0;
}
@@ -3209,7 +3430,7 @@ static int ocfs2_list_xattr_bucket(struct inode *inode,
prefix = ocfs2_xattr_prefix(type);
if (prefix) {
- ret = ocfs2_xattr_bucket_get_name_value(inode,
+ ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
bucket_xh(bucket),
i,
&block_off,
@@ -3232,22 +3453,19 @@ static int ocfs2_list_xattr_bucket(struct inode *inode,
return ret;
}
-static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
- struct ocfs2_xattr_tree_root *xt,
- char *buffer,
- size_t buffer_size)
+static int ocfs2_iterate_xattr_index_block(struct inode *inode,
+ struct buffer_head *blk_bh,
+ xattr_tree_rec_func *rec_func,
+ void *para)
{
- struct ocfs2_extent_list *el = &xt->xt_list;
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)blk_bh->b_data;
+ struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
int ret = 0;
u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0;
u64 p_blkno = 0;
- struct ocfs2_xattr_tree_list xl = {
- .buffer = buffer,
- .buffer_size = buffer_size,
- .result = 0,
- };
- if (le16_to_cpu(el->l_next_free_rec) == 0)
+ if (!el->l_next_free_rec || !rec_func)
return 0;
while (name_hash > 0) {
@@ -3255,16 +3473,15 @@ static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
&e_cpos, &num_clusters, el);
if (ret) {
mlog_errno(ret);
- goto out;
+ break;
}
- ret = ocfs2_iterate_xattr_buckets(inode, p_blkno, num_clusters,
- ocfs2_list_xattr_bucket,
- &xl);
+ ret = rec_func(inode, blk_bh, p_blkno, e_cpos,
+ num_clusters, para);
if (ret) {
if (ret != -ERANGE)
mlog_errno(ret);
- goto out;
+ break;
}
if (e_cpos == 0)
@@ -3273,6 +3490,37 @@ static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
name_hash = e_cpos - 1;
}
+ return ret;
+
+}
+
+static int ocfs2_list_xattr_tree_rec(struct inode *inode,
+ struct buffer_head *root_bh,
+ u64 blkno, u32 cpos, u32 len, void *para)
+{
+ return ocfs2_iterate_xattr_buckets(inode, blkno, len,
+ ocfs2_list_xattr_bucket, para);
+}
+
+static int ocfs2_xattr_tree_list_index_block(struct inode *inode,
+ struct buffer_head *blk_bh,
+ char *buffer,
+ size_t buffer_size)
+{
+ int ret;
+ struct ocfs2_xattr_tree_list xl = {
+ .buffer = buffer,
+ .buffer_size = buffer_size,
+ .result = 0,
+ };
+
+ ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
+ ocfs2_list_xattr_tree_rec, &xl);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = xl.result;
out:
return ret;
@@ -3426,7 +3674,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode,
*/
down_write(&oi->ip_alloc_sem);
- ret = ocfs2_journal_access_xb(handle, inode, xb_bh,
+ ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
@@ -4263,9 +4511,9 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
prev_cpos, (unsigned long long)bucket_blkno(first));
- ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh);
+ ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
- ret = ocfs2_journal_access_xb(handle, inode, root_bh,
+ ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
@@ -4319,7 +4567,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode,
mlog(0, "Insert %u clusters at block %llu for xattr at %u\n",
num_bits, (unsigned long long)block, v_start);
- ret = ocfs2_insert_extent(osb, handle, inode, &et, v_start, block,
+ ret = ocfs2_insert_extent(handle, &et, v_start, block,
num_bits, 0, ctxt->meta_ac);
if (ret < 0) {
mlog_errno(ret);
@@ -4798,10 +5046,13 @@ static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
struct ocfs2_xattr_entry *xe = xs->here;
struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
void *base;
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_access = ocfs2_journal_access,
+ };
BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
- ret = ocfs2_xattr_bucket_get_name_value(inode, xh,
+ ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
xe - xh->xh_entries,
&block_off,
&offset);
@@ -4814,8 +5065,10 @@ static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
xv = (struct ocfs2_xattr_value_root *)(base + offset +
OCFS2_XATTR_SIZE(xe->xe_name_len));
+ vb.vb_xv = xv;
+ vb.vb_bh = xs->bucket->bu_bhs[block_off];
ret = __ocfs2_xattr_set_value_outside(inode, handle,
- xv, val, value_len);
+ &vb, val, value_len);
if (ret)
mlog_errno(ret);
out:
@@ -4826,7 +5079,8 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
u32 cpos,
- u32 len)
+ u32 len,
+ void *para)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -4838,14 +5092,22 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct ocfs2_cached_dealloc_ctxt dealloc;
struct ocfs2_extent_tree et;
- ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh);
+ ret = ocfs2_iterate_xattr_buckets(inode, blkno, len,
+ ocfs2_delete_xattr_in_bucket, para);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh);
ocfs2_init_dealloc_ctxt(&dealloc);
mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
cpos, len, (unsigned long long)blkno);
- ocfs2_remove_xattr_clusters_from_cache(inode, blkno, len);
+ ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno,
+ len);
ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
if (ret) {
@@ -4870,14 +5132,14 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode,
goto out;
}
- ret = ocfs2_journal_access_xb(handle, inode, root_bh,
+ ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
- ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac,
+ ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac,
&dealloc);
if (ret) {
mlog_errno(ret);
@@ -5220,7 +5482,7 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket,
void *para)
{
- int ret = 0;
+ int ret = 0, ref_credits;
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
u16 i;
struct ocfs2_xattr_entry *xe;
@@ -5228,7 +5490,9 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
int credits = ocfs2_remove_extent_credits(osb->sb) +
ocfs2_blocks_per_xattr_bucket(inode->i_sb);
-
+ struct ocfs2_xattr_value_root *xv;
+ struct ocfs2_rm_xattr_bucket_para *args =
+ (struct ocfs2_rm_xattr_bucket_para *)para;
ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
@@ -5237,7 +5501,16 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
if (ocfs2_xattr_is_local(xe))
continue;
- ctxt.handle = ocfs2_start_trans(osb, credits);
+ ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
+ i, &xv, NULL);
+
+ ret = ocfs2_lock_xattr_remove_allocators(inode, xv,
+ args->ref_ci,
+ args->ref_root_bh,
+ &ctxt.meta_ac,
+ &ref_credits);
+
+ ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
if (IS_ERR(ctxt.handle)) {
ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
@@ -5248,57 +5521,1439 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode,
i, 0, &ctxt);
ocfs2_commit_trans(osb, ctxt.handle);
+ if (ctxt.meta_ac) {
+ ocfs2_free_alloc_context(ctxt.meta_ac);
+ ctxt.meta_ac = NULL;
+ }
if (ret) {
mlog_errno(ret);
break;
}
}
+ if (ctxt.meta_ac)
+ ocfs2_free_alloc_context(ctxt.meta_ac);
ocfs2_schedule_truncate_log_flush(osb, 1);
ocfs2_run_deallocs(osb, &ctxt.dealloc);
return ret;
}
-static int ocfs2_delete_xattr_index_block(struct inode *inode,
- struct buffer_head *xb_bh)
+/*
+ * Whenever we modify an xattr value root in the bucket (e.g. CoW
+ * or changing an extent record flag), we need to recalculate
+ * the metaecc for the whole bucket.  That is done here.
+ *
+ * Note:
+ * We have to provide the extra credits for the caller.
+ */
+static int ocfs2_xattr_bucket_post_refcount(struct inode *inode,
+ handle_t *handle,
+ void *para)
+{
+ int ret;
+ struct ocfs2_xattr_bucket *bucket =
+ (struct ocfs2_xattr_bucket *)para;
+
+ ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ocfs2_xattr_bucket_journal_dirty(handle, bucket);
+
+ return 0;
+}
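
struct ocfs2_post_refcount itself is declared in refcounttree.h and is not shown in this hunk; judging from how it is filled in below (credits, para, func), the refcount code is expected to invoke the hook roughly as follows once it has modified a refcounted value tree. This is an assumption-laden sketch, not the actual refcounttree.c code:

	/* Sketch only: assumed consumer side of an ocfs2_post_refcount hook. */
	if (post && post->func) {
		/* post->credits must already be included in the handle. */
		ret = post->func(inode, handle, post->para);
		if (ret)
			mlog_errno(ret);
	}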
+
+/*
+ * Special action we need if the xattr value is refcounted.
+ *
+ * 1. If the xattr is refcounted, lock the refcount tree.
+ * 2. CoW the xattr if we are setting a new value and the value
+ *    will be stored outside.
+ * 3. Otherwise, decrease_refcount will work for us, so just
+ *    locking the refcount tree and calculating the metadata and
+ *    credits is enough.
+ *
+ * We have to do the CoW before ocfs2_init_xattr_set_ctxt since
+ * currently CoW runs as a complete transaction of its own, while
+ * ocfs2_init_xattr_set_ctxt also locks the allocators, which could
+ * deadlock us.  So we CoW the whole xattr value here.
+ */
+static int ocfs2_prepare_refcount_xattr(struct inode *inode,
+ struct ocfs2_dinode *di,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xis,
+ struct ocfs2_xattr_search *xbs,
+ struct ocfs2_refcount_tree **ref_tree,
+ int *meta_add,
+ int *credits)
{
- struct ocfs2_xattr_block *xb =
- (struct ocfs2_xattr_block *)xb_bh->b_data;
- struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list;
int ret = 0;
- u32 name_hash = UINT_MAX, e_cpos, num_clusters;
- u64 p_blkno;
+ struct ocfs2_xattr_block *xb;
+ struct ocfs2_xattr_entry *xe;
+ char *base;
+ u32 p_cluster, num_clusters;
+ unsigned int ext_flags;
+ int name_offset, name_len;
+ struct ocfs2_xattr_value_buf vb;
+ struct ocfs2_xattr_bucket *bucket = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_post_refcount refcount;
+ struct ocfs2_post_refcount *p = NULL;
+ struct buffer_head *ref_root_bh = NULL;
- if (le16_to_cpu(el->l_next_free_rec) == 0)
- return 0;
+ if (!xis->not_found) {
+ xe = xis->here;
+ name_offset = le16_to_cpu(xe->xe_name_offset);
+ name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
+ base = xis->base;
+ vb.vb_bh = xis->inode_bh;
+ vb.vb_access = ocfs2_journal_access_di;
+ } else {
+ int i, block_off = 0;
+ xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data;
+ xe = xbs->here;
+ name_offset = le16_to_cpu(xe->xe_name_offset);
+ name_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
+ i = xbs->here - xbs->header->xh_entries;
- while (name_hash > 0) {
- ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno,
- &e_cpos, &num_clusters, el);
+ if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) {
+ ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb,
+ bucket_xh(xbs->bucket),
+ i, &block_off,
+ &name_offset);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ base = bucket_block(xbs->bucket, block_off);
+ vb.vb_bh = xbs->bucket->bu_bhs[block_off];
+ vb.vb_access = ocfs2_journal_access;
+
+ if (ocfs2_meta_ecc(osb)) {
+			/* Create parameters for ocfs2_post_refcount. */
+ bucket = xbs->bucket;
+ refcount.credits = bucket->bu_blocks;
+ refcount.para = bucket;
+ refcount.func =
+ ocfs2_xattr_bucket_post_refcount;
+ p = &refcount;
+ }
+ } else {
+ base = xbs->base;
+ vb.vb_bh = xbs->xattr_bh;
+ vb.vb_access = ocfs2_journal_access_xb;
+ }
+ }
+
+ if (ocfs2_xattr_is_local(xe))
+ goto out;
+
+ vb.vb_xv = (struct ocfs2_xattr_value_root *)
+ (base + name_offset + name_len);
+
+ ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster,
+ &num_clusters, &vb.vb_xv->xr_list,
+ &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * We just need to check the 1st extent record, since we always
+	 * CoW the whole xattr.  So there shouldn't be an xattr with
+ * some REFCOUNT extent recs after the 1st one.
+ */
+ if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
+ goto out;
+
+ ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
+ 1, ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+	 * If we are deleting the xattr, or the new value will be stored inline,
+	 * leave it there: the xattr truncate process will remove the clusters
+	 * for us (it still needs the refcount tree lock and the metadata and
+	 * credits).  The worst case is that truncating each cluster splits the
+	 * refcount tree, turning the original extent into 3, so we will need
+	 * at most 2 extra extent recs per cluster.
+ */
+ if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {
+
+ ret = ocfs2_refcounted_xattr_delete_need(inode,
+ &(*ref_tree)->rf_ci,
+ ref_root_bh, vb.vb_xv,
+ meta_add, credits);
+ if (ret)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_refcount_cow_xattr(inode, di, &vb,
+ *ref_tree, ref_root_bh, 0,
+ le32_to_cpu(vb.vb_xv->xr_clusters), p);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ brelse(ref_root_bh);
+ return ret;
+}
+
+/*
+ * Add the REFCOUNTED flag to all the extent recs in an ocfs2_xattr_value_root.
+ * The physical clusters will be added to the refcount tree.
+ */
+static int ocfs2_xattr_value_attach_refcount(struct inode *inode,
+ struct ocfs2_xattr_value_root *xv,
+ struct ocfs2_extent_tree *value_et,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ struct ocfs2_post_refcount *refcount)
+{
+ int ret = 0;
+ u32 clusters = le32_to_cpu(xv->xr_clusters);
+ u32 cpos, p_cluster, num_clusters;
+ struct ocfs2_extent_list *el = &xv->xr_list;
+ unsigned int ext_flags;
+
+ cpos = 0;
+ while (cpos < clusters) {
+ ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
+ &num_clusters, el, &ext_flags);
+
+ cpos += num_clusters;
+ if ((ext_flags & OCFS2_EXT_REFCOUNTED))
+ continue;
+
+ BUG_ON(!p_cluster);
+
+ ret = ocfs2_add_refcount_flag(inode, value_et,
+ ref_ci, ref_root_bh,
+ cpos - num_clusters,
+ p_cluster, num_clusters,
+ dealloc, refcount);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Given a normal ocfs2_xattr_header, refcount all the entries whose
+ * values are stored outside.
+ * Used for xattrs stored in the inode and in an ocfs2_xattr_block.
+ */
+static int ocfs2_xattr_attach_refcount_normal(struct inode *inode,
+ struct ocfs2_xattr_value_buf *vb,
+ struct ocfs2_xattr_header *header,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+
+ struct ocfs2_xattr_entry *xe;
+ struct ocfs2_xattr_value_root *xv;
+ struct ocfs2_extent_tree et;
+ int i, ret = 0;
+
+ for (i = 0; i < le16_to_cpu(header->xh_count); i++) {
+ xe = &header->xh_entries[i];
+
+ if (ocfs2_xattr_is_local(xe))
+ continue;
+
+ xv = (struct ocfs2_xattr_value_root *)((void *)header +
+ le16_to_cpu(xe->xe_name_offset) +
+ OCFS2_XATTR_SIZE(xe->xe_name_len));
+
+ vb->vb_xv = xv;
+ ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
+
+ ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et,
+ ref_ci, ref_root_bh,
+ dealloc, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ocfs2_xattr_inline_attach_refcount(struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
+ struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *)
+ (fe_bh->b_data + inode->i_sb->s_blocksize -
+ le16_to_cpu(di->i_xattr_inline_size));
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_bh = fe_bh,
+ .vb_access = ocfs2_journal_access_di,
+ };
+
+ return ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
+ ref_ci, ref_root_bh, dealloc);
+}
+
+struct ocfs2_xattr_tree_value_refcount_para {
+ struct ocfs2_caching_info *ref_ci;
+ struct buffer_head *ref_root_bh;
+ struct ocfs2_cached_dealloc_ctxt *dealloc;
+};
+
+static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
+ struct ocfs2_xattr_bucket *bucket,
+ int offset,
+ struct ocfs2_xattr_value_root **xv,
+ struct buffer_head **bh)
+{
+ int ret, block_off, name_offset;
+ struct ocfs2_xattr_header *xh = bucket_xh(bucket);
+ struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
+ void *base;
+
+ ret = ocfs2_xattr_bucket_get_name_value(sb,
+ bucket_xh(bucket),
+ offset,
+ &block_off,
+ &name_offset);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ base = bucket_block(bucket, block_off);
+
+ *xv = (struct ocfs2_xattr_value_root *)(base + name_offset +
+ OCFS2_XATTR_SIZE(xe->xe_name_len));
+
+ if (bh)
+ *bh = bucket->bu_bhs[block_off];
+out:
+ return ret;
+}
+
+/*
+ * For a given xattr bucket, refcount all the entries whose
+ * values are stored outside.
+ */
+static int ocfs2_xattr_bucket_value_refcount(struct inode *inode,
+ struct ocfs2_xattr_bucket *bucket,
+ void *para)
+{
+ int i, ret = 0;
+ struct ocfs2_extent_tree et;
+ struct ocfs2_xattr_tree_value_refcount_para *ref =
+ (struct ocfs2_xattr_tree_value_refcount_para *)para;
+ struct ocfs2_xattr_header *xh =
+ (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
+ struct ocfs2_xattr_entry *xe;
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_access = ocfs2_journal_access,
+ };
+ struct ocfs2_post_refcount refcount = {
+ .credits = bucket->bu_blocks,
+ .para = bucket,
+ .func = ocfs2_xattr_bucket_post_refcount,
+ };
+ struct ocfs2_post_refcount *p = NULL;
+
+ /* We only need post_refcount if we support metaecc. */
+ if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)))
+ p = &refcount;
+
+ mlog(0, "refcount bucket %llu, count = %u\n",
+ (unsigned long long)bucket_blkno(bucket),
+ le16_to_cpu(xh->xh_count));
+ for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
+ xe = &xh->xh_entries[i];
+
+ if (ocfs2_xattr_is_local(xe))
+ continue;
+
+ ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
+ &vb.vb_xv, &vb.vb_bh);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ ocfs2_init_xattr_value_extent_tree(&et,
+ INODE_CACHE(inode), &vb);
+
+ ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv,
+ &et, ref->ref_ci,
+ ref->ref_root_bh,
+ ref->dealloc, p);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+ }
+
+ return ret;
+
+}
+
+static int ocfs2_refcount_xattr_tree_rec(struct inode *inode,
+ struct buffer_head *root_bh,
+ u64 blkno, u32 cpos, u32 len, void *para)
+{
+ return ocfs2_iterate_xattr_buckets(inode, blkno, len,
+ ocfs2_xattr_bucket_value_refcount,
+ para);
+}
+
+static int ocfs2_xattr_block_attach_refcount(struct inode *inode,
+ struct buffer_head *blk_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret = 0;
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)blk_bh->b_data;
+
+ if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) {
+ struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header;
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_bh = blk_bh,
+ .vb_access = ocfs2_journal_access_xb,
+ };
+
+ ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header,
+ ref_ci, ref_root_bh,
+ dealloc);
+ } else {
+ struct ocfs2_xattr_tree_value_refcount_para para = {
+ .ref_ci = ref_ci,
+ .ref_root_bh = ref_root_bh,
+ .dealloc = dealloc,
+ };
+
+ ret = ocfs2_iterate_xattr_index_block(inode, blk_bh,
+ ocfs2_refcount_xattr_tree_rec,
+ &para);
+ }
+
+ return ret;
+}
+
+int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+ int ret = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
+ struct buffer_head *blk_bh = NULL;
+
+ if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
+ ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh,
+ ref_ci, ref_root_bh,
+ dealloc);
if (ret) {
mlog_errno(ret);
goto out;
}
+ }
+
+ if (!di->i_xattr_loc)
+ goto out;
+
+ ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc),
+ &blk_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci,
+ ref_root_bh, dealloc);
+ if (ret)
+ mlog_errno(ret);
+
+ brelse(blk_bh);
+out:
+
+ return ret;
+}
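
ocfs2_xattr_attach_refcount_tree() is non-static, so its intended caller is the reflink code outside this file. A hedged sketch of such a caller, composed only from functions visible in this patch (error handling trimmed; di_bh/di are the inode's dinode buffer and its data, as elsewhere in this file):

	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	int ret;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb),
				       le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret)
		return ret;

	ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, &ref_tree->rf_ci,
					       ref_root_bh, &dealloc);

	ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1);
	brelse(ref_root_bh);
	ocfs2_run_deallocs(OCFS2_SB(inode->i_sb), &dealloc);
	return ret;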
+
+typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe);
+/*
+ * Store the information we need for xattr reflink.
+ * old_bh and new_bh are the inode bhs for the old and new inodes.
+ */
+struct ocfs2_xattr_reflink {
+ struct inode *old_inode;
+ struct inode *new_inode;
+ struct buffer_head *old_bh;
+ struct buffer_head *new_bh;
+ struct ocfs2_caching_info *ref_ci;
+ struct buffer_head *ref_root_bh;
+ struct ocfs2_cached_dealloc_ctxt *dealloc;
+ should_xattr_reflinked *xattr_reflinked;
+};
+
+/*
+ * Given an xattr header and an xe offset,
+ * return the proper xv and the corresponding bh.
+ * Xattrs in the inode, block and xattr tree have different implementations.
+ */
+typedef int (get_xattr_value_root)(struct super_block *sb,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_header *xh,
+ int offset,
+ struct ocfs2_xattr_value_root **xv,
+ struct buffer_head **ret_bh,
+ void *para);
+
+/*
+ * Calculate all the xattr value root metadata stored in this xattr header and
+ * the credits we need if we create them from scratch.
+ * We use get_xattr_value_root so that all types of xattr containers can use it.
+ */
+static int ocfs2_value_metas_in_xattr_header(struct super_block *sb,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_header *xh,
+ int *metas, int *credits,
+ int *num_recs,
+ get_xattr_value_root *func,
+ void *para)
+{
+ int i, ret = 0;
+ struct ocfs2_xattr_value_root *xv;
+ struct ocfs2_xattr_entry *xe;
+
+ for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
+ xe = &xh->xh_entries[i];
+ if (ocfs2_xattr_is_local(xe))
+ continue;
+
+ ret = func(sb, bh, xh, i, &xv, NULL, para);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ *metas += le16_to_cpu(xv->xr_list.l_tree_depth) *
+ le16_to_cpu(xv->xr_list.l_next_free_rec);
+
+ *credits += ocfs2_calc_extend_credits(sb,
+ &def_xv.xv.xr_list,
+ le32_to_cpu(xv->xr_clusters));
+
+ /*
+		 * If the value is a tree with depth > 1, we don't descend
+		 * into the extent blocks, so just calculate a maximum record count.
+ */
+ if (!xv->xr_list.l_tree_depth)
+ *num_recs += xv->xr_list.l_next_free_rec;
+ else
+ *num_recs += ocfs2_clusters_for_bytes(sb,
+ XATTR_SIZE_MAX);
+ }
+
+ return ret;
+}
+
+/* Used by xattr inode and block to return the right xv and buffer_head. */
+static int ocfs2_get_xattr_value_root(struct super_block *sb,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_header *xh,
+ int offset,
+ struct ocfs2_xattr_value_root **xv,
+ struct buffer_head **ret_bh,
+ void *para)
+{
+ struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
+
+ *xv = (struct ocfs2_xattr_value_root *)((void *)xh +
+ le16_to_cpu(xe->xe_name_offset) +
+ OCFS2_XATTR_SIZE(xe->xe_name_len));
+
+ if (ret_bh)
+ *ret_bh = bh;
+
+ return 0;
+}
+
+/*
+ * Lock the meta_ac and calculate how many credits we need for reflinking xattrs.
+ * It is only used for inline xattrs and xattr blocks.
+ */
+static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,
+ struct ocfs2_xattr_header *xh,
+ struct buffer_head *ref_root_bh,
+ int *credits,
+ struct ocfs2_alloc_context **meta_ac)
+{
+ int ret, meta_add = 0, num_recs = 0;
+ struct ocfs2_refcount_block *rb =
+ (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+
+ *credits = 0;
+
+ ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
+ &meta_add, credits, &num_recs,
+ ocfs2_get_xattr_value_root,
+ NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+	 * We need to add/modify num_recs in the refcount tree, so just calculate
+	 * an approximate number we need for the refcount tree change.
+	 * Sometimes we need to split the tree, and after a split, half of the
+	 * recs will be moved to the new block, and a new block can only hold
+	 * half the number of recs.  So we multiply the number of new blocks by 2.
+ */
+ num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2;
+ meta_add += num_recs;
+ *credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
+ if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
+ *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
+ le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
+ else
+ *credits += 1;
+
+ ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
- ret = ocfs2_iterate_xattr_buckets(inode, p_blkno, num_clusters,
- ocfs2_delete_xattr_in_bucket,
- NULL);
+/*
+ * Given an xattr header, reflink all the xattrs in this container.
+ * It can be used for inodes, blocks and buckets.
+ *
+ * NOTE:
+ * Before we call this function, the caller must have already memcpy'd the
+ * xattrs from old_xh to new_xh.
+ *
+ * If args.xattr_reflinked is set, call it to decide whether the xe should
+ * be reflinked or not. If not, remove it from the new xattr header.
+ */
+static int ocfs2_reflink_xattr_header(handle_t *handle,
+ struct ocfs2_xattr_reflink *args,
+ struct buffer_head *old_bh,
+ struct ocfs2_xattr_header *xh,
+ struct buffer_head *new_bh,
+ struct ocfs2_xattr_header *new_xh,
+ struct ocfs2_xattr_value_buf *vb,
+ struct ocfs2_alloc_context *meta_ac,
+ get_xattr_value_root *func,
+ void *para)
+{
+ int ret = 0, i, j;
+ struct super_block *sb = args->old_inode->i_sb;
+ struct buffer_head *value_bh;
+ struct ocfs2_xattr_entry *xe, *last;
+ struct ocfs2_xattr_value_root *xv, *new_xv;
+ struct ocfs2_extent_tree data_et;
+ u32 clusters, cpos, p_cluster, num_clusters;
+ unsigned int ext_flags = 0;
+
+ mlog(0, "reflink xattr in container %llu, count = %u\n",
+ (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count));
+
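+ /* 'last' marks the slot just past the final entry of new_xh; it is used when dropping entries below. */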
+ last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)];
+ for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
+ xe = &xh->xh_entries[i];
+
+ if (args->xattr_reflinked && !args->xattr_reflinked(xe)) {
+ xe = &new_xh->xh_entries[j];
+
+ le16_add_cpu(&new_xh->xh_count, -1);
+ if (new_xh->xh_count) {
+ memmove(xe, xe + 1,
+ (void *)last - (void *)xe);
+ memset(last, 0,
+ sizeof(struct ocfs2_xattr_entry));
+ }
+
+ /*
+ * Don't let j advance in the next round, since the remaining
+ * entries have already been shifted down.
+ */
+ j--;
+ continue;
+ }
+
+ if (ocfs2_xattr_is_local(xe))
+ continue;
+
+ ret = func(sb, old_bh, xh, i, &xv, NULL, para);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ /*
+ * For an xattr with l_tree_depth = 0, all the extent
+ * recs have already been copied to the new xh with the
+ * appropriate OCFS2_EXT_REFCOUNTED flag, so we just need to
+ * increase the refcount in the refcount tree.
+ *
+ * For an xattr with l_tree_depth > 0, we need
+ * to initialize it to the empty default value root,
+ * and then insert the extents one by one.
+ */
+ if (xv->xr_list.l_tree_depth) {
+ memcpy(new_xv, &def_xv, sizeof(def_xv));
+ vb->vb_xv = new_xv;
+ vb->vb_bh = value_bh;
+ ocfs2_init_xattr_value_extent_tree(&data_et,
+ INODE_CACHE(args->new_inode), vb);
+ }
+
+ clusters = le32_to_cpu(xv->xr_clusters);
+ cpos = 0;
+ while (cpos < clusters) {
+ ret = ocfs2_xattr_get_clusters(args->old_inode,
+ cpos,
+ &p_cluster,
+ &num_clusters,
+ &xv->xr_list,
+ &ext_flags);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ BUG_ON(!p_cluster);
+
+ if (xv->xr_list.l_tree_depth) {
+ ret = ocfs2_insert_extent(handle,
+ &data_et, cpos,
+ ocfs2_clusters_to_blocks(
+ args->old_inode->i_sb,
+ p_cluster),
+ num_clusters, ext_flags,
+ meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ret = ocfs2_increase_refcount(handle, args->ref_ci,
+ args->ref_root_bh,
+ p_cluster, num_clusters,
+ meta_ac, args->dealloc);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ cpos += num_clusters;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
+{
+ int ret = 0, credits = 0;
+ handle_t *handle;
+ struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data;
+ int inline_size = le16_to_cpu(di->i_xattr_inline_size);
+ int header_off = osb->sb->s_blocksize - inline_size;
+ struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
+ (args->old_bh->b_data + header_off);
+ struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *)
+ (args->new_bh->b_data + header_off);
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_inode_info *new_oi;
+ struct ocfs2_dinode *new_di;
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_bh = args->new_bh,
+ .vb_access = ocfs2_journal_access_di,
+ };
+
+ ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
+ &credits, &meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode),
+ args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ memcpy(args->new_bh->b_data + header_off,
+ args->old_bh->b_data + header_off, inline_size);
+
+ new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
+ new_di->i_xattr_inline_size = cpu_to_le16(inline_size);
+
+ ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
+ args->new_bh, new_xh, &vb, meta_ac,
+ ocfs2_get_xattr_value_root, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ new_oi = OCFS2_I(args->new_inode);
+ spin_lock(&new_oi->ip_lock);
+ new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
+ new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
+ spin_unlock(&new_oi->ip_lock);
+
+ ocfs2_journal_dirty(handle, args->new_bh);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+ return ret;
+}
+
+static int ocfs2_create_empty_xattr_block(struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head **ret_bh,
+ int indexed)
+{
+ int ret;
+ handle_t *handle;
+ struct ocfs2_alloc_context *meta_ac;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "create new xattr block for inode %llu, index = %d\n",
+ (unsigned long long)fe_bh->b_blocknr, indexed);
+ ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
+ meta_ac, ret_bh, indexed);
+ if (ret)
+ mlog_errno(ret);
+
+ ocfs2_commit_trans(osb, handle);
+out:
+ ocfs2_free_alloc_context(meta_ac);
+ return ret;
+}
+
+static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
+ struct buffer_head *blk_bh,
+ struct buffer_head *new_blk_bh)
+{
+ int ret = 0, credits = 0;
+ handle_t *handle;
+ struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
+ struct ocfs2_dinode *new_di;
+ struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
+ int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)blk_bh->b_data;
+ struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
+ struct ocfs2_xattr_block *new_xb =
+ (struct ocfs2_xattr_block *)new_blk_bh->b_data;
+ struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
+ struct ocfs2_alloc_context *meta_ac;
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_bh = new_blk_bh,
+ .vb_access = ocfs2_journal_access_xb,
+ };
+
+ ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
+ &credits, &meta_ac);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /* One more credit in case we need to set the xattr flag in the new inode. */
+ handle = ocfs2_start_trans(osb, credits + 1);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
+ ret = ocfs2_journal_access_di(handle,
+ INODE_CACHE(args->new_inode),
+ args->new_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+ }
+
+ ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
+ new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
+ osb->sb->s_blocksize - header_off);
+
+ ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
+ new_blk_bh, new_xh, &vb, meta_ac,
+ ocfs2_get_xattr_value_root, NULL);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ocfs2_journal_dirty(handle, new_blk_bh);
+
+ if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
+ new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
+ spin_lock(&new_oi->ip_lock);
+ new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
+ new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
+ spin_unlock(&new_oi->ip_lock);
+
+ ocfs2_journal_dirty(handle, args->new_bh);
+ }
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ ocfs2_free_alloc_context(meta_ac);
+ return ret;
+}
+
+struct ocfs2_reflink_xattr_tree_args {
+ struct ocfs2_xattr_reflink *reflink;
+ struct buffer_head *old_blk_bh;
+ struct buffer_head *new_blk_bh;
+ struct ocfs2_xattr_bucket *old_bucket;
+ struct ocfs2_xattr_bucket *new_bucket;
+};
+
+/*
+ * NOTE:
+ * We have to handle the case that either the old bucket or the new bucket
+ * may call this function to get the right ret_bh.
+ * So the caller must give us the right bh.
+ */
+static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_header *xh,
+ int offset,
+ struct ocfs2_xattr_value_root **xv,
+ struct buffer_head **ret_bh,
+ void *para)
+{
+ struct ocfs2_reflink_xattr_tree_args *args =
+ (struct ocfs2_reflink_xattr_tree_args *)para;
+ struct ocfs2_xattr_bucket *bucket;
+
+ if (bh == args->old_bucket->bu_bhs[0])
+ bucket = args->old_bucket;
+ else
+ bucket = args->new_bucket;
+
+ return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
+ xv, ret_bh);
+}
+
+struct ocfs2_value_tree_metas {
+ int num_metas;
+ int credits;
+ int num_recs;
+};
+
+static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_header *xh,
+ int offset,
+ struct ocfs2_xattr_value_root **xv,
+ struct buffer_head **ret_bh,
+ void *para)
+{
+ struct ocfs2_xattr_bucket *bucket =
+ (struct ocfs2_xattr_bucket *)para;
+
+ return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
+ xv, ret_bh);
+}
+
+static int ocfs2_calc_value_tree_metas(struct inode *inode,
+ struct ocfs2_xattr_bucket *bucket,
+ void *para)
+{
+ struct ocfs2_value_tree_metas *metas =
+ (struct ocfs2_value_tree_metas *)para;
+ struct ocfs2_xattr_header *xh =
+ (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
+
+ /* Add the credits for this bucket first. */
+ metas->credits += bucket->bu_blocks;
+ return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
+ xh, &metas->num_metas,
+ &metas->credits, &metas->num_recs,
+ ocfs2_value_tree_metas_in_bucket,
+ bucket);
+}
+
+/*
+ * Given an xattr extent rec starting at blkno and spanning len clusters,
+ * iterate over all the buckets, calculate how much metadata we need for
+ * reflinking all the ocfs2_xattr_value_roots, and lock the allocators accordingly.
+ */
+static int ocfs2_lock_reflink_xattr_rec_allocators(
+ struct ocfs2_reflink_xattr_tree_args *args,
+ struct ocfs2_extent_tree *xt_et,
+ u64 blkno, u32 len, int *credits,
+ struct ocfs2_alloc_context **meta_ac,
+ struct ocfs2_alloc_context **data_ac)
+{
+ int ret, num_free_extents;
+ struct ocfs2_value_tree_metas metas;
+ struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
+ struct ocfs2_refcount_block *rb;
+
+ memset(&metas, 0, sizeof(metas));
+
+ ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
+ ocfs2_calc_value_tree_metas, &metas);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ *credits = metas.credits;
+
+ /*
+ * Calculate what we need for the refcount tree change.
+ *
+ * We need to add/modify num_recs records in the refcount tree, so just
+ * calculate an approximate number we need for the refcount tree change.
+ * Sometimes we need to split the tree, and after a split, half of the recs
+ * are moved to the new block, so a new block can only hold half the
+ * number of recs. So we multiply the number of new blocks by 2.
+ * In the end, we have to add credits for modifying the already
+ * existing refcount block.
+ */
+ rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
+ metas.num_recs =
+ (metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
+ ocfs2_refcount_recs_per_rb(osb->sb) * 2;
+ metas.num_metas += metas.num_recs;
+ *credits += metas.num_recs +
+ metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
+ if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
+ *credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
+ le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
+ else
+ *credits += 1;
+
+ /* count in the xattr tree change. */
+ num_free_extents = ocfs2_num_free_extents(osb, xt_et);
+ if (num_free_extents < 0) {
+ ret = num_free_extents;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (num_free_extents < len)
+ metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
+
+ *credits += ocfs2_calc_extend_credits(osb->sb,
+ xt_et->et_root_el, len);
+
+ if (metas.num_metas) {
+ ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
+ meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
+ }
- ret = ocfs2_rm_xattr_cluster(inode, xb_bh,
- p_blkno, e_cpos, num_clusters);
+ if (len) {
+ ret = ocfs2_reserve_clusters(osb, len, data_ac);
+ if (ret)
+ mlog_errno(ret);
+ }
+out:
+ if (ret) {
+ if (*meta_ac) {
+ ocfs2_free_alloc_context(*meta_ac);
+ *meta_ac = NULL;
+ }
+ }
+
+ return ret;
+}
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+ u64 blkno, u64 new_blkno, u32 clusters,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_reflink_xattr_tree_args *args)
+{
+ int i, j, ret = 0;
+ struct super_block *sb = args->reflink->old_inode->i_sb;
+ u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
+ u32 num_buckets = clusters * bpc;
+ int bpb = args->old_bucket->bu_blocks;
+ struct ocfs2_xattr_value_buf vb = {
+ .vb_access = ocfs2_journal_access,
+ };
+
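+ /* Copy each old bucket into its new location block by block, then reflink the values it refers to. */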
+ for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
+ ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
if (ret) {
mlog_errno(ret);
break;
}
- if (e_cpos == 0)
+ ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno);
+ if (ret) {
+ mlog_errno(ret);
break;
+ }
- name_hash = e_cpos - 1;
+ /*
+ * The real number of buckets in this series of blocks is stored
+ * in the 1st bucket.
+ */
+ if (i == 0)
+ num_buckets = le16_to_cpu(
+ bucket_xh(args->old_bucket)->xh_num_buckets);
+
+ ret = ocfs2_xattr_bucket_journal_access(handle,
+ args->new_bucket,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ for (j = 0; j < bpb; j++)
+ memcpy(bucket_block(args->new_bucket, j),
+ bucket_block(args->old_bucket, j),
+ sb->s_blocksize);
+
+ ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
+ ret = ocfs2_reflink_xattr_header(handle, args->reflink,
+ args->old_bucket->bu_bhs[0],
+ bucket_xh(args->old_bucket),
+ args->new_bucket->bu_bhs[0],
+ bucket_xh(args->new_bucket),
+ &vb, meta_ac,
+ ocfs2_get_reflink_xattr_value_root,
+ args);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ /*
+ * Re-access and dirty the bucket to recalculate metaecc,
+ * because we may have extended the transaction in reflink_xattr_header,
+ * which drops our earlier journal access to the block.
+ */
+ ret = ocfs2_xattr_bucket_journal_access(handle,
+ args->new_bucket,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+
+ ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+ ocfs2_xattr_bucket_relse(args->old_bucket);
+ ocfs2_xattr_bucket_relse(args->new_bucket);
+ }
+
+ ocfs2_xattr_bucket_relse(args->old_bucket);
+ ocfs2_xattr_bucket_relse(args->new_bucket);
+ return ret;
+}
+
+/*
+ * Create the same xattr extent record in the new inode's xattr tree.
+ */
+static int ocfs2_reflink_xattr_rec(struct inode *inode,
+ struct buffer_head *root_bh,
+ u64 blkno,
+ u32 cpos,
+ u32 len,
+ void *para)
+{
+ int ret, credits = 0;
+ u32 p_cluster, num_clusters;
+ u64 new_blkno;
+ handle_t *handle;
+ struct ocfs2_reflink_xattr_tree_args *args =
+ (struct ocfs2_reflink_xattr_tree_args *)para;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_alloc_context *data_ac = NULL;
+ struct ocfs2_extent_tree et;
+
+ ocfs2_init_xattr_tree_extent_tree(&et,
+ INODE_CACHE(args->reflink->new_inode),
+ args->new_blk_bh);
+
+ ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
+ len, &credits,
+ &meta_ac, &data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ handle = ocfs2_start_trans(osb, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_claim_clusters(osb, handle, data_ac,
+ len, &p_cluster, &num_clusters);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
+
+ mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
+ (unsigned long long)blkno, (unsigned long long)new_blkno, len);
+ ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
+ meta_ac, data_ac, args);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+ (unsigned long long)new_blkno, len, cpos);
+ ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
+ len, 0, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+
+out:
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+ return ret;
+}
+
+/*
+ * Create reflinked xattr buckets.
+ * We will add the buckets one by one, and take a refcount on every xattr
+ * in each bucket whose value is stored outside the bucket.
+ */
+static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
+ struct buffer_head *blk_bh,
+ struct buffer_head *new_blk_bh)
+{
+ int ret;
+ struct ocfs2_reflink_xattr_tree_args para;
+
+ memset(&para, 0, sizeof(para));
+ para.reflink = args;
+ para.old_blk_bh = blk_bh;
+ para.new_blk_bh = new_blk_bh;
+
+ para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
+ if (!para.old_bucket) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
+ if (!para.new_bucket) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
+ ocfs2_reflink_xattr_rec,
+ &para);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ ocfs2_xattr_bucket_free(para.old_bucket);
+ ocfs2_xattr_bucket_free(para.new_bucket);
+ return ret;
+}
+
+static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
+ struct buffer_head *blk_bh)
+{
+ int ret, indexed = 0;
+ struct buffer_head *new_blk_bh = NULL;
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)blk_bh->b_data;
+
+ if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
+ indexed = 1;
+
+ ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
+ &new_blk_bh, indexed);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED))
+ ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
+ else
+ ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
+ if (ret)
+ mlog_errno(ret);
+
+out:
+ brelse(new_blk_bh);
+ return ret;
+}
+
+static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
+{
+ int type = ocfs2_xattr_get_type(xe);
+
+ return type != OCFS2_XATTR_INDEX_SECURITY &&
+ type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
+ type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+}
+
+int ocfs2_reflink_xattrs(struct inode *old_inode,
+ struct buffer_head *old_bh,
+ struct inode *new_inode,
+ struct buffer_head *new_bh,
+ bool preserve_security)
+{
+ int ret;
+ struct ocfs2_xattr_reflink args;
+ struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
+ struct buffer_head *blk_bh = NULL;
+ struct ocfs2_cached_dealloc_ctxt dealloc;
+ struct ocfs2_refcount_tree *ref_tree;
+ struct buffer_head *ref_root_bh = NULL;
+
+ ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
+ le64_to_cpu(di->i_refcount_loc),
+ 1, &ref_tree, &ref_root_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_init_dealloc_ctxt(&dealloc);
+
+ args.old_inode = old_inode;
+ args.new_inode = new_inode;
+ args.old_bh = old_bh;
+ args.new_bh = new_bh;
+ args.ref_ci = &ref_tree->rf_ci;
+ args.ref_root_bh = ref_root_bh;
+ args.dealloc = &dealloc;
+ if (preserve_security)
+ args.xattr_reflinked = NULL;
+ else
+ args.xattr_reflinked = ocfs2_reflink_xattr_no_security;
+
+ if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
+ ret = ocfs2_reflink_xattr_inline(&args);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+ }
+
+ if (!di->i_xattr_loc)
+ goto out_unlock;
+
+ ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
+ &blk_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
+ if (ret)
+ mlog_errno(ret);
+
+ brelse(blk_bh);
+
+out_unlock:
+ ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
+ ref_tree, 1);
+ brelse(ref_root_bh);
+
+ if (ocfs2_dealloc_has_cluster(&dealloc)) {
+ ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
+ ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
}
out:
@@ -5306,6 +6961,51 @@ out:
}
/*
+ * Initialize security and ACLs for an already created inode.
+ * Used when reflinking a file without preserving its security attributes.
+ *
+ * It uses common APIs like ocfs2_xattr_set, so the caller
+ * must not hold any lock except i_mutex.
+ */
+int ocfs2_init_security_and_acl(struct inode *dir,
+ struct inode *inode)
+{
+ int ret = 0;
+ struct buffer_head *dir_bh = NULL;
+ struct ocfs2_security_xattr_info si = {
+ .enable = 1,
+ };
+
+ ret = ocfs2_init_security_get(inode, dir, &si);
+ if (!ret) {
+ ret = ocfs2_xattr_security_set(inode, si.name,
+ si.value, si.value_len,
+ XATTR_CREATE);
+ if (ret) {
+ mlog_errno(ret);
+ goto leave;
+ }
+ } else if (ret != -EOPNOTSUPP) {
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ ret = ocfs2_inode_lock(dir, &dir_bh, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+ if (ret)
+ mlog_errno(ret);
+
+ ocfs2_inode_unlock(dir, 0);
+ brelse(dir_bh);
+leave:
+ return ret;
+}
+
+/*
* 'security' attributes support
*/
static size_t ocfs2_xattr_security_list(struct inode *inode, char *list,
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index 1ca7e9a1b7b..08e36389f56 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -55,6 +55,8 @@ int ocfs2_xattr_set_handle(handle_t *, struct inode *, struct buffer_head *,
int, const char *, const void *, size_t, int,
struct ocfs2_alloc_context *,
struct ocfs2_alloc_context *);
+int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
+ struct ocfs2_dinode *di);
int ocfs2_xattr_remove(struct inode *, struct buffer_head *);
int ocfs2_init_security_get(struct inode *, struct inode *,
struct ocfs2_security_xattr_info *);
@@ -83,5 +85,16 @@ struct ocfs2_xattr_value_buf {
struct ocfs2_xattr_value_root *vb_xv;
};
-
+int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_caching_info *ref_ci,
+ struct buffer_head *ref_root_bh,
+ struct ocfs2_cached_dealloc_ctxt *dealloc);
+int ocfs2_reflink_xattrs(struct inode *old_inode,
+ struct buffer_head *old_bh,
+ struct inode *new_inode,
+ struct buffer_head *new_bh,
+ bool preserve_security);
+int ocfs2_init_security_and_acl(struct inode *dir,
+ struct inode *inode);
#endif /* OCFS2_XATTR_H */
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index c7275cfbdcf..3680bae335b 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -489,7 +489,7 @@ out:
return ret;
}
-struct inode_operations omfs_dir_inops = {
+const struct inode_operations omfs_dir_inops = {
.lookup = omfs_lookup,
.mkdir = omfs_mkdir,
.rename = omfs_rename,
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d17e774eaf4..4845fbb18e6 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -333,11 +333,11 @@ struct file_operations omfs_file_operations = {
.splice_read = generic_file_splice_read,
};
-struct inode_operations omfs_file_inops = {
+const struct inode_operations omfs_file_inops = {
.truncate = omfs_truncate
};
-struct address_space_operations omfs_aops = {
+const struct address_space_operations omfs_aops = {
.readpage = omfs_readpage,
.readpages = omfs_readpages,
.writepage = omfs_writepage,
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 379ae5fb441..f3b7c1541f3 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -278,7 +278,7 @@ static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static struct super_operations omfs_sops = {
+static const struct super_operations omfs_sops = {
.write_inode = omfs_write_inode,
.delete_inode = omfs_delete_inode,
.put_super = omfs_put_super,
diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h
index 2bc0f067040..df71039945a 100644
--- a/fs/omfs/omfs.h
+++ b/fs/omfs/omfs.h
@@ -45,15 +45,15 @@ extern int omfs_clear_range(struct super_block *sb, u64 block, int count);
/* dir.c */
extern struct file_operations omfs_dir_operations;
-extern struct inode_operations omfs_dir_inops;
+extern const struct inode_operations omfs_dir_inops;
extern int omfs_make_empty(struct inode *inode, struct super_block *sb);
extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
u64 fsblock);
/* file.c */
extern struct file_operations omfs_file_operations;
-extern struct inode_operations omfs_file_inops;
-extern struct address_space_operations omfs_aops;
+extern const struct inode_operations omfs_file_inops;
+extern const struct address_space_operations omfs_aops;
extern void omfs_make_empty_table(struct buffer_head *bh, int offset);
extern int omfs_shrink_inode(struct inode *inode);
diff --git a/fs/open.c b/fs/open.c
index 31191bf513e..4f01e06227c 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -290,10 +290,9 @@ out:
return error;
}
-SYSCALL_DEFINE2(truncate, const char __user *, path, unsigned long, length)
+SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
{
- /* on 32-bit boxen it will cut the range 2^31--2^32-1 off */
- return do_sys_truncate(path, (long)length);
+ return do_sys_truncate(path, length);
}
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index fbeaddf595d..7b685e10cba 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -581,7 +581,7 @@ try_scan:
}
if (from + size > get_capacity(disk)) {
- struct block_device_operations *bdops = disk->fops;
+ const struct block_device_operations *bdops = disk->fops;
unsigned long long capacity;
printk(KERN_WARNING
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 725a650bbbb..07f77a7945c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -82,6 +82,7 @@
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
+#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -321,6 +322,94 @@ static inline void task_context_switch_counts(struct seq_file *m,
p->nivcsw);
}
+#ifdef CONFIG_MMU
+
+struct stack_stats {
+ struct vm_area_struct *vma;
+ unsigned long startpage;
+ unsigned long usage;
+};
+
+static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct stack_stats *ss = walk->private;
+ struct vm_area_struct *vma = ss->vma;
+ pte_t *pte, ptent;
+ spinlock_t *ptl;
+ int ret = 0;
+
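+ /* Scan the stack VMA's PTEs: the deepest present (or swapped-out) page determines the usage. */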
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+
+#ifdef CONFIG_STACK_GROWSUP
+ if (pte_present(ptent) || is_swap_pte(ptent))
+ ss->usage = addr - ss->startpage + PAGE_SIZE;
+#else
+ if (pte_present(ptent) || is_swap_pte(ptent)) {
+ ss->usage = ss->startpage - addr + PAGE_SIZE;
+ pte++;
+ ret = 1;
+ break;
+ }
+#endif
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+ return ret;
+}
+
+static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
+ struct task_struct *task)
+{
+ struct stack_stats ss;
+ struct mm_walk stack_walk = {
+ .pmd_entry = stack_usage_pte_range,
+ .mm = vma->vm_mm,
+ .private = &ss,
+ };
+
+ if (!vma->vm_mm || is_vm_hugetlb_page(vma))
+ return 0;
+
+ ss.vma = vma;
+ ss.startpage = task->stack_start & PAGE_MASK;
+ ss.usage = 0;
+
+#ifdef CONFIG_STACK_GROWSUP
+ walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
+ &stack_walk);
+#else
+ walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
+ &stack_walk);
+#endif
+ return ss.usage;
+}
+
+static inline void task_show_stack_usage(struct seq_file *m,
+ struct task_struct *task)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = get_task_mm(task);
+
+ if (mm) {
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, task->stack_start);
+ if (vma)
+ seq_printf(m, "Stack usage:\t%lu kB\n",
+ get_stack_usage_in_bytes(vma, task) >> 10);
+
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+#else
+static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+{
+}
+#endif /* CONFIG_MMU */
+
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
@@ -340,6 +429,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
task_show_regs(m, task);
#endif
task_context_switch_counts(m, task);
+ task_show_stack_usage(m, task);
return 0;
}
@@ -481,7 +571,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
rsslim,
mm ? mm->start_code : 0,
mm ? mm->end_code : 0,
- (permitted && mm) ? mm->start_stack : 0,
+ (permitted) ? task->stack_start : 0,
esp,
eip,
/* The signal information here is obsolete.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6f742f6658a..837469a9659 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -447,7 +447,7 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
- points = badness(task, uptime.tv_sec);
+ points = badness(task->group_leader, uptime.tv_sec);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
@@ -458,7 +458,7 @@ struct limit_names {
};
static const struct limit_names lnames[RLIM_NLIMITS] = {
- [RLIMIT_CPU] = {"Max cpu time", "ms"},
+ [RLIMIT_CPU] = {"Max cpu time", "seconds"},
[RLIMIT_FSIZE] = {"Max file size", "bytes"},
[RLIMIT_DATA] = {"Max data size", "bytes"},
[RLIMIT_STACK] = {"Max stack size", "bytes"},
@@ -999,11 +999,17 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
char buffer[PROC_NUMBUF];
size_t len;
- int oom_adjust;
+ int oom_adjust = OOM_DISABLE;
+ unsigned long flags;
if (!task)
return -ESRCH;
- oom_adjust = task->oomkilladj;
+
+ if (lock_task_sighand(task, &flags)) {
+ oom_adjust = task->signal->oom_adj;
+ unlock_task_sighand(task, &flags);
+ }
+
put_task_struct(task);
len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
@@ -1015,32 +1021,44 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
- char buffer[PROC_NUMBUF], *end;
- int oom_adjust;
+ char buffer[PROC_NUMBUF];
+ long oom_adjust;
+ unsigned long flags;
+ int err;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
- oom_adjust = simple_strtol(buffer, &end, 0);
+
+ err = strict_strtol(strstrip(buffer), 0, &oom_adjust);
+ if (err)
+ return -EINVAL;
if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
oom_adjust != OOM_DISABLE)
return -EINVAL;
- if (*end == '\n')
- end++;
+
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
- if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
+ if (!lock_task_sighand(task, &flags)) {
+ put_task_struct(task);
+ return -ESRCH;
+ }
+
+ if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
+ unlock_task_sighand(task, &flags);
put_task_struct(task);
return -EACCES;
}
- task->oomkilladj = oom_adjust;
+
+ task->signal->oom_adj = oom_adjust;
+
+ unlock_task_sighand(task, &flags);
put_task_struct(task);
- if (end - buffer == 0)
- return -EIO;
- return end - buffer;
+
+ return count;
}
static const struct file_operations proc_oom_adjust_operations = {
@@ -1169,17 +1187,16 @@ static ssize_t proc_fault_inject_write(struct file * file,
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
- make_it_fail = simple_strtol(buffer, &end, 0);
- if (*end == '\n')
- end++;
+ make_it_fail = simple_strtol(strstrip(buffer), &end, 0);
+ if (*end)
+ return -EINVAL;
task = get_proc_task(file->f_dentry->d_inode);
if (!task)
return -ESRCH;
task->make_it_fail = make_it_fail;
put_task_struct(task);
- if (end - buffer == 0)
- return -EIO;
- return end - buffer;
+
+ return count;
}
static const struct file_operations proc_fault_inject_operations = {
@@ -2586,9 +2603,6 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
dput(dentry);
}
- if (tgid == 0)
- goto out;
-
name.name = buf;
name.len = snprintf(buf, sizeof(buf), "%d", tgid);
leader = d_hash_and_lookup(mnt->mnt_root, &name);
@@ -2645,17 +2659,16 @@ out:
void proc_flush_task(struct task_struct *task)
{
int i;
- struct pid *pid, *tgid = NULL;
+ struct pid *pid, *tgid;
struct upid *upid;
pid = task_pid(task);
- if (thread_group_leader(task))
- tgid = task_tgid(task);
+ tgid = task_tgid(task);
for (i = 0; i <= pid->level; i++) {
upid = &pid->numbers[i];
proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
- tgid ? tgid->numbers[i].nr : 0);
+ tgid->numbers[i].nr);
}
upid = &pid->numbers[pid->level];
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 59b43a06887..56013371f9f 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -17,9 +17,15 @@
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include <linux/bootmem.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/memory.h>
+#include <asm/sections.h>
#define CORE_STR "CORE"
@@ -29,17 +35,6 @@
static struct proc_dir_entry *proc_root_kcore;
-static int open_kcore(struct inode * inode, struct file * filp)
-{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-}
-
-static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *);
-
-static const struct file_operations proc_kcore_operations = {
- .read = read_kcore,
- .open = open_kcore,
-};
#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
@@ -57,18 +52,19 @@ struct memelfnote
void *data;
};
-static struct kcore_list *kclist;
+static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
+static int kcore_need_update = 1;
void
-kclist_add(struct kcore_list *new, void *addr, size_t size)
+kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
new->addr = (unsigned long)addr;
new->size = size;
+ new->type = type;
write_lock(&kclist_lock);
- new->next = kclist;
- kclist = new;
+ list_add_tail(&new->list, &kclist_head);
write_unlock(&kclist_lock);
}
@@ -80,7 +76,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
*nphdr = 1; /* PT_NOTE */
size = 0;
- for (m=kclist; m; m=m->next) {
+ list_for_each_entry(m, &kclist_head, list) {
try = kc_vaddr_to_offset((size_t)m->addr + m->size);
if (try > size)
size = try;
@@ -97,6 +93,177 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
return size + *elf_buflen;
}
+static void free_kclist_ents(struct list_head *head)
+{
+ struct kcore_list *tmp, *pos;
+
+ list_for_each_entry_safe(pos, tmp, head, list) {
+ list_del(&pos->list);
+ kfree(pos);
+ }
+}
+/*
+ * Replace all KCORE_RAM/KCORE_VMEMMAP information with the passed list.
+ */
+static void __kcore_update_ram(struct list_head *list)
+{
+ int nphdr;
+ size_t size;
+ struct kcore_list *tmp, *pos;
+ LIST_HEAD(garbage);
+
+ write_lock(&kclist_lock);
+ if (kcore_need_update) {
+ list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
+ if (pos->type == KCORE_RAM
+ || pos->type == KCORE_VMEMMAP)
+ list_move(&pos->list, &garbage);
+ }
+ list_splice_tail(list, &kclist_head);
+ } else
+ list_splice(list, &garbage);
+ kcore_need_update = 0;
+ proc_root_kcore->size = get_kcore_size(&nphdr, &size);
+ write_unlock(&kclist_lock);
+
+ free_kclist_ents(&garbage);
+}
+
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * For the non-highmem part of memory we can assume [0...max_low_pfn) is one
+ * continuous range, because its memory holes are not as big as in the
+ * !HIGHMEM case. (HIGHMEM is special because part of memory is _invisible_
+ * to the kernel's direct mapping.)
+ */
+static int kcore_update_ram(void)
+{
+ LIST_HEAD(head);
+ struct kcore_list *ent;
+ int ret = 0;
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ return -ENOMEM;
+ ent->addr = (unsigned long)__va(0);
+ ent->size = max_low_pfn << PAGE_SHIFT;
+ ent->type = KCORE_RAM;
+ list_add(&ent->list, &head);
+ __kcore_update_ram(&head);
+ return ret;
+}
+
+#else /* !CONFIG_HIGHMEM */
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/* calculate vmemmap's address from given system ram pfn and register it */
+int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
+{
+ unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
+ unsigned long nr_pages = ent->size >> PAGE_SHIFT;
+ unsigned long start, end;
+ struct kcore_list *vmm, *tmp;
+
+
+ start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
+ end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
+ end = ALIGN(end, PAGE_SIZE);
+ /* overlap check (because we had to align to page boundaries) */
+ list_for_each_entry(tmp, head, list) {
+ if (tmp->type != KCORE_VMEMMAP)
+ continue;
+ if (start < tmp->addr + tmp->size)
+ if (end > tmp->addr)
+ end = tmp->addr;
+ }
+ if (start < end) {
+ vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
+ if (!vmm)
+ return 0;
+ vmm->addr = start;
+ vmm->size = end - start;
+ vmm->type = KCORE_VMEMMAP;
+ list_add_tail(&vmm->list, head);
+ }
+ return 1;
+
+}
+#else
+int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
+{
+ return 1;
+}
+
+#endif
+
+static int
+kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
+{
+ struct list_head *head = (struct list_head *)arg;
+ struct kcore_list *ent;
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ return -ENOMEM;
+ ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
+ ent->size = nr_pages << PAGE_SHIFT;
+
+ /* Sanity check: this can happen on 32-bit archs... maybe */
+ if (ent->addr < (unsigned long) __va(0))
+ goto free_out;
+
+ /* Cut off the part of the area that is not direct-mapped (borrowed from the ppc32 code). */
+ if (ULONG_MAX - ent->addr < ent->size)
+ ent->size = ULONG_MAX - ent->addr;
+
+ /* cut when vmalloc() area is higher than direct-map area */
+ if (VMALLOC_START > (unsigned long)__va(0)) {
+ if (ent->addr > VMALLOC_START)
+ goto free_out;
+ if (VMALLOC_START - ent->addr < ent->size)
+ ent->size = VMALLOC_START - ent->addr;
+ }
+
+ ent->type = KCORE_RAM;
+ list_add_tail(&ent->list, head);
+
+ if (!get_sparsemem_vmemmap_info(ent, head)) {
+ list_del(&ent->list);
+ goto free_out;
+ }
+
+ return 0;
+free_out:
+ kfree(ent);
+ return 1;
+}
+
+static int kcore_update_ram(void)
+{
+ int nid, ret;
+ unsigned long end_pfn;
+ LIST_HEAD(head);
+
+ /* Not initialized yet, update now */
+ /* find out "max pfn" */
+ end_pfn = 0;
+ for_each_node_state(nid, N_HIGH_MEMORY) {
+ unsigned long node_end;
+ node_end = NODE_DATA(nid)->node_start_pfn +
+ NODE_DATA(nid)->node_spanned_pages;
+ if (end_pfn < node_end)
+ end_pfn = node_end;
+ }
+ /* scan 0 to max_pfn */
+ ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
+ if (ret) {
+ free_kclist_ents(&head);
+ return -ENOMEM;
+ }
+ __kcore_update_ram(&head);
+ return ret;
+}
+#endif /* CONFIG_HIGHMEM */
/*****************************************************************************/
/*
@@ -192,7 +359,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
nhdr->p_align = 0;
/* setup ELF PT_LOAD program header for every area */
- for (m=kclist; m; m=m->next) {
+ list_for_each_entry(m, &kclist_head, list) {
phdr = (struct elf_phdr *) bufp;
bufp += sizeof(struct elf_phdr);
offset += sizeof(struct elf_phdr);
@@ -265,7 +432,8 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
unsigned long start;
read_lock(&kclist_lock);
- proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
+ size = get_kcore_size(&nphdr, &elf_buflen);
+
if (buflen == 0 || *fpos >= size) {
read_unlock(&kclist_lock);
return 0;
@@ -317,7 +485,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
struct kcore_list *m;
read_lock(&kclist_lock);
- for (m=kclist; m; m=m->next) {
+ list_for_each_entry(m, &kclist_head, list) {
if (start >= m->addr && start < (m->addr+m->size))
break;
}
@@ -326,45 +494,14 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (m == NULL) {
if (clear_user(buffer, tsz))
return -EFAULT;
- } else if (is_vmalloc_addr((void *)start)) {
+ } else if (is_vmalloc_or_module_addr((void *)start)) {
char * elf_buf;
- struct vm_struct *m;
- unsigned long curstart = start;
- unsigned long cursize = tsz;
elf_buf = kzalloc(tsz, GFP_KERNEL);
if (!elf_buf)
return -ENOMEM;
-
- read_lock(&vmlist_lock);
- for (m=vmlist; m && cursize; m=m->next) {
- unsigned long vmstart;
- unsigned long vmsize;
- unsigned long msize = m->size - PAGE_SIZE;
-
- if (((unsigned long)m->addr + msize) <
- curstart)
- continue;
- if ((unsigned long)m->addr > (curstart +
- cursize))
- break;
- vmstart = (curstart < (unsigned long)m->addr ?
- (unsigned long)m->addr : curstart);
- if (((unsigned long)m->addr + msize) >
- (curstart + cursize))
- vmsize = curstart + cursize - vmstart;
- else
- vmsize = (unsigned long)m->addr +
- msize - vmstart;
- curstart = vmstart + vmsize;
- cursize -= vmsize;
- /* don't dump ioremap'd stuff! (TA) */
- if (m->flags & VM_IOREMAP)
- continue;
- memcpy(elf_buf + (vmstart - start),
- (char *)vmstart, vmsize);
- }
- read_unlock(&vmlist_lock);
+ vread(elf_buf, (char *)start, tsz);
+ /* we have to zero-fill user buffer even if no read */
if (copy_to_user(buffer, elf_buf, tsz)) {
kfree(elf_buf);
return -EFAULT;
@@ -402,12 +539,96 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
return acc;
}
+
+static int open_kcore(struct inode *inode, struct file *filp)
+{
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
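+ /* Rebuild the RAM list if memory hotplug invalidated it, then sync the inode size. */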
+ if (kcore_need_update)
+ kcore_update_ram();
+ if (i_size_read(inode) != proc_root_kcore->size) {
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, proc_root_kcore->size);
+ mutex_unlock(&inode->i_mutex);
+ }
+ return 0;
+}
+
+
+static const struct file_operations proc_kcore_operations = {
+ .read = read_kcore,
+ .open = open_kcore,
+};
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* just remember that we have to update kcore */
+static int __meminit kcore_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ switch (action) {
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ write_lock(&kclist_lock);
+ kcore_need_update = 1;
+ write_unlock(&kclist_lock);
+ }
+ return NOTIFY_OK;
+}
+#endif
+
+
+static struct kcore_list kcore_vmalloc;
+
+#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
+static struct kcore_list kcore_text;
+/*
+ * If defined, a special segment is used for mapping the kernel text instead
+ * of the direct-map area, so we need to create a special TEXT entry.
+ */
+static void __init proc_kcore_text_init(void)
+{
+ kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT);
+}
+#else
+static void __init proc_kcore_text_init(void)
+{
+}
+#endif
+
+#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+/*
+ * The MODULES_VADDR area has no intersection with the vmalloc area.
+ */
+struct kcore_list kcore_modules;
+static void __init add_modules_range(void)
+{
+ kclist_add(&kcore_modules, (void *)MODULES_VADDR,
+ MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
+}
+#else
+static void __init add_modules_range(void)
+{
+}
+#endif
+
static int __init proc_kcore_init(void)
{
- proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
- if (proc_root_kcore)
- proc_root_kcore->size =
- (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
+ proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
+ &proc_kcore_operations);
+ if (!proc_root_kcore) {
+ printk(KERN_ERR "couldn't create /proc/kcore\n");
+ return 0; /* Always returns 0. */
+ }
+ /* Store text area if it's special */
+ proc_kcore_text_init();
+ /* Store vmalloc area */
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
+ add_modules_range();
+ /* Store direct-map area from physical memory map */
+ kcore_update_ram();
+ hotplug_memory_notifier(kcore_callback, 0);
+
return 0;
}
module_init(proc_kcore_init);
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index d5c410d47fa..c7bff4f603f 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -81,9 +81,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
"Writeback: %8lu kB\n"
"AnonPages: %8lu kB\n"
"Mapped: %8lu kB\n"
+ "Shmem: %8lu kB\n"
"Slab: %8lu kB\n"
"SReclaimable: %8lu kB\n"
"SUnreclaim: %8lu kB\n"
+ "KernelStack: %8lu kB\n"
"PageTables: %8lu kB\n"
#ifdef CONFIG_QUICKLIST
"Quicklists: %8lu kB\n"
@@ -95,7 +97,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
"Committed_AS: %8lu kB\n"
"VmallocTotal: %8lu kB\n"
"VmallocUsed: %8lu kB\n"
- "VmallocChunk: %8lu kB\n",
+ "VmallocChunk: %8lu kB\n"
+#ifdef CONFIG_MEMORY_FAILURE
+ "HardwareCorrupted: %8lu kB\n"
+#endif
+ ,
K(i.totalram),
K(i.freeram),
K(i.bufferram),
@@ -124,10 +130,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(global_page_state(NR_WRITEBACK)),
K(global_page_state(NR_ANON_PAGES)),
K(global_page_state(NR_FILE_MAPPED)),
+ K(global_page_state(NR_SHMEM)),
K(global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE)),
K(global_page_state(NR_SLAB_RECLAIMABLE)),
K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
+ global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
K(global_page_state(NR_PAGETABLE)),
#ifdef CONFIG_QUICKLIST
K(quicklist_total_size()),
@@ -140,6 +148,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
(unsigned long)VMALLOC_TOTAL >> 10,
vmi.used >> 10,
vmi.largest_chunk >> 10
+#ifdef CONFIG_MEMORY_FAILURE
+ ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
+#endif
);
hugetlb_report_meminfo(m);
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 7e14d1a0400..9fe7d7ebe11 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -109,7 +109,7 @@ static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos)
return rb_next((struct rb_node *) v);
}
-static struct seq_operations proc_nommu_region_list_seqop = {
+static const struct seq_operations proc_nommu_region_list_seqop = {
.start = nommu_region_list_start,
.next = nommu_region_list_next,
.stop = nommu_region_list_stop,
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 2707c6c7a20..2281c2cbfe2 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -2,6 +2,7 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
+#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
@@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = {
#define KPF_UNEVICTABLE 18
#define KPF_NOPAGE 20
+#define KPF_KSM 21
+
/* kernel hacking assistances
* WARNING: subject to change, never rely on them!
*/
@@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page)
u |= 1 << KPF_MMAP;
if (PageAnon(page))
u |= 1 << KPF_ANON;
+ if (PageKsm(page))
+ u |= 1 << KPF_KSM;
/*
* compound pages: export both head/tail info
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 9b1e4e9a16b..f667e8aeabd 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -153,7 +153,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
/* careful: calling conventions are nasty here */
res = count;
- error = table->proc_handler(table, write, filp, buf, &res, ppos);
+ error = table->proc_handler(table, write, buf, &res, ppos);
if (!error)
error = res;
out:
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9bd8be1d235..2a1bef9203c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -243,6 +243,25 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
name = "[stack]";
+ } else {
+ unsigned long stack_start;
+ struct proc_maps_private *pmp;
+
+ pmp = m->private;
+ stack_start = pmp->task->stack_start;
+
+ if (vma->vm_start <= stack_start &&
+ vma->vm_end >= stack_start) {
+ pad_len_spaces(m, len);
+ seq_printf(m,
+ "[threadstack:%08lx]",
+#ifdef CONFIG_STACK_GROWSUP
+ vma->vm_end - stack_start
+#else
+ stack_start - vma->vm_start
+#endif
+ );
+ }
}
} else {
name = "[vdso]";
@@ -465,23 +484,28 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}
+#define CLEAR_REFS_ALL 1
+#define CLEAR_REFS_ANON 2
+#define CLEAR_REFS_MAPPED 3
+
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
- char buffer[PROC_NUMBUF], *end;
+ char buffer[PROC_NUMBUF];
struct mm_struct *mm;
struct vm_area_struct *vma;
+ long type;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
- if (!simple_strtol(buffer, &end, 0))
+ if (strict_strtol(strstrip(buffer), 10, &type))
+ return -EINVAL;
+ if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
return -EINVAL;
- if (*end == '\n')
- end++;
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
@@ -494,18 +518,31 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
clear_refs_walk.private = vma;
- if (!is_vm_hugetlb_page(vma))
- walk_page_range(vma->vm_start, vma->vm_end,
- &clear_refs_walk);
+ if (is_vm_hugetlb_page(vma))
+ continue;
+ /*
+ * Writing 1 to /proc/pid/clear_refs affects all pages.
+ *
+ * Writing 2 to /proc/pid/clear_refs only affects
+ * anonymous pages.
+ *
+ * Writing 3 to /proc/pid/clear_refs only affects file
+ * mapped pages.
+ */
+ if (type == CLEAR_REFS_ANON && vma->vm_file)
+ continue;
+ if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
+ continue;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &clear_refs_walk);
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
}
put_task_struct(task);
- if (end - buffer == 0)
- return -EIO;
- return end - buffer;
+
+ return count;
}
const struct file_operations proc_clear_refs_operations = {
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 0c10a0b3f14..766b1d45605 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -4,13 +4,18 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/time.h>
+#include <linux/kernel_stat.h>
#include <asm/cputime.h>
static int uptime_proc_show(struct seq_file *m, void *v)
{
struct timespec uptime;
struct timespec idle;
- cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
+ int i;
+ cputime_t idletime = cputime_zero;
+
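+ /* Sum idle time over all possible CPUs instead of using only the boot idle task's utime/stime. */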
+ for_each_possible_cpu(i)
+ idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
do_posix_clock_monotonic_gettime(&uptime);
monotonic_to_bootbased(&uptime);
diff --git a/fs/qnx4/Kconfig b/fs/qnx4/Kconfig
index be8e0e1445b..5f608999404 100644
--- a/fs/qnx4/Kconfig
+++ b/fs/qnx4/Kconfig
@@ -6,20 +6,9 @@ config QNX4FS_FS
QNX 4 and QNX 6 (the latter is also called QNX RTP).
Further information is available at <http://www.qnx.com/>.
Say Y if you intend to mount QNX hard disks or floppies.
- Unless you say Y to "QNX4FS read-write support" below, you will
- only be able to read these file systems.
To compile this file system support as a module, choose M here: the
module will be called qnx4.
If you don't know whether you need it, then you don't need it:
answer N.
-
-config QNX4FS_RW
- bool "QNX4FS write support (DANGEROUS)"
- depends on QNX4FS_FS && EXPERIMENTAL && BROKEN
- help
- Say Y if you want to test write support for QNX4 file systems.
-
- It's currently broken, so for now:
- answer N.
diff --git a/fs/qnx4/Makefile b/fs/qnx4/Makefile
index e4d408cc547..4a283b3f87f 100644
--- a/fs/qnx4/Makefile
+++ b/fs/qnx4/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_QNX4FS_FS) += qnx4.o
-qnx4-objs := inode.o dir.o namei.o file.o bitmap.o truncate.o
+qnx4-objs := inode.o dir.o namei.o bitmap.o
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index e1cd061a25f..0afba069d56 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -78,84 +78,3 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb)
return total_free;
}
-
-#ifdef CONFIG_QNX4FS_RW
-
-int qnx4_is_free(struct super_block *sb, long block)
-{
- int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1;
- int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size);
- struct buffer_head *bh;
- const char *g;
- int ret = -EIO;
-
- start += block / (QNX4_BLOCK_SIZE * 8);
- QNX4DEBUG(("qnx4: is_free requesting block [%lu], bitmap in block [%lu]\n",
- (unsigned long) block, (unsigned long) start));
- (void) size; /* CHECKME */
- bh = sb_bread(sb, start);
- if (bh == NULL) {
- return -EIO;
- }
- g = bh->b_data + (block % QNX4_BLOCK_SIZE);
- if (((*g) & (1 << (block % 8))) == 0) {
- QNX4DEBUG(("qnx4: is_free -> block is free\n"));
- ret = 1;
- } else {
- QNX4DEBUG(("qnx4: is_free -> block is busy\n"));
- ret = 0;
- }
- brelse(bh);
-
- return ret;
-}
-
-int qnx4_set_bitmap(struct super_block *sb, long block, int busy)
-{
- int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1;
- int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size);
- struct buffer_head *bh;
- char *g;
-
- start += block / (QNX4_BLOCK_SIZE * 8);
- QNX4DEBUG(("qnx4: set_bitmap requesting block [%lu], bitmap in block [%lu]\n",
- (unsigned long) block, (unsigned long) start));
- (void) size; /* CHECKME */
- bh = sb_bread(sb, start);
- if (bh == NULL) {
- return -EIO;
- }
- g = bh->b_data + (block % QNX4_BLOCK_SIZE);
- if (busy == 0) {
- (*g) &= ~(1 << (block % 8));
- } else {
- (*g) |= (1 << (block % 8));
- }
- mark_buffer_dirty(bh);
- brelse(bh);
-
- return 0;
-}
-
-static void qnx4_clear_inode(struct inode *inode)
-{
- struct qnx4_inode_entry *qnx4_ino = qnx4_raw_inode(inode);
- /* What for? */
- memset(qnx4_ino->di_fname, 0, sizeof qnx4_ino->di_fname);
- qnx4_ino->di_size = 0;
- qnx4_ino->di_num_xtnts = 0;
- qnx4_ino->di_mode = 0;
- qnx4_ino->di_status = 0;
-}
-
-void qnx4_free_inode(struct inode *inode)
-{
- if (inode->i_ino < 1) {
- printk("free_inode: inode 0 or nonexistent inode\n");
- return;
- }
- qnx4_clear_inode(inode);
- clear_inode(inode);
-}
-
-#endif
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 003c68f3238..86cc39cb139 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -85,9 +85,4 @@ const struct file_operations qnx4_dir_operations =
const struct inode_operations qnx4_dir_inode_operations =
{
.lookup = qnx4_lookup,
-#ifdef CONFIG_QNX4FS_RW
- .create = qnx4_create,
- .unlink = qnx4_unlink,
- .rmdir = qnx4_rmdir,
-#endif
};
diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c
deleted file mode 100644
index 09b170ac936..00000000000
--- a/fs/qnx4/file.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * QNX4 file system, Linux implementation.
- *
- * Version : 0.2.1
- *
- * Using parts of the xiafs filesystem.
- *
- * History :
- *
- * 25-05-1998 by Richard Frowijn : first release.
- * 21-06-1998 by Frank Denis : wrote qnx4_readpage to use generic_file_read.
- * 27-06-1998 by Frank Denis : file overwriting.
- */
-
-#include "qnx4.h"
-
-/*
- * We have mostly NULL's here: the current defaults are ok for
- * the qnx4 filesystem.
- */
-const struct file_operations qnx4_file_operations =
-{
- .llseek = generic_file_llseek,
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .mmap = generic_file_mmap,
- .splice_read = generic_file_splice_read,
-#ifdef CONFIG_QNX4FS_RW
- .write = do_sync_write,
- .aio_write = generic_file_aio_write,
- .fsync = simple_fsync,
-#endif
-};
-
-const struct inode_operations qnx4_file_inode_operations =
-{
-#ifdef CONFIG_QNX4FS_RW
- .truncate = qnx4_truncate,
-#endif
-};
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 681df5fcd16..d2cd1798d8c 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -28,73 +28,6 @@
static const struct super_operations qnx4_sops;
-#ifdef CONFIG_QNX4FS_RW
-
-static void qnx4_delete_inode(struct inode *inode)
-{
- QNX4DEBUG(("qnx4: deleting inode [%lu]\n", (unsigned long) inode->i_ino));
- truncate_inode_pages(&inode->i_data, 0);
- inode->i_size = 0;
- qnx4_truncate(inode);
- lock_kernel();
- qnx4_free_inode(inode);
- unlock_kernel();
-}
-
-static int qnx4_write_inode(struct inode *inode, int do_sync)
-{
- struct qnx4_inode_entry *raw_inode;
- int block, ino;
- struct buffer_head *bh;
- ino = inode->i_ino;
-
- QNX4DEBUG(("qnx4: write inode 1.\n"));
- if (inode->i_nlink == 0) {
- return 0;
- }
- if (!ino) {
- printk("qnx4: bad inode number on dev %s: %d is out of range\n",
- inode->i_sb->s_id, ino);
- return -EIO;
- }
- QNX4DEBUG(("qnx4: write inode 2.\n"));
- block = ino / QNX4_INODES_PER_BLOCK;
- lock_kernel();
- if (!(bh = sb_bread(inode->i_sb, block))) {
- printk("qnx4: major problem: unable to read inode from dev "
- "%s\n", inode->i_sb->s_id);
- unlock_kernel();
- return -EIO;
- }
- raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
- (ino % QNX4_INODES_PER_BLOCK);
- raw_inode->di_mode = cpu_to_le16(inode->i_mode);
- raw_inode->di_uid = cpu_to_le16(fs_high2lowuid(inode->i_uid));
- raw_inode->di_gid = cpu_to_le16(fs_high2lowgid(inode->i_gid));
- raw_inode->di_nlink = cpu_to_le16(inode->i_nlink);
- raw_inode->di_size = cpu_to_le32(inode->i_size);
- raw_inode->di_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
- raw_inode->di_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->di_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
- raw_inode->di_first_xtnt.xtnt_size = cpu_to_le32(inode->i_blocks);
- mark_buffer_dirty(bh);
- if (do_sync) {
- sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh)) {
- printk("qnx4: IO error syncing inode [%s:%08x]\n",
- inode->i_sb->s_id, ino);
- brelse(bh);
- unlock_kernel();
- return -EIO;
- }
- }
- brelse(bh);
- unlock_kernel();
- return 0;
-}
-
-#endif
-
static void qnx4_put_super(struct super_block *sb);
static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_destroy_inode(struct inode *inode);
@@ -108,10 +41,6 @@ static const struct super_operations qnx4_sops =
.put_super = qnx4_put_super,
.statfs = qnx4_statfs,
.remount_fs = qnx4_remount,
-#ifdef CONFIG_QNX4FS_RW
- .write_inode = qnx4_write_inode,
- .delete_inode = qnx4_delete_inode,
-#endif
};
static int qnx4_remount(struct super_block *sb, int *flags, char *data)
@@ -120,15 +49,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data)
qs = qnx4_sb(sb);
qs->Version = QNX4_VERSION;
-#ifndef CONFIG_QNX4FS_RW
*flags |= MS_RDONLY;
-#endif
- if (*flags & MS_RDONLY) {
- return 0;
- }
-
- mark_buffer_dirty(qs->sb_buf);
-
return 0;
}
@@ -354,9 +275,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
}
s->s_op = &qnx4_sops;
s->s_magic = QNX4_SUPER_MAGIC;
-#ifndef CONFIG_QNX4FS_RW
s->s_flags |= MS_RDONLY; /* Yup, read-only yet */
-#endif
qnx4_sb(s)->sb_buf = bh;
qnx4_sb(s)->sb = (struct qnx4_super_block *) bh->b_data;
@@ -489,8 +408,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
if (S_ISREG(inode->i_mode)) {
- inode->i_op = &qnx4_file_inode_operations;
- inode->i_fop = &qnx4_file_operations;
+ inode->i_fop = &generic_ro_fops;
inode->i_mapping->a_ops = &qnx4_aops;
qnx4_i(inode)->mmu_private = inode->i_size;
} else if (S_ISDIR(inode->i_mode)) {
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index 5972ed21493..ae1e7edbacd 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -134,108 +134,3 @@ out:
return NULL;
}
-
-#ifdef CONFIG_QNX4FS_RW
-int qnx4_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
-{
- QNX4DEBUG(("qnx4: qnx4_create\n"));
- if (dir == NULL) {
- return -ENOENT;
- }
- return -ENOSPC;
-}
-
-int qnx4_rmdir(struct inode *dir, struct dentry *dentry)
-{
- struct buffer_head *bh;
- struct qnx4_inode_entry *de;
- struct inode *inode;
- int retval;
- int ino;
-
- QNX4DEBUG(("qnx4: qnx4_rmdir [%s]\n", dentry->d_name.name));
- lock_kernel();
- bh = qnx4_find_entry(dentry->d_name.len, dir, dentry->d_name.name,
- &de, &ino);
- if (bh == NULL) {
- unlock_kernel();
- return -ENOENT;
- }
- inode = dentry->d_inode;
- if (inode->i_ino != ino) {
- retval = -EIO;
- goto end_rmdir;
- }
-#if 0
- if (!empty_dir(inode)) {
- retval = -ENOTEMPTY;
- goto end_rmdir;
- }
-#endif
- if (inode->i_nlink != 2) {
- QNX4DEBUG(("empty directory has nlink!=2 (%d)\n", inode->i_nlink));
- }
- QNX4DEBUG(("qnx4: deleting directory\n"));
- de->di_status = 0;
- memset(de->di_fname, 0, sizeof de->di_fname);
- de->di_mode = 0;
- mark_buffer_dirty_inode(bh, dir);
- clear_nlink(inode);
- mark_inode_dirty(inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- inode_dec_link_count(dir);
- retval = 0;
-
- end_rmdir:
- brelse(bh);
-
- unlock_kernel();
- return retval;
-}
-
-int qnx4_unlink(struct inode *dir, struct dentry *dentry)
-{
- struct buffer_head *bh;
- struct qnx4_inode_entry *de;
- struct inode *inode;
- int retval;
- int ino;
-
- QNX4DEBUG(("qnx4: qnx4_unlink [%s]\n", dentry->d_name.name));
- lock_kernel();
- bh = qnx4_find_entry(dentry->d_name.len, dir, dentry->d_name.name,
- &de, &ino);
- if (bh == NULL) {
- unlock_kernel();
- return -ENOENT;
- }
- inode = dentry->d_inode;
- if (inode->i_ino != ino) {
- retval = -EIO;
- goto end_unlink;
- }
- retval = -EPERM;
- if (!inode->i_nlink) {
- QNX4DEBUG(("Deleting nonexistent file (%s:%lu), %d\n",
- inode->i_sb->s_id,
- inode->i_ino, inode->i_nlink));
- inode->i_nlink = 1;
- }
- de->di_status = 0;
- memset(de->di_fname, 0, sizeof de->di_fname);
- de->di_mode = 0;
- mark_buffer_dirty_inode(bh, dir);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
- mark_inode_dirty(dir);
- inode->i_ctime = dir->i_ctime;
- inode_dec_link_count(inode);
- retval = 0;
-
-end_unlink:
- unlock_kernel();
- brelse(bh);
-
- return retval;
-}
-#endif
diff --git a/fs/qnx4/qnx4.h b/fs/qnx4/qnx4.h
index 9efc089454f..33a60858203 100644
--- a/fs/qnx4/qnx4.h
+++ b/fs/qnx4/qnx4.h
@@ -29,17 +29,9 @@ extern unsigned long qnx4_block_map(struct inode *inode, long iblock);
extern struct buffer_head *qnx4_bread(struct inode *, int, int);
-extern const struct inode_operations qnx4_file_inode_operations;
extern const struct inode_operations qnx4_dir_inode_operations;
-extern const struct file_operations qnx4_file_operations;
extern const struct file_operations qnx4_dir_operations;
extern int qnx4_is_free(struct super_block *sb, long block);
-extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy);
-extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd);
-extern void qnx4_truncate(struct inode *inode);
-extern void qnx4_free_inode(struct inode *inode);
-extern int qnx4_unlink(struct inode *dir, struct dentry *dentry);
-extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb)
{
diff --git a/fs/qnx4/truncate.c b/fs/qnx4/truncate.c
deleted file mode 100644
index d94d9ee241f..00000000000
--- a/fs/qnx4/truncate.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * QNX4 file system, Linux implementation.
- *
- * Version : 0.1
- *
- * Using parts of the xiafs filesystem.
- *
- * History :
- *
- * 30-06-1998 by Frank DENIS : ugly filler.
- */
-
-#include <linux/smp_lock.h>
-#include "qnx4.h"
-
-#ifdef CONFIG_QNX4FS_RW
-
-void qnx4_truncate(struct inode *inode)
-{
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))) {
- return;
- }
- lock_kernel();
- if (!(S_ISDIR(inode->i_mode))) {
- /* TODO */
- }
- QNX4DEBUG(("qnx4: qnx4_truncate called\n"));
- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
- mark_inode_dirty(inode);
- unlock_kernel();
-}
-
-#endif
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 38f7bd559f3..39b49c42a7e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1839,7 +1839,7 @@ EXPORT_SYMBOL(dquot_commit_info);
/*
* Definitions of diskquota operations.
*/
-struct dquot_operations dquot_operations = {
+const struct dquot_operations dquot_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
@@ -2461,7 +2461,7 @@ out:
}
EXPORT_SYMBOL(vfs_set_dqinfo);
-struct quotactl_ops vfs_quotactl_ops = {
+const struct quotactl_ops vfs_quotactl_ops = {
.quota_on = vfs_quota_on,
.quota_off = vfs_quota_off,
.quota_sync = vfs_quota_sync,
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 11f0c06316d..32fae4040eb 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -69,14 +69,11 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
/* make various checks */
order = get_order(newsize);
if (unlikely(order >= MAX_ORDER))
- goto too_big;
+ return -EFBIG;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && newsize > limit)
- goto fsize_exceeded;
-
- if (newsize > inode->i_sb->s_maxbytes)
- goto too_big;
+ ret = inode_newsize_ok(inode, newsize);
+ if (ret)
+ return ret;
i_size_write(inode, newsize);
@@ -118,12 +115,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
return 0;
- fsize_exceeded:
- send_sig(SIGXFSZ, current, 0);
- too_big:
- return -EFBIG;
-
- add_error:
+add_error:
while (loop < npages)
__free_page(pages + loop++);
return ret;
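
For reference, a minimal sketch of the pattern the fs/ramfs/file-nommu.c hunk switches to: the open-coded RLIMIT_FSIZE and s_maxbytes checks (including the SIGXFSZ signal) are delegated to inode_newsize_ok(). The example_expand() helper below is illustrative and not part of the patch.

#include <linux/fs.h>

/* Illustrative caller: mirrors the converted ramfs_nommu_expand_for_mapping() */
static int example_expand(struct inode *inode, loff_t newsize)
{
        int ret;

        /* checks RLIMIT_FSIZE (sending SIGXFSZ) and sb->s_maxbytes */
        ret = inode_newsize_ok(inode, newsize);
        if (ret)
                return ret;             /* typically -EFBIG */

        i_size_write(inode, newsize);
        return 0;
}
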
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index a7f0110fca4..a6090aa1a7c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -34,12 +34,10 @@
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/parser.h>
+#include <linux/magic.h>
#include <asm/uaccess.h>
#include "internal.h"
-/* some random number */
-#define RAMFS_MAGIC 0x858458f6
-
#define RAMFS_DEFAULT_MODE 0755
static const struct super_operations ramfs_ops;
diff --git a/fs/read_write.c b/fs/read_write.c
index 6c8c55dec2b..3ac28987f22 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -839,9 +839,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
pos = *ppos;
- retval = -EINVAL;
- if (unlikely(pos < 0))
- goto fput_out;
if (unlikely(pos + count > max)) {
retval = -EOVERFLOW;
if (pos >= max)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 7adea74d6a8..f0ad05f3802 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -612,7 +612,7 @@ static int reiserfs_mark_dquot_dirty(struct dquot *);
static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
-static struct dquot_operations reiserfs_quota_operations = {
+static const struct dquot_operations reiserfs_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
@@ -629,7 +629,7 @@ static struct dquot_operations reiserfs_quota_operations = {
.destroy_dquot = dquot_destroy,
};
-static struct quotactl_ops reiserfs_qctl_operations = {
+static const struct quotactl_ops reiserfs_qctl_operations = {
.quota_on = reiserfs_quota_on,
.quota_off = vfs_quota_off,
.quota_sync = vfs_quota_sync,
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 4ab3c03d8f9..c117fa80d1e 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -284,7 +284,7 @@ static const struct file_operations romfs_dir_operations = {
.readdir = romfs_readdir,
};
-static struct inode_operations romfs_dir_inode_operations = {
+static const struct inode_operations romfs_dir_inode_operations = {
.lookup = romfs_lookup,
};
@@ -528,7 +528,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
pos = (ROMFH_SIZE + len + 1 + ROMFH_PAD) & ROMFH_MASK;
root = romfs_iget(sb, pos);
- if (!root)
+ if (IS_ERR(root))
goto error;
sb->s_root = d_alloc_root(root);
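
A minimal illustration of the idiom behind the romfs change above (the function below is not in the patch; only the names are reused from the hunk): romfs_iget() reports failure as ERR_PTR(-errno) rather than NULL, so the result must be tested with IS_ERR() and decoded with PTR_ERR().

#include <linux/err.h>
#include <linux/fs.h>

/* Illustrative only */
static int example_lookup_root(struct super_block *sb, unsigned long pos)
{
        struct inode *root = romfs_iget(sb, pos);

        if (IS_ERR(root))               /* failure is ERR_PTR(), never NULL */
                return PTR_ERR(root);   /* e.g. -EIO */
        iput(root);
        return 0;
}
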
diff --git a/fs/select.c b/fs/select.c
index 8084834e123..a201fc37022 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -41,22 +41,28 @@
* better solutions..
*/
+#define MAX_SLACK (100 * NSEC_PER_MSEC)
+
static long __estimate_accuracy(struct timespec *tv)
{
long slack;
int divfactor = 1000;
+ if (tv->tv_sec < 0)
+ return 0;
+
if (task_nice(current) > 0)
divfactor = divfactor / 5;
+ if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
+ return MAX_SLACK;
+
slack = tv->tv_nsec / divfactor;
slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
- if (slack > 100 * NSEC_PER_MSEC)
- slack = 100 * NSEC_PER_MSEC;
+ if (slack > MAX_SLACK)
+ return MAX_SLACK;
- if (slack < 0)
- slack = 0;
return slack;
}
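
To make the new bound concrete, with the default divfactor of 1000: a 10 ms timeout yields 10 ms / 1000 = 10 µs of slack, while any timeout of at least MAX_SLACK / (NSEC_PER_SEC / 1000) = 100 seconds already lands on the 100 ms ceiling, so the added early return preserves the old cap while avoiding an overflow of the long multiplication for very large tv_sec (for niced tasks divfactor drops to 200 and the threshold to 20 seconds). Negative timeouts now return zero slack up front, replacing the removed slack < 0 clamp.
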
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 6c959275f2d..eae7d9dbf3f 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -429,20 +429,21 @@ EXPORT_SYMBOL(mangle_path);
*/
int seq_path(struct seq_file *m, struct path *path, char *esc)
{
- if (m->count < m->size) {
- char *s = m->buf + m->count;
- char *p = d_path(path, s, m->size - m->count);
+ char *buf;
+ size_t size = seq_get_buf(m, &buf);
+ int res = -1;
+
+ if (size) {
+ char *p = d_path(path, buf, size);
if (!IS_ERR(p)) {
- s = mangle_path(s, p, esc);
- if (s) {
- p = m->buf + m->count;
- m->count = s - m->buf;
- return s - p;
- }
+ char *end = mangle_path(buf, p, esc);
+ if (end)
+ res = end - buf;
}
}
- m->count = m->size;
- return -1;
+ seq_commit(m, res);
+
+ return res;
}
EXPORT_SYMBOL(seq_path);
@@ -454,26 +455,28 @@ EXPORT_SYMBOL(seq_path);
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *esc)
{
- int err = -ENAMETOOLONG;
- if (m->count < m->size) {
- char *s = m->buf + m->count;
+ char *buf;
+ size_t size = seq_get_buf(m, &buf);
+ int res = -ENAMETOOLONG;
+
+ if (size) {
char *p;
spin_lock(&dcache_lock);
- p = __d_path(path, root, s, m->size - m->count);
+ p = __d_path(path, root, buf, size);
spin_unlock(&dcache_lock);
- err = PTR_ERR(p);
+ res = PTR_ERR(p);
if (!IS_ERR(p)) {
- s = mangle_path(s, p, esc);
- if (s) {
- p = m->buf + m->count;
- m->count = s - m->buf;
- return 0;
- }
+ char *end = mangle_path(buf, p, esc);
+ if (end)
+ res = end - buf;
+ else
+ res = -ENAMETOOLONG;
}
}
- m->count = m->size;
- return err;
+ seq_commit(m, res);
+
+ return res < 0 ? res : 0;
}
/*
@@ -481,20 +484,21 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
*/
int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
{
- if (m->count < m->size) {
- char *s = m->buf + m->count;
- char *p = dentry_path(dentry, s, m->size - m->count);
+ char *buf;
+ size_t size = seq_get_buf(m, &buf);
+ int res = -1;
+
+ if (size) {
+ char *p = dentry_path(dentry, buf, size);
if (!IS_ERR(p)) {
- s = mangle_path(s, p, esc);
- if (s) {
- p = m->buf + m->count;
- m->count = s - m->buf;
- return s - p;
- }
+ char *end = mangle_path(buf, p, esc);
+ if (end)
+ res = end - buf;
}
}
- m->count = m->size;
- return -1;
+ seq_commit(m, res);
+
+ return res;
}
int seq_bitmap(struct seq_file *m, const unsigned long *bits,
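
For context, a minimal sketch of how the seq_get_buf()/seq_commit() pair used above is typically consumed: the caller writes directly into the seq_file buffer, then commits the number of bytes produced, or a negative value to flag overflow so the core enlarges the buffer and retries. The example_seq_emit() function and its snprintf payload are illustrative, not taken from the patch.

#include <linux/kernel.h>
#include <linux/seq_file.h>

/* Illustrative only */
static void example_seq_emit(struct seq_file *m, const char *name)
{
        char *buf;
        size_t size = seq_get_buf(m, &buf);     /* bytes left in m's buffer */
        int res = -1;

        if (size) {
                int n = snprintf(buf, size, "name=%s\n", name);
                if (n >= 0 && (size_t)n < size)
                        res = n;                /* fits: commit n bytes */
        }
        seq_commit(m, res);                     /* negative res marks overflow */
}
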
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 1402d2d54f5..1c4c8f08997 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -459,14 +459,8 @@ smb_show_options(struct seq_file *s, struct vfsmount *m)
static void
smb_unload_nls(struct smb_sb_info *server)
{
- if (server->remote_nls) {
- unload_nls(server->remote_nls);
- server->remote_nls = NULL;
- }
- if (server->local_nls) {
- unload_nls(server->local_nls);
- server->local_nls = NULL;
- }
+ unload_nls(server->remote_nls);
+ unload_nls(server->local_nls);
}
static void
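
The smbfs simplification above assumes unload_nls() itself tolerates a NULL table (a change presumably made elsewhere in the same series), so the per-caller guards and pointer resets become unnecessary. A plausible shape of the helper under that assumption, shown for illustration only (see fs/nls/nls_base.c for the real definition):

void unload_nls(struct nls_table *nls)
{
        if (nls)
                module_put(nls->owner);
}
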
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index 9468168b9af..71c29b6670b 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -509,7 +509,7 @@ date_unix2dos(struct smb_sb_info *server,
month = 2;
} else {
nl_day = (year & 3) || day <= 59 ? day : day - 1;
- for (month = 0; month < 12; month++)
+ for (month = 1; month < 12; month++)
if (day_n[month] > nl_day)
break;
}
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index cb5fc57e370..6c197ef53ad 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -44,7 +44,7 @@
#include "squashfs.h"
static struct file_system_type squashfs_fs_type;
-static struct super_operations squashfs_super_ops;
+static const struct super_operations squashfs_super_ops;
static int supported_squashfs_filesystem(short major, short minor, short comp)
{
@@ -444,7 +444,7 @@ static struct file_system_type squashfs_fs_type = {
.fs_flags = FS_REQUIRES_DEV
};
-static struct super_operations squashfs_super_ops = {
+static const struct super_operations squashfs_super_ops = {
.alloc_inode = squashfs_alloc_inode,
.destroy_inode = squashfs_destroy_inode,
.statfs = squashfs_statfs,
diff --git a/fs/super.c b/fs/super.c
index b03fea8fbfb..19eb70b374b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -54,7 +54,7 @@ DEFINE_SPINLOCK(sb_lock);
static struct super_block *alloc_super(struct file_system_type *type)
{
struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
- static struct super_operations default_op;
+ static const struct super_operations default_op;
if (s) {
if (security_sb_alloc(s)) {
@@ -465,6 +465,48 @@ rescan:
}
EXPORT_SYMBOL(get_super);
+
+/**
+ * get_active_super - get an active reference to the superblock of a device
+ * @bdev: device to get the superblock for
+ *
+ * Scans the superblock list and finds the superblock of the file system
+ * mounted on the device given. Returns the superblock with an active
+ * reference and s_umount held exclusively or %NULL if none was found.
+ */
+struct super_block *get_active_super(struct block_device *bdev)
+{
+ struct super_block *sb;
+
+ if (!bdev)
+ return NULL;
+
+ spin_lock(&sb_lock);
+ list_for_each_entry(sb, &super_blocks, s_list) {
+ if (sb->s_bdev != bdev)
+ continue;
+
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+ down_write(&sb->s_umount);
+ if (sb->s_root) {
+ spin_lock(&sb_lock);
+ if (sb->s_count > S_BIAS) {
+ atomic_inc(&sb->s_active);
+ sb->s_count--;
+ spin_unlock(&sb_lock);
+ return sb;
+ }
+ spin_unlock(&sb_lock);
+ }
+ up_write(&sb->s_umount);
+ put_super(sb);
+ yield();
+ spin_lock(&sb_lock);
+ }
+ spin_unlock(&sb_lock);
+ return NULL;
+}
struct super_block * user_get_super(dev_t dev)
{
@@ -527,11 +569,15 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
int retval;
int remount_rw;
-
+
+ if (sb->s_frozen != SB_UNFROZEN)
+ return -EBUSY;
+
#ifdef CONFIG_BLOCK
if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
return -EACCES;
#endif
+
if (flags & MS_RDONLY)
acct_auto_close(sb);
shrink_dcache_sb(sb);
@@ -743,9 +789,14 @@ int get_sb_bdev(struct file_system_type *fs_type,
* will protect the lockfs code from trying to start a snapshot
* while we are mounting
*/
- down(&bdev->bd_mount_sem);
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ error = -EBUSY;
+ goto error_bdev;
+ }
s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
- up(&bdev->bd_mount_sem);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
if (IS_ERR(s))
goto error_s;
@@ -892,6 +943,16 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
if (error)
goto out_sb;
+ /*
+ * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
+ * but s_maxbytes was an unsigned long long for many releases. Throw
+ * this warning for a little while to try and catch filesystems that
+ * violate this rule. This warning should be either removed or
+ * converted to a BUG() in 2.6.34.
+ */
+ WARN((mnt->mnt_sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
+ "negative value (%lld)\n", type->name, mnt->mnt_sb->s_maxbytes);
+
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
up_write(&mnt->mnt_sb->s_umount);
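
To make the new s_maxbytes warning concrete: s_maxbytes is a signed loff_t, so a filesystem that initializes it with something like ~0ULL ends up with a negative value and trips the WARN; the conventional upper bound is MAX_LFS_FILESIZE. A minimal, illustrative fill_super fragment (example_fill_super() is not part of the patch):

#include <linux/fs.h>
#include <linux/mm.h>

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
        sb->s_maxbytes = MAX_LFS_FILESIZE;      /* not ~0ULL: loff_t is signed */
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        return 0;
}
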
diff --git a/fs/sync.c b/fs/sync.c
index c08467a5d7c..d104591b066 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -183,6 +183,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
ret = err;
return ret;
}
+EXPORT_SYMBOL(file_fsync);
/**
* vfs_fsync_range - helper to sync a range of data & metadata to disk
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 7998cc37825..195830f4756 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -79,7 +79,7 @@ enum {
};
static const struct inode_operations none_inode_operations;
-static struct address_space_operations none_address_operations;
+static const struct address_space_operations none_address_operations;
static const struct file_operations none_file_operations;
/**
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index d5e5559e31d..381854461b2 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1635,4 +1635,5 @@ const struct address_space_operations xfs_address_space_operations = {
.direct_IO = xfs_vm_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
};
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index cb6e2cca214..9e41f91aa26 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -150,7 +150,7 @@ xfs_fs_set_xquota(
return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq);
}
-struct quotactl_ops xfs_quotactl_operations = {
+const struct quotactl_ops xfs_quotactl_operations = {
.quota_sync = xfs_fs_quota_sync,
.get_xstate = xfs_fs_get_xstate,
.set_xstate = xfs_fs_set_xstate,
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 5d7c60ac77b..bdd41c8c342 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -67,7 +67,7 @@
#include <linux/freezer.h>
#include <linux/parser.h>
-static struct super_operations xfs_super_operations;
+static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
@@ -1536,7 +1536,7 @@ xfs_fs_get_sb(
mnt);
}
-static struct super_operations xfs_super_operations = {
+static const struct super_operations xfs_super_operations = {
.alloc_inode = xfs_fs_alloc_inode,
.destroy_inode = xfs_fs_destroy_inode,
.write_inode = xfs_fs_write_inode,
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 5a2ea3a2178..18175ebd58e 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -93,7 +93,7 @@ extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern const struct export_operations xfs_export_operations;
extern struct xattr_handler *xfs_xattr_handlers[];
-extern struct quotactl_ops xfs_quotactl_operations;
+extern const struct quotactl_ops xfs_quotactl_operations;
#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info))
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 916c0ffb608..c5bc67c4e3b 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -26,7 +26,6 @@ STATIC int
xfs_stats_clear_proc_handler(
ctl_table *ctl,
int write,
- struct file *filp,
void __user *buffer,
size_t *lenp,
loff_t *ppos)
@@ -34,7 +33,7 @@ xfs_stats_clear_proc_handler(
int c, ret, *valp = ctl->data;
__uint32_t vn_active;
- ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index c4ea51b55dc..f52ac276277 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -117,7 +117,7 @@ struct getbmapx {
#define BMV_IF_VALID \
(BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC)
-/* bmv_oflags values - returned for for each non-header segment */
+/* bmv_oflags values - returned for each non-header segment */
#define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
#define BMV_OF_DELALLOC 0x2 /* segment = delayed allocation */
#define BMV_OF_LAST 0x4 /* segment is the last in the file */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1b3b36068ca..3cd9ccdcbd8 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -30,8 +30,6 @@
#include <acpi/acpi.h>
-#define PREFIX "ACPI: "
-
/* TBD: Make dynamic */
#define ACPI_MAX_HANDLES 10
struct acpi_handle_list {
@@ -72,7 +70,6 @@ enum acpi_bus_device_type {
ACPI_BUS_TYPE_POWER,
ACPI_BUS_TYPE_PROCESSOR,
ACPI_BUS_TYPE_THERMAL,
- ACPI_BUS_TYPE_SYSTEM,
ACPI_BUS_TYPE_POWER_BUTTON,
ACPI_BUS_TYPE_SLEEP_BUTTON,
ACPI_BUS_DEVICE_TYPE_COUNT
@@ -89,7 +86,6 @@ struct acpi_device;
typedef int (*acpi_op_add) (struct acpi_device * device);
typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
typedef int (*acpi_op_start) (struct acpi_device * device);
-typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
typedef int (*acpi_op_suspend) (struct acpi_device * device,
pm_message_t state);
typedef int (*acpi_op_resume) (struct acpi_device * device);
@@ -106,7 +102,6 @@ struct acpi_device_ops {
acpi_op_add add;
acpi_op_remove remove;
acpi_op_start start;
- acpi_op_stop stop;
acpi_op_suspend suspend;
acpi_op_resume resume;
acpi_op_bind bind;
@@ -146,10 +141,7 @@ struct acpi_device_status {
struct acpi_device_flags {
u32 dynamic_status:1;
- u32 hardware_id:1;
- u32 compatible_ids:1;
u32 bus_address:1;
- u32 unique_id:1;
u32 removable:1;
u32 ejectable:1;
u32 lockable:1;
@@ -158,7 +150,7 @@ struct acpi_device_flags {
u32 performance_manageable:1;
u32 wake_capable:1; /* Wakeup(_PRW) supported? */
u32 force_power_state:1;
- u32 reserved:19;
+ u32 reserved:22;
};
/* File System */
@@ -173,25 +165,26 @@ struct acpi_device_dir {
typedef char acpi_bus_id[8];
typedef unsigned long acpi_bus_address;
-typedef char acpi_hardware_id[15];
-typedef char acpi_unique_id[9];
typedef char acpi_device_name[40];
typedef char acpi_device_class[20];
+struct acpi_hardware_id {
+ struct list_head list;
+ char *id;
+};
+
struct acpi_device_pnp {
acpi_bus_id bus_id; /* Object name */
acpi_bus_address bus_address; /* _ADR */
- acpi_hardware_id hardware_id; /* _HID */
- struct acpi_compatible_id_list *cid_list; /* _CIDs */
- acpi_unique_id unique_id; /* _UID */
+ char *unique_id; /* _UID */
+ struct list_head ids; /* _HID and _CIDs */
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
#define acpi_device_adr(d) ((d)->pnp.bus_address)
-#define acpi_device_hid(d) ((d)->pnp.hardware_id)
-#define acpi_device_uid(d) ((d)->pnp.unique_id)
+char *acpi_device_hid(struct acpi_device *device);
#define acpi_device_name(d) ((d)->pnp.device_name)
#define acpi_device_class(d) ((d)->pnp.device_class)
@@ -268,7 +261,8 @@ struct acpi_device_wakeup {
/* Device */
struct acpi_device {
- acpi_handle handle;
+ int device_type;
+ acpi_handle handle; /* no handle for fixed hardware */
struct acpi_device *parent;
struct list_head children;
struct list_head node;
@@ -314,7 +308,7 @@ struct acpi_bus_event {
extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
-void acpi_bus_private_data_handler(acpi_handle, u32, void *);
+void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
@@ -327,7 +321,9 @@ extern void unregister_acpi_bus_notifier(struct notifier_block *nb);
*/
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context);
+void acpi_bus_data_handler(acpi_handle handle, void *context);
+acpi_status acpi_bus_get_status_handle(acpi_handle handle,
+ unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
int acpi_bus_get_power(acpi_handle handle, int *state);
int acpi_bus_set_power(acpi_handle handle, int state);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index ab0b85cf21f..eb0e7189075 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -245,6 +245,9 @@ acpi_status acpi_osi_invalidate(char* interface);
acpi_status
acpi_os_validate_address(u8 space_id, acpi_physical_address address,
acpi_size length, char *name);
+acpi_status
+acpi_os_invalidate_address(u8 space_id, acpi_physical_address address,
+ acpi_size length);
u64 acpi_os_get_timer(void);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 82ec6a3c050..e723b0fd8e4 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20090521
+#define ACPI_CA_VERSION 0x20090903
#include "actypes.h"
#include "actbl.h"
@@ -64,6 +64,7 @@ extern u8 acpi_gbl_enable_interpreter_slack;
extern u8 acpi_gbl_all_methods_serialized;
extern u8 acpi_gbl_create_osi_method;
extern u8 acpi_gbl_leave_wake_gpes_disabled;
+extern u8 acpi_gbl_use_default_register_widths;
extern acpi_name acpi_gbl_trace_method_name;
extern u32 acpi_gbl_trace_flags;
@@ -199,7 +200,8 @@ acpi_evaluate_object_typed(acpi_handle object,
acpi_object_type return_type);
acpi_status
-acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer);
+acpi_get_object_info(acpi_handle handle,
+ struct acpi_device_info **return_buffer);
acpi_status acpi_install_method(u8 *buffer);
@@ -359,9 +361,9 @@ acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
#endif
-acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);
+acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);
-acpi_status acpi_write(u32 value, struct acpi_generic_address *reg);
+acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 222733d01f3..1b658795260 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -44,9 +44,23 @@
#ifndef __ACTBL_H__
#define __ACTBL_H__
+/*******************************************************************************
+ *
+ * Fundamental ACPI tables
+ *
+ * This file contains definitions for the ACPI tables that are directly consumed
+ * by ACPICA. All other tables are consumed by the OS-dependent ACPI-related
+ * device drivers and other OS support code.
+ *
+ * The RSDP and FACS do not use the common ACPI table header. All other ACPI
+ * tables use the header.
+ *
+ ******************************************************************************/
+
/*
- * Values for description table header signatures. Useful because they make
- * it more difficult to inadvertently type in the wrong signature.
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
*/
#define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */
#define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */
@@ -65,11 +79,6 @@
#pragma pack(1)
/*
- * These are the ACPI tables that are directly consumed by the subsystem.
- *
- * The RSDP and FACS do not use the common ACPI table header. All other ACPI
- * tables use the header.
- *
* Note about bitfields: The u8 type is used for bitfields in ACPI tables.
* This is the only type that is even remotely portable. Anything else is not
* portable, so do not use any other bitfield types.
@@ -77,9 +86,8 @@
/*******************************************************************************
*
- * ACPI Table Header. This common header is used by all tables except the
- * RSDP and FACS. The define is used for direct inclusion of header into
- * other ACPI tables
+ * Master ACPI Table Header. This common header is used by all ACPI tables
+ * except the RSDP and FACS.
*
******************************************************************************/
@@ -95,13 +103,16 @@ struct acpi_table_header {
u32 asl_compiler_revision; /* ASL compiler version */
};
-/*
+/*******************************************************************************
+ *
* GAS - Generic Address Structure (ACPI 2.0+)
*
* Note: Since this structure is used in the ACPI tables, it is byte aligned.
- * If misalignment is not supported, access to the Address field must be
- * performed with care.
- */
+ * If misaligned access is not supported by the hardware, accesses to the
+ * 64-bit Address field must be performed with care.
+ *
+ ******************************************************************************/
+
struct acpi_generic_address {
u8 space_id; /* Address space where struct or register exists */
u8 bit_width; /* Size in bits of given register */
@@ -113,6 +124,7 @@ struct acpi_generic_address {
/*******************************************************************************
*
* RSDP - Root System Description Pointer (Signature is "RSD PTR ")
+ * Version 2
*
******************************************************************************/
@@ -133,6 +145,7 @@ struct acpi_table_rsdp {
/*******************************************************************************
*
* RSDT/XSDT - Root System Description Tables
+ * Version 1 (both)
*
******************************************************************************/
@@ -161,21 +174,29 @@ struct acpi_table_facs {
u32 flags;
u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */
u8 version; /* Version of this table (ACPI 2.0+) */
- u8 reserved[31]; /* Reserved, must be zero */
+ u8 reserved[3]; /* Reserved, must be zero */
+ u32 ospm_flags; /* Flags to be set by OSPM (ACPI 4.0) */
+ u8 reserved1[24]; /* Reserved, must be zero */
};
-/* Flag macros */
+/* Masks for global_lock flag field above */
-#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */
+#define ACPI_GLOCK_PENDING (1) /* 00: Pending global lock ownership */
+#define ACPI_GLOCK_OWNED (1<<1) /* 01: Global lock is owned */
-/* Global lock flags */
+/* Masks for Flags field above */
-#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */
-#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */
+#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */
+#define ACPI_FACS_64BIT_WAKE (1<<1) /* 01: 64-bit wake vector supported (ACPI 4.0) */
+
+/* Masks for ospm_flags field above */
+
+#define ACPI_FACS_64BIT_ENVIRONMENT (1) /* 00: 64-bit wake environment is required (ACPI 4.0) */
/*******************************************************************************
*
* FADT - Fixed ACPI Description Table (Signature "FACP")
+ * Version 4
*
******************************************************************************/
@@ -236,7 +257,7 @@ struct acpi_table_fadt {
struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
};
-/* FADT Boot Architecture Flags (boot_flags) */
+/* Masks for FADT Boot Architecture Flags (boot_flags) */
#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */
#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */
@@ -246,7 +267,7 @@ struct acpi_table_fadt {
#define FADT2_REVISION_ID 3
-/* FADT flags */
+/* Masks for FADT flags */
#define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */
#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */
@@ -269,7 +290,7 @@ struct acpi_table_fadt {
#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
-/* FADT Prefered Power Management Profiles */
+/* Values for preferred_profile (Preferred Power Management Profiles) */
enum acpi_prefered_pm_profiles {
PM_UNSPECIFIED = 0,
@@ -287,14 +308,16 @@ enum acpi_prefered_pm_profiles {
#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
+/*
+ * Internal table-related structures
+ */
union acpi_name_union {
u32 integer;
char ascii[4];
};
-/*
- * Internal ACPI Table Descriptor. One per ACPI table
- */
+/* Internal ACPI Table Descriptor. One per ACPI table. */
+
struct acpi_table_desc {
acpi_physical_address address;
struct acpi_table_header *pointer;
@@ -304,7 +327,7 @@ struct acpi_table_desc {
u8 flags;
};
-/* Flags for above */
+/* Masks for Flags field above */
#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
#define ACPI_TABLE_ORIGIN_MAPPED (1)
@@ -318,5 +341,6 @@ struct acpi_table_desc {
*/
#include <acpi/actbl1.h>
+#include <acpi/actbl2.h>
#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 59ade075247..0b9b430b092 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -46,41 +46,31 @@
/*******************************************************************************
*
- * Additional ACPI Tables
+ * Additional ACPI Tables (1)
*
* These tables are not consumed directly by the ACPICA subsystem, but are
* included here to support device drivers and the AML disassembler.
*
+ * The tables in this file are fully defined within the ACPI specification.
+ *
******************************************************************************/
/*
- * Values for description table header signatures. Useful because they make
- * it more difficult to inadvertently type in the wrong signature.
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
*/
-#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */
-#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
-#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
-#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
#define ACPI_SIG_EINJ "EINJ" /* Error Injection table */
#define ACPI_SIG_ERST "ERST" /* Error Record Serialization Table */
#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
-#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
-#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
-#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
+#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
-#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
-#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
-#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
-#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
-#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
-#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
-#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -94,14 +84,20 @@
* portable, so do not use any other bitfield types.
*/
-/* Common Subtable header (used in MADT, SRAT, etc.) */
+/*******************************************************************************
+ *
+ * Common subtable headers
+ *
+ ******************************************************************************/
+
+/* Generic subtable header (used in MADT, SRAT, etc.) */
struct acpi_subtable_header {
u8 type;
u8 length;
};
-/* Common Subtable header for WHEA tables (EINJ, ERST, WDAT) */
+/* Subtable header for WHEA tables (EINJ, ERST, WDAT) */
struct acpi_whea_header {
u8 action;
@@ -115,116 +111,8 @@ struct acpi_whea_header {
/*******************************************************************************
*
- * ASF - Alert Standard Format table (Signature "ASF!")
- *
- * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003
- *
- ******************************************************************************/
-
-struct acpi_table_asf {
- struct acpi_table_header header; /* Common ACPI table header */
-};
-
-/* ASF subtable header */
-
-struct acpi_asf_header {
- u8 type;
- u8 reserved;
- u16 length;
-};
-
-/* Values for Type field above */
-
-enum acpi_asf_type {
- ACPI_ASF_TYPE_INFO = 0,
- ACPI_ASF_TYPE_ALERT = 1,
- ACPI_ASF_TYPE_CONTROL = 2,
- ACPI_ASF_TYPE_BOOT = 3,
- ACPI_ASF_TYPE_ADDRESS = 4,
- ACPI_ASF_TYPE_RESERVED = 5
-};
-
-/*
- * ASF subtables
- */
-
-/* 0: ASF Information */
-
-struct acpi_asf_info {
- struct acpi_asf_header header;
- u8 min_reset_value;
- u8 min_poll_interval;
- u16 system_id;
- u32 mfg_id;
- u8 flags;
- u8 reserved2[3];
-};
-
-/* 1: ASF Alerts */
-
-struct acpi_asf_alert {
- struct acpi_asf_header header;
- u8 assert_mask;
- u8 deassert_mask;
- u8 alerts;
- u8 data_length;
-};
-
-struct acpi_asf_alert_data {
- u8 address;
- u8 command;
- u8 mask;
- u8 value;
- u8 sensor_type;
- u8 type;
- u8 offset;
- u8 source_type;
- u8 severity;
- u8 sensor_number;
- u8 entity;
- u8 instance;
-};
-
-/* 2: ASF Remote Control */
-
-struct acpi_asf_remote {
- struct acpi_asf_header header;
- u8 controls;
- u8 data_length;
- u16 reserved2;
-};
-
-struct acpi_asf_control_data {
- u8 function;
- u8 address;
- u8 command;
- u8 value;
-};
-
-/* 3: ASF RMCP Boot Options */
-
-struct acpi_asf_rmcp {
- struct acpi_asf_header header;
- u8 capabilities[7];
- u8 completion_code;
- u32 enterprise_id;
- u8 command;
- u16 parameter;
- u16 boot_options;
- u16 oem_parameters;
-};
-
-/* 4: ASF Address */
-
-struct acpi_asf_address {
- struct acpi_asf_header header;
- u8 eprom_address;
- u8 devices;
-};
-
-/*******************************************************************************
- *
- * BERT - Boot Error Record Table
+ * BERT - Boot Error Record Table (ACPI 4.0)
+ * Version 1
*
******************************************************************************/
@@ -234,38 +122,43 @@ struct acpi_table_bert {
u64 address; /* Physical address of the error region */
};
-/* Boot Error Region */
+/* Boot Error Region (not a subtable, pointed to by Address field above) */
struct acpi_bert_region {
- u32 block_status;
- u32 raw_data_offset;
- u32 raw_data_length;
- u32 data_length;
- u32 error_severity;
+ u32 block_status; /* Type of error information */
+ u32 raw_data_offset; /* Offset to raw error data */
+ u32 raw_data_length; /* Length of raw error data */
+ u32 data_length; /* Length of generic error data */
+ u32 error_severity; /* Severity code */
};
-/* block_status Flags */
+/* Values for block_status flags above */
#define ACPI_BERT_UNCORRECTABLE (1)
-#define ACPI_BERT_CORRECTABLE (2)
-#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (4)
-#define ACPI_BERT_MULTIPLE_CORRECTABLE (8)
+#define ACPI_BERT_CORRECTABLE (1<<1)
+#define ACPI_BERT_MULTIPLE_UNCORRECTABLE (1<<2)
+#define ACPI_BERT_MULTIPLE_CORRECTABLE (1<<3)
+#define ACPI_BERT_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */
-/*******************************************************************************
- *
- * BOOT - Simple Boot Flag Table
- *
- ******************************************************************************/
+/* Values for error_severity above */
-struct acpi_table_boot {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 cmos_index; /* Index in CMOS RAM for the boot register */
- u8 reserved[3];
+enum acpi_bert_error_severity {
+ ACPI_BERT_ERROR_CORRECTABLE = 0,
+ ACPI_BERT_ERROR_FATAL = 1,
+ ACPI_BERT_ERROR_CORRECTED = 2,
+ ACPI_BERT_ERROR_NONE = 3,
+ ACPI_BERT_ERROR_RESERVED = 4 /* 4 and greater are reserved */
};
+/*
+ * Note: The generic error data that follows the error_severity field above
+ * uses the struct acpi_hest_generic_data defined under the HEST table below
+ */
+
/*******************************************************************************
*
- * CPEP - Corrected Platform Error Polling table
+ * CPEP - Corrected Platform Error Polling table (ACPI 4.0)
+ * Version 1
*
******************************************************************************/
@@ -277,8 +170,7 @@ struct acpi_table_cpep {
/* Subtable */
struct acpi_cpep_polling {
- u8 type;
- u8 length;
+ struct acpi_subtable_header header;
u8 id; /* Processor ID */
u8 eid; /* Processor EID */
u32 interval; /* Polling interval (msec) */
@@ -286,124 +178,8 @@ struct acpi_cpep_polling {
/*******************************************************************************
*
- * DBGP - Debug Port table
- *
- ******************************************************************************/
-
-struct acpi_table_dbgp {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 type; /* 0=full 16550, 1=subset of 16550 */
- u8 reserved[3];
- struct acpi_generic_address debug_port;
-};
-
-/*******************************************************************************
- *
- * DMAR - DMA Remapping table
- * From "Intel Virtualization Technology for Directed I/O", Sept. 2007
- *
- ******************************************************************************/
-
-struct acpi_table_dmar {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 width; /* Host Address Width */
- u8 flags;
- u8 reserved[10];
-};
-
-/* Flags */
-
-#define ACPI_DMAR_INTR_REMAP (1)
-
-/* DMAR subtable header */
-
-struct acpi_dmar_header {
- u16 type;
- u16 length;
-};
-
-/* Values for subtable type in struct acpi_dmar_header */
-
-enum acpi_dmar_type {
- ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
- ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
- ACPI_DMAR_TYPE_ATSR = 2,
- ACPI_DMAR_TYPE_RESERVED = 3 /* 3 and greater are reserved */
-};
-
-struct acpi_dmar_device_scope {
- u8 entry_type;
- u8 length;
- u16 reserved;
- u8 enumeration_id;
- u8 bus;
-};
-
-/* Values for entry_type in struct acpi_dmar_device_scope */
-
-enum acpi_dmar_scope_type {
- ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
- ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
- ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
- ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
- ACPI_DMAR_SCOPE_TYPE_HPET = 4,
- ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */
-};
-
-struct acpi_dmar_pci_path {
- u8 dev;
- u8 fn;
-};
-
-/*
- * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header
- */
-
-/* 0: Hardware Unit Definition */
-
-struct acpi_dmar_hardware_unit {
- struct acpi_dmar_header header;
- u8 flags;
- u8 reserved;
- u16 segment;
- u64 address; /* Register Base Address */
-};
-
-/* Flags */
-
-#define ACPI_DMAR_INCLUDE_ALL (1)
-
-/* 1: Reserved Memory Defininition */
-
-struct acpi_dmar_reserved_memory {
- struct acpi_dmar_header header;
- u16 reserved;
- u16 segment;
- u64 base_address; /* 4_k aligned base address */
- u64 end_address; /* 4_k aligned limit address */
-};
-
-/* Flags */
-
-#define ACPI_DMAR_ALLOW_ALL (1)
-
-
-/* 2: Root Port ATS Capability Reporting Structure */
-
-struct acpi_dmar_atsr {
- struct acpi_dmar_header header;
- u8 flags;
- u8 reserved;
- u16 segment;
-};
-
-/* Flags */
-
-#define ACPI_DMAR_ALL_PORTS (1)
-
-/*******************************************************************************
- *
* ECDT - Embedded Controller Boot Resources Table
+ * Version 1
*
******************************************************************************/
@@ -418,14 +194,16 @@ struct acpi_table_ecdt {
/*******************************************************************************
*
- * EINJ - Error Injection Table
+ * EINJ - Error Injection Table (ACPI 4.0)
+ * Version 1
*
******************************************************************************/
struct acpi_table_einj {
struct acpi_table_header header; /* Common ACPI table header */
u32 header_length;
- u32 reserved;
+ u8 flags;
+ u8 reserved[3];
u32 entries;
};
@@ -435,6 +213,10 @@ struct acpi_einj_entry {
struct acpi_whea_header whea_header; /* Common header for WHEA tables */
};
+/* Masks for Flags field above */
+
+#define ACPI_EINJ_PRESERVE (1)
+
/* Values for Action field above */
enum acpi_einj_actions {
@@ -470,9 +252,34 @@ struct acpi_einj_trigger {
u32 entry_count;
};
+/* Command status return values */
+
+enum acpi_einj_command_status {
+ ACPI_EINJ_SUCCESS = 0,
+ ACPI_EINJ_FAILURE = 1,
+ ACPI_EINJ_INVALID_ACCESS = 2,
+ ACPI_EINJ_STATUS_RESERVED = 3 /* 3 and greater are reserved */
+};
+
+/* Error types returned from ACPI_EINJ_GET_ERROR_TYPE (bitfield) */
+
+#define ACPI_EINJ_PROCESSOR_CORRECTABLE (1)
+#define ACPI_EINJ_PROCESSOR_UNCORRECTABLE (1<<1)
+#define ACPI_EINJ_PROCESSOR_FATAL (1<<2)
+#define ACPI_EINJ_MEMORY_CORRECTABLE (1<<3)
+#define ACPI_EINJ_MEMORY_UNCORRECTABLE (1<<4)
+#define ACPI_EINJ_MEMORY_FATAL (1<<5)
+#define ACPI_EINJ_PCIX_CORRECTABLE (1<<6)
+#define ACPI_EINJ_PCIX_UNCORRECTABLE (1<<7)
+#define ACPI_EINJ_PCIX_FATAL (1<<8)
+#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9)
+#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10)
+#define ACPI_EINJ_PLATFORM_FATAL (1<<11)
+
/*******************************************************************************
*
- * ERST - Error Record Serialization Table
+ * ERST - Error Record Serialization Table (ACPI 4.0)
+ * Version 1
*
******************************************************************************/
@@ -489,19 +296,23 @@ struct acpi_erst_entry {
struct acpi_whea_header whea_header; /* Common header for WHEA tables */
};
+/* Masks for Flags field above */
+
+#define ACPI_ERST_PRESERVE (1)
+
/* Values for Action field above */
enum acpi_erst_actions {
- ACPI_ERST_BEGIN_WRITE_OPERATION = 0,
- ACPI_ERST_BEGIN_READ_OPERATION = 1,
- ACPI_ERST_BETGIN_CLEAR_OPERATION = 2,
- ACPI_ERST_END_OPERATION = 3,
+ ACPI_ERST_BEGIN_WRITE = 0,
+ ACPI_ERST_BEGIN_READ = 1,
+ ACPI_ERST_BEGIN_CLEAR = 2,
+ ACPI_ERST_END = 3,
ACPI_ERST_SET_RECORD_OFFSET = 4,
ACPI_ERST_EXECUTE_OPERATION = 5,
ACPI_ERST_CHECK_BUSY_STATUS = 6,
ACPI_ERST_GET_COMMAND_STATUS = 7,
- ACPI_ERST_GET_RECORD_IDENTIFIER = 8,
- ACPI_ERST_SET_RECORD_IDENTIFIER = 9,
+ ACPI_ERST_GET_RECORD_ID = 8,
+ ACPI_ERST_SET_RECORD_ID = 9,
ACPI_ERST_GET_RECORD_COUNT = 10,
ACPI_ERST_BEGIN_DUMMY_WRIITE = 11,
ACPI_ERST_NOT_USED = 12,
@@ -536,9 +347,29 @@ enum acpi_erst_instructions {
ACPI_ERST_INSTRUCTION_RESERVED = 19 /* 19 and greater are reserved */
};
+/* Command status return values */
+
+enum acpi_erst_command_status {
+ ACPI_ERST_SUCESS = 0,
+ ACPI_ERST_NO_SPACE = 1,
+ ACPI_ERST_NOT_AVAILABLE = 2,
+ ACPI_ERST_FAILURE = 3,
+ ACPI_ERST_RECORD_EMPTY = 4,
+ ACPI_ERST_NOT_FOUND = 5,
+ ACPI_ERST_STATUS_RESERVED = 6 /* 6 and greater are reserved */
+};
+
+/* Error Record Serialization Information */
+
+struct acpi_erst_info {
+ u16 signature; /* Should be "ER" */
+ u8 data[48];
+};
+
/*******************************************************************************
*
- * HEST - Hardware Error Source Table
+ * HEST - Hardware Error Source Table (ACPI 4.0)
+ * Version 1
*
******************************************************************************/
@@ -551,85 +382,69 @@ struct acpi_table_hest {
struct acpi_hest_header {
u16 type;
+ u16 source_id;
};
/* Values for Type field above for subtables */
enum acpi_hest_types {
- ACPI_HEST_TYPE_XPF_MACHINE_CHECK = 0,
- ACPI_HEST_TYPE_XPF_CORRECTED_MACHINE_CHECK = 1,
- ACPI_HEST_TYPE_XPF_UNUSED = 2,
- ACPI_HEST_TYPE_XPF_NON_MASKABLE_INTERRUPT = 3,
- ACPI_HEST_TYPE_IPF_CORRECTED_MACHINE_CHECK = 4,
- ACPI_HEST_TYPE_IPF_CORRECTED_PLATFORM_ERROR = 5,
+ ACPI_HEST_TYPE_IA32_CHECK = 0,
+ ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1,
+ ACPI_HEST_TYPE_IA32_NMI = 2,
+ ACPI_HEST_TYPE_NOT_USED3 = 3,
+ ACPI_HEST_TYPE_NOT_USED4 = 4,
+ ACPI_HEST_TYPE_NOT_USED5 = 5,
ACPI_HEST_TYPE_AER_ROOT_PORT = 6,
ACPI_HEST_TYPE_AER_ENDPOINT = 7,
ACPI_HEST_TYPE_AER_BRIDGE = 8,
- ACPI_HEST_TYPE_GENERIC_HARDWARE_ERROR_SOURCE = 9,
+ ACPI_HEST_TYPE_GENERIC_ERROR = 9,
ACPI_HEST_TYPE_RESERVED = 10 /* 10 and greater are reserved */
};
/*
- * HEST Sub-subtables
+ * HEST substructures contained in subtables
*/
-/* XPF Machine Check Error Bank */
-
-struct acpi_hest_xpf_error_bank {
+/*
+ * IA32 Error Bank(s) - Follows the struct acpi_hest_ia_machine_check and
+ * struct acpi_hest_ia_corrected structures.
+ */
+struct acpi_hest_ia_error_bank {
u8 bank_number;
u8 clear_status_on_init;
u8 status_format;
- u8 config_write_enable;
+ u8 reserved;
u32 control_register;
- u64 control_init_data;
+ u64 control_data;
u32 status_register;
u32 address_register;
u32 misc_register;
};
-/* Generic Error Status */
-
-struct acpi_hest_generic_status {
- u32 block_status;
- u32 raw_data_offset;
- u32 raw_data_length;
- u32 data_length;
- u32 error_severity;
-};
-
-/* Generic Error Data */
-
-struct acpi_hest_generic_data {
- u8 section_type[16];
- u32 error_severity;
- u16 revision;
- u8 validation_bits;
- u8 flags;
- u32 error_data_length;
- u8 fru_id[16];
- u8 fru_text[20];
-};
-
-/* Common HEST structure for PCI/AER types below (6,7,8) */
+/* Common HEST sub-structure for PCI/AER structures below (6,7,8) */
struct acpi_hest_aer_common {
- u16 source_id;
- u16 config_write_enable;
+ u16 reserved1;
u8 flags;
u8 enabled;
- u32 records_to_pre_allocate;
+ u32 records_to_preallocate;
u32 max_sections_per_record;
u32 bus;
u16 device;
u16 function;
u16 device_control;
- u16 reserved;
- u32 uncorrectable_error_mask;
- u32 uncorrectable_error_severity;
- u32 correctable_error_mask;
- u32 advanced_error_capabilities;
+ u16 reserved2;
+ u32 uncorrectable_mask;
+ u32 uncorrectable_severity;
+ u32 correctable_mask;
+ u32 advanced_capabilities;
};
+/* Masks for HEST Flags fields */
+
+#define ACPI_HEST_FIRMWARE_FIRST (1)
+#define ACPI_HEST_GLOBAL (1<<1)
+
/* Hardware Error Notification */
struct acpi_hest_notify {
@@ -655,71 +470,59 @@ enum acpi_hest_notify_types {
ACPI_HEST_NOTIFY_RESERVED = 5 /* 5 and greater are reserved */
};
+/* Values for config_write_enable bitfield above */
+
+#define ACPI_HEST_TYPE (1)
+#define ACPI_HEST_POLL_INTERVAL (1<<1)
+#define ACPI_HEST_POLL_THRESHOLD_VALUE (1<<2)
+#define ACPI_HEST_POLL_THRESHOLD_WINDOW (1<<3)
+#define ACPI_HEST_ERR_THRESHOLD_VALUE (1<<4)
+#define ACPI_HEST_ERR_THRESHOLD_WINDOW (1<<5)
+
/*
* HEST subtables
- *
- * From WHEA Design Document, 16 May 2007.
- * Note: There is no subtable type 2 in this version of the document,
- * and there are two different subtable type 3s.
*/
- /* 0: XPF Machine Check Exception */
+/* 0: IA32 Machine Check Exception */
-struct acpi_hest_xpf_machine_check {
+struct acpi_hest_ia_machine_check {
struct acpi_hest_header header;
- u16 source_id;
- u16 config_write_enable;
+ u16 reserved1;
u8 flags;
- u8 reserved1;
- u32 records_to_pre_allocate;
+ u8 enabled;
+ u32 records_to_preallocate;
u32 max_sections_per_record;
u64 global_capability_data;
u64 global_control_data;
u8 num_hardware_banks;
- u8 reserved2[7];
+ u8 reserved3[7];
};
-/* 1: XPF Corrected Machine Check */
+/* 1: IA32 Corrected Machine Check */
-struct acpi_table_hest_xpf_corrected {
+struct acpi_hest_ia_corrected {
struct acpi_hest_header header;
- u16 source_id;
- u16 config_write_enable;
+ u16 reserved1;
u8 flags;
u8 enabled;
- u32 records_to_pre_allocate;
+ u32 records_to_preallocate;
u32 max_sections_per_record;
struct acpi_hest_notify notify;
u8 num_hardware_banks;
- u8 reserved[3];
+ u8 reserved2[3];
};
-/* 3: XPF Non-Maskable Interrupt */
+/* 2: IA32 Non-Maskable Interrupt */
-struct acpi_hest_xpf_nmi {
+struct acpi_hest_ia_nmi {
struct acpi_hest_header header;
- u16 source_id;
u32 reserved;
- u32 records_to_pre_allocate;
+ u32 records_to_preallocate;
u32 max_sections_per_record;
u32 max_raw_data_length;
};
-/* 4: IPF Corrected Machine Check */
-
-struct acpi_hest_ipf_corrected {
- struct acpi_hest_header header;
- u8 enabled;
- u8 reserved;
-};
-
-/* 5: IPF Corrected Platform Error */
-
-struct acpi_hest_ipf_corrected_platform {
- struct acpi_hest_header header;
- u8 enabled;
- u8 reserved;
-};
+/* 3,4,5: Not used */
/* 6: PCI Express Root Port AER */
@@ -741,143 +544,61 @@ struct acpi_hest_aer {
struct acpi_hest_aer_bridge {
struct acpi_hest_header header;
struct acpi_hest_aer_common aer;
- u32 secondary_uncorrectable_error_mask;
- u32 secondary_uncorrectable_error_severity;
- u32 secondary_advanced_capabilities;
+ u32 uncorrectable_mask2;
+ u32 uncorrectable_severity2;
+ u32 advanced_capabilities2;
};
/* 9: Generic Hardware Error Source */
struct acpi_hest_generic {
struct acpi_hest_header header;
- u16 source_id;
u16 related_source_id;
- u8 config_write_enable;
+ u8 reserved;
u8 enabled;
- u32 records_to_pre_allocate;
+ u32 records_to_preallocate;
u32 max_sections_per_record;
u32 max_raw_data_length;
struct acpi_generic_address error_status_address;
struct acpi_hest_notify notify;
- u32 error_status_block_length;
+ u32 error_block_length;
};
-/*******************************************************************************
- *
- * HPET - High Precision Event Timer table
- *
- ******************************************************************************/
+/* Generic Error Status block */
-struct acpi_table_hpet {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 id; /* Hardware ID of event timer block */
- struct acpi_generic_address address; /* Address of event timer block */
- u8 sequence; /* HPET sequence number */
- u16 minimum_tick; /* Main counter min tick, periodic mode */
- u8 flags;
+struct acpi_hest_generic_status {
+ u32 block_status;
+ u32 raw_data_offset;
+ u32 raw_data_length;
+ u32 data_length;
+ u32 error_severity;
};
-/*! Flags */
+/* Values for block_status flags above */
-#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */
-#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */
-#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */
+#define ACPI_HEST_UNCORRECTABLE (1)
+#define ACPI_HEST_CORRECTABLE (1<<1)
+#define ACPI_HEST_MULTIPLE_UNCORRECTABLE (1<<2)
+#define ACPI_HEST_MULTIPLE_CORRECTABLE (1<<3)
+#define ACPI_HEST_ERROR_ENTRY_COUNT (0xFF<<4) /* 8 bits, error count */
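As a sketch (not in the patch), the error data entry count encoded in bits 4-11 of block_status can be recovered with the mask just defined; the helper name is made up:

static inline u32 hest_generic_entry_count(struct acpi_hest_generic_status *status)
{
        return (status->block_status & ACPI_HEST_ERROR_ENTRY_COUNT) >> 4;
}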
-/*! [End] no source code translation !*/
+/* Generic Error Data entry */
-/*******************************************************************************
- *
- * IBFT - Boot Firmware Table
- *
- ******************************************************************************/
-
-struct acpi_table_ibft {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 reserved[12];
-};
-
-/* IBFT common subtable header */
-
-struct acpi_ibft_header {
- u8 type;
- u8 version;
- u16 length;
- u8 index;
+struct acpi_hest_generic_data {
+ u8 section_type[16];
+ u32 error_severity;
+ u16 revision;
+ u8 validation_bits;
u8 flags;
-};
-
-/* Values for Type field above */
-
-enum acpi_ibft_type {
- ACPI_IBFT_TYPE_NOT_USED = 0,
- ACPI_IBFT_TYPE_CONTROL = 1,
- ACPI_IBFT_TYPE_INITIATOR = 2,
- ACPI_IBFT_TYPE_NIC = 3,
- ACPI_IBFT_TYPE_TARGET = 4,
- ACPI_IBFT_TYPE_EXTENSIONS = 5,
- ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
-};
-
-/* IBFT subtables */
-
-struct acpi_ibft_control {
- struct acpi_ibft_header header;
- u16 extensions;
- u16 initiator_offset;
- u16 nic0_offset;
- u16 target0_offset;
- u16 nic1_offset;
- u16 target1_offset;
-};
-
-struct acpi_ibft_initiator {
- struct acpi_ibft_header header;
- u8 sns_server[16];
- u8 slp_server[16];
- u8 primary_server[16];
- u8 secondary_server[16];
- u16 name_length;
- u16 name_offset;
-};
-
-struct acpi_ibft_nic {
- struct acpi_ibft_header header;
- u8 ip_address[16];
- u8 subnet_mask_prefix;
- u8 origin;
- u8 gateway[16];
- u8 primary_dns[16];
- u8 secondary_dns[16];
- u8 dhcp[16];
- u16 vlan;
- u8 mac_address[6];
- u16 pci_address;
- u16 name_length;
- u16 name_offset;
-};
-
-struct acpi_ibft_target {
- struct acpi_ibft_header header;
- u8 target_ip_address[16];
- u16 target_ip_socket;
- u8 target_boot_lun[8];
- u8 chap_type;
- u8 nic_association;
- u16 target_name_length;
- u16 target_name_offset;
- u16 chap_name_length;
- u16 chap_name_offset;
- u16 chap_secret_length;
- u16 chap_secret_offset;
- u16 reverse_chap_name_length;
- u16 reverse_chap_name_offset;
- u16 reverse_chap_secret_length;
- u16 reverse_chap_secret_offset;
+ u32 error_data_length;
+ u8 fru_id[16];
+ u8 fru_text[20];
};
/*******************************************************************************
*
* MADT - Multiple APIC Description Table
+ * Version 3
*
******************************************************************************/
@@ -887,16 +608,16 @@ struct acpi_table_madt {
u32 flags;
};
-/* Flags */
+/* Masks for Flags field above */
-#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */
+#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */
/* Values for PCATCompat flag */
#define ACPI_MADT_DUAL_PIC 0
#define ACPI_MADT_MULTIPLE_APIC 1
-/* Values for subtable type in struct acpi_subtable_header */
+/* Values for MADT subtable type in struct acpi_subtable_header */
enum acpi_madt_type {
ACPI_MADT_TYPE_LOCAL_APIC = 0,
@@ -1007,11 +728,11 @@ struct acpi_madt_interrupt_source {
u32 flags; /* Interrupt Source Flags */
};
-/* Flags field above */
+/* Masks for Flags field above */
#define ACPI_MADT_CPEI_OVERRIDE (1)
-/* 9: Processor Local X2_APIC (07/2008) */
+/* 9: Processor Local X2APIC (ACPI 4.0) */
struct acpi_madt_local_x2apic {
struct acpi_subtable_header header;
@@ -1021,7 +742,7 @@ struct acpi_madt_local_x2apic {
u32 uid; /* ACPI processor UID */
};
-/* 10: Local X2APIC NMI (07/2008) */
+/* 10: Local X2APIC NMI (ACPI 4.0) */
struct acpi_madt_local_x2apic_nmi {
struct acpi_subtable_header header;
@@ -1058,28 +779,34 @@ struct acpi_madt_local_x2apic_nmi {
/*******************************************************************************
*
- * MCFG - PCI Memory Mapped Configuration table and sub-table
+ * MSCT - Maximum System Characteristics Table (ACPI 4.0)
+ * Version 1
*
******************************************************************************/
-struct acpi_table_mcfg {
+struct acpi_table_msct {
struct acpi_table_header header; /* Common ACPI table header */
- u8 reserved[8];
+ u32 proximity_offset; /* Location of proximity info struct(s) */
+ u32 max_proximity_domains; /* Max number of proximity domains */
+ u32 max_clock_domains; /* Max number of clock domains */
+ u64 max_address; /* Max physical address in system */
};
-/* Subtable */
+/* Subtable - Maximum Proximity Domain Information. Version 1 */
-struct acpi_mcfg_allocation {
- u64 address; /* Base address, processor-relative */
- u16 pci_segment; /* PCI segment group number */
- u8 start_bus_number; /* Starting PCI Bus number */
- u8 end_bus_number; /* Final PCI Bus number */
- u32 reserved;
+struct acpi_msct_proximity {
+ u8 revision;
+ u8 length;
+ u32 range_start; /* Start of domain range */
+ u32 range_end; /* End of domain range */
+ u32 processor_capacity;
+ u64 memory_capacity; /* In bytes */
};
/*******************************************************************************
*
* SBST - Smart Battery Specification Table
+ * Version 1
*
******************************************************************************/
@@ -1093,6 +820,7 @@ struct acpi_table_sbst {
/*******************************************************************************
*
* SLIT - System Locality Distance Information Table
+ * Version 1
*
******************************************************************************/
@@ -1104,60 +832,8 @@ struct acpi_table_slit {
/*******************************************************************************
*
- * SPCR - Serial Port Console Redirection table
- *
- ******************************************************************************/
-
-struct acpi_table_spcr {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
- u8 reserved[3];
- struct acpi_generic_address serial_port;
- u8 interrupt_type;
- u8 pc_interrupt;
- u32 interrupt;
- u8 baud_rate;
- u8 parity;
- u8 stop_bits;
- u8 flow_control;
- u8 terminal_type;
- u8 reserved1;
- u16 pci_device_id;
- u16 pci_vendor_id;
- u8 pci_bus;
- u8 pci_device;
- u8 pci_function;
- u32 pci_flags;
- u8 pci_segment;
- u32 reserved2;
-};
-
-/*******************************************************************************
- *
- * SPMI - Server Platform Management Interface table
- *
- ******************************************************************************/
-
-struct acpi_table_spmi {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 reserved;
- u8 interface_type;
- u16 spec_revision; /* Version of IPMI */
- u8 interrupt_type;
- u8 gpe_number; /* GPE assigned */
- u8 reserved1;
- u8 pci_device_flag;
- u32 interrupt;
- struct acpi_generic_address ipmi_register;
- u8 pci_segment;
- u8 pci_bus;
- u8 pci_device;
- u8 pci_function;
-};
-
-/*******************************************************************************
- *
* SRAT - System Resource Affinity Table
+ * Version 3
*
******************************************************************************/
@@ -1192,6 +868,10 @@ struct acpi_srat_cpu_affinity {
u32 reserved; /* Reserved, must be zero */
};
+/* Flags */
+
+#define ACPI_SRAT_CPU_USE_AFFINITY (1) /* 00: Use affinity structure */
+
/* 1: Memory Affinity */
struct acpi_srat_mem_affinity {
@@ -1211,7 +891,7 @@ struct acpi_srat_mem_affinity {
#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
-/* 2: Processor Local X2_APIC Affinity (07/2008) */
+/* 2: Processor Local X2_APIC Affinity (ACPI 4.0) */
struct acpi_srat_x2apic_cpu_affinity {
struct acpi_subtable_header header;
@@ -1219,122 +899,14 @@ struct acpi_srat_x2apic_cpu_affinity {
u32 proximity_domain;
u32 apic_id;
u32 flags;
+ u32 clock_domain;
+ u32 reserved2;
};
/* Flags for struct acpi_srat_cpu_affinity and struct acpi_srat_x2apic_cpu_affinity */
#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
-/*******************************************************************************
- *
- * TCPA - Trusted Computing Platform Alliance table
- *
- ******************************************************************************/
-
-struct acpi_table_tcpa {
- struct acpi_table_header header; /* Common ACPI table header */
- u16 reserved;
- u32 max_log_length; /* Maximum length for the event log area */
- u64 log_address; /* Address of the event log area */
-};
-
-/*******************************************************************************
- *
- * UEFI - UEFI Boot optimization Table
- *
- ******************************************************************************/
-
-struct acpi_table_uefi {
- struct acpi_table_header header; /* Common ACPI table header */
- u8 identifier[16]; /* UUID identifier */
- u16 data_offset; /* Offset of remaining data in table */
- u8 data;
-};
-
-/*******************************************************************************
- *
- * WDAT - Watchdog Action Table
- *
- ******************************************************************************/
-
-struct acpi_table_wdat {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 header_length; /* Watchdog Header Length */
- u16 pci_segment; /* PCI Segment number */
- u8 pci_bus; /* PCI Bus number */
- u8 pci_device; /* PCI Device number */
- u8 pci_function; /* PCI Function number */
- u8 reserved[3];
- u32 timer_period; /* Period of one timer count (msec) */
- u32 max_count; /* Maximum counter value supported */
- u32 min_count; /* Minimum counter value */
- u8 flags;
- u8 reserved2[3];
- u32 entries; /* Number of watchdog entries that follow */
-};
-
-/* WDAT Instruction Entries (actions) */
-
-struct acpi_wdat_entry {
- struct acpi_whea_header whea_header; /* Common header for WHEA tables */
-};
-
-/* Values for Action field above */
-
-enum acpi_wdat_actions {
- ACPI_WDAT_RESET = 1,
- ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4,
- ACPI_WDAT_GET_COUNTDOWN = 5,
- ACPI_WDAT_SET_COUNTDOWN = 6,
- ACPI_WDAT_GET_RUNNING_STATE = 8,
- ACPI_WDAT_SET_RUNNING_STATE = 9,
- ACPI_WDAT_GET_STOPPED_STATE = 10,
- ACPI_WDAT_SET_STOPPED_STATE = 11,
- ACPI_WDAT_GET_REBOOT = 16,
- ACPI_WDAT_SET_REBOOT = 17,
- ACPI_WDAT_GET_SHUTDOWN = 18,
- ACPI_WDAT_SET_SHUTDOWN = 19,
- ACPI_WDAT_GET_STATUS = 32,
- ACPI_WDAT_SET_STATUS = 33,
- ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */
-};
-
-/* Values for Instruction field above */
-
-enum acpi_wdat_instructions {
- ACPI_WDAT_READ_VALUE = 0,
- ACPI_WDAT_READ_COUNTDOWN = 1,
- ACPI_WDAT_WRITE_VALUE = 2,
- ACPI_WDAT_WRITE_COUNTDOWN = 3,
- ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */
- ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */
-};
-
-/*******************************************************************************
- *
- * WDRT - Watchdog Resource Table
- *
- ******************************************************************************/
-
-struct acpi_table_wdrt {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 header_length; /* Watchdog Header Length */
- u8 pci_segment; /* PCI Segment number */
- u8 pci_bus; /* PCI Bus number */
- u8 pci_device; /* PCI Device number */
- u8 pci_function; /* PCI Function number */
- u32 timer_period; /* Period of one timer count (msec) */
- u32 max_count; /* Maximum counter value supported */
- u32 min_count; /* Minimum counter value */
- u8 flags;
- u8 reserved[3];
- u32 entries; /* Number of watchdog entries that follow */
-};
-
-/* Flags */
-
-#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */
-
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
new file mode 100644
index 00000000000..6f3dce9991e
--- /dev/null
+++ b/include/acpi/actbl2.h
@@ -0,0 +1,868 @@
+#ifndef __ACTBL2_H__
+#define __ACTBL2_H__
+
+/*******************************************************************************
+ *
+ * Additional ACPI Tables (2)
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ * The tables in this file are defined by third-party specifications, and are
+ * not defined directly by the ACPI specification itself.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
+ */
+#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
+#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
+#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */
+#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
+#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
+#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
+#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
+#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
+#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
+#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
+#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
+#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
+
+/*
+ * All tables must be byte-packed to match the ACPI specification, since
+ * the tables are provided by the system BIOS.
+ */
+#pragma pack(1)
+
+/*
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
+ */
+
+/*******************************************************************************
+ *
+ * ASF - Alert Standard Format table (Signature "ASF!")
+ * Revision 0x10
+ *
+ * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003
+ *
+ ******************************************************************************/
+
+struct acpi_table_asf {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/* ASF subtable header */
+
+struct acpi_asf_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_asf_type {
+ ACPI_ASF_TYPE_INFO = 0,
+ ACPI_ASF_TYPE_ALERT = 1,
+ ACPI_ASF_TYPE_CONTROL = 2,
+ ACPI_ASF_TYPE_BOOT = 3,
+ ACPI_ASF_TYPE_ADDRESS = 4,
+ ACPI_ASF_TYPE_RESERVED = 5
+};
+
+/*
+ * ASF subtables
+ */
+
+/* 0: ASF Information */
+
+struct acpi_asf_info {
+ struct acpi_asf_header header;
+ u8 min_reset_value;
+ u8 min_poll_interval;
+ u16 system_id;
+ u32 mfg_id;
+ u8 flags;
+ u8 reserved2[3];
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_ASF_SMBUS_PROTOCOLS (1)
+
+/* 1: ASF Alerts */
+
+struct acpi_asf_alert {
+ struct acpi_asf_header header;
+ u8 assert_mask;
+ u8 deassert_mask;
+ u8 alerts;
+ u8 data_length;
+};
+
+struct acpi_asf_alert_data {
+ u8 address;
+ u8 command;
+ u8 mask;
+ u8 value;
+ u8 sensor_type;
+ u8 type;
+ u8 offset;
+ u8 source_type;
+ u8 severity;
+ u8 sensor_number;
+ u8 entity;
+ u8 instance;
+};
+
+/* 2: ASF Remote Control */
+
+struct acpi_asf_remote {
+ struct acpi_asf_header header;
+ u8 controls;
+ u8 data_length;
+ u16 reserved2;
+};
+
+struct acpi_asf_control_data {
+ u8 function;
+ u8 address;
+ u8 command;
+ u8 value;
+};
+
+/* 3: ASF RMCP Boot Options */
+
+struct acpi_asf_rmcp {
+ struct acpi_asf_header header;
+ u8 capabilities[7];
+ u8 completion_code;
+ u32 enterprise_id;
+ u8 command;
+ u16 parameter;
+ u16 boot_options;
+ u16 oem_parameters;
+};
+
+/* 4: ASF Address */
+
+struct acpi_asf_address {
+ struct acpi_asf_header header;
+ u8 eprom_address;
+ u8 devices;
+};
+
+/*******************************************************************************
+ *
+ * BOOT - Simple Boot Flag Table
+ * Version 1
+ *
+ * Conforms to the "Simple Boot Flag Specification", Version 2.1
+ *
+ ******************************************************************************/
+
+struct acpi_table_boot {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 cmos_index; /* Index in CMOS RAM for the boot register */
+ u8 reserved[3];
+};
+
+/*******************************************************************************
+ *
+ * DBGP - Debug Port table
+ * Version 1
+ *
+ * Conforms to the "Debug Port Specification", Version 1.00, 2/9/2000
+ *
+ ******************************************************************************/
+
+struct acpi_table_dbgp {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address debug_port;
+};
+
+/*******************************************************************************
+ *
+ * DMAR - DMA Remapping table
+ * Version 1
+ *
+ * Conforms to "Intel Virtualization Technology for Directed I/O",
+ * Version 1.2, Sept. 2008
+ *
+ ******************************************************************************/
+
+struct acpi_table_dmar {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 width; /* Host Address Width */
+ u8 flags;
+ u8 reserved[10];
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_INTR_REMAP (1)
+
+/* DMAR subtable header */
+
+struct acpi_dmar_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for subtable type in struct acpi_dmar_header */
+
+enum acpi_dmar_type {
+ ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
+ ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
+ ACPI_DMAR_TYPE_ATSR = 2,
+ ACPI_DMAR_HARDWARE_AFFINITY = 3,
+ ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */
+};
+
+/* DMAR Device Scope structure */
+
+struct acpi_dmar_device_scope {
+ u8 entry_type;
+ u8 length;
+ u16 reserved;
+ u8 enumeration_id;
+ u8 bus;
+};
+
+/* Values for entry_type in struct acpi_dmar_device_scope */
+
+enum acpi_dmar_scope_type {
+ ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
+ ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
+ ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
+ ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
+ ACPI_DMAR_SCOPE_TYPE_HPET = 4,
+ ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */
+};
+
+struct acpi_dmar_pci_path {
+ u8 dev;
+ u8 fn;
+};
+
+/*
+ * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header
+ */
+
+/* 0: Hardware Unit Definition */
+
+struct acpi_dmar_hardware_unit {
+ struct acpi_dmar_header header;
+ u8 flags;
+ u8 reserved;
+ u16 segment;
+ u64 address; /* Register Base Address */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_INCLUDE_ALL (1)
+
+/* 1: Reserved Memory Definition */
+
+struct acpi_dmar_reserved_memory {
+ struct acpi_dmar_header header;
+ u16 reserved;
+ u16 segment;
+ u64 base_address; /* 4_k aligned base address */
+ u64 end_address; /* 4_k aligned limit address */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_ALLOW_ALL (1)
+
+/* 2: Root Port ATS Capability Reporting Structure */
+
+struct acpi_dmar_atsr {
+ struct acpi_dmar_header header;
+ u8 flags;
+ u8 reserved;
+ u16 segment;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_DMAR_ALL_PORTS (1)
+
+/* 3: Remapping Hardware Static Affinity Structure */
+
+struct acpi_dmar_rhsa {
+ struct acpi_dmar_header header;
+ u32 reserved;
+ u64 base_address;
+ u32 proximity_domain;
+};
+
+/*******************************************************************************
+ *
+ * HPET - High Precision Event Timer table
+ * Version 1
+ *
+ * Conforms to "IA-PC HPET (High Precision Event Timers) Specification",
+ * Version 1.0a, October 2004
+ *
+ ******************************************************************************/
+
+struct acpi_table_hpet {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 id; /* Hardware ID of event timer block */
+ struct acpi_generic_address address; /* Address of event timer block */
+ u8 sequence; /* HPET sequence number */
+ u16 minimum_tick; /* Main counter min tick, periodic mode */
+ u8 flags;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_HPET_PAGE_PROTECT_MASK (3)
+
+/* Values for Page Protect flags */
+
+enum acpi_hpet_page_protect {
+ ACPI_HPET_NO_PAGE_PROTECT = 0,
+ ACPI_HPET_PAGE_PROTECT4 = 1,
+ ACPI_HPET_PAGE_PROTECT64 = 2
+};
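For illustration only (hypothetical helper): the protection encoding occupies the low two Flags bits, so a reader masks them and compares against the enum values:

static inline enum acpi_hpet_page_protect
hpet_page_protection(const struct acpi_table_hpet *hpet)
{
        return hpet->flags & ACPI_HPET_PAGE_PROTECT_MASK;
}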
+
+/*******************************************************************************
+ *
+ * IBFT - Boot Firmware Table
+ * Version 1
+ *
+ * Conforms to "iSCSI Boot Firmware Table (iBFT) as Defined in ACPI 3.0b
+ * Specification", Version 1.01, March 1, 2007
+ *
+ * Note: It appears that this table is not intended to appear in the RSDT/XSDT.
+ * Therefore, it is not currently supported by the disassembler.
+ *
+ ******************************************************************************/
+
+struct acpi_table_ibft {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 reserved[12];
+};
+
+/* IBFT common subtable header */
+
+struct acpi_ibft_header {
+ u8 type;
+ u8 version;
+ u16 length;
+ u8 index;
+ u8 flags;
+};
+
+/* Values for Type field above */
+
+enum acpi_ibft_type {
+ ACPI_IBFT_TYPE_NOT_USED = 0,
+ ACPI_IBFT_TYPE_CONTROL = 1,
+ ACPI_IBFT_TYPE_INITIATOR = 2,
+ ACPI_IBFT_TYPE_NIC = 3,
+ ACPI_IBFT_TYPE_TARGET = 4,
+ ACPI_IBFT_TYPE_EXTENSIONS = 5,
+ ACPI_IBFT_TYPE_RESERVED = 6 /* 6 and greater are reserved */
+};
+
+/* IBFT subtables */
+
+struct acpi_ibft_control {
+ struct acpi_ibft_header header;
+ u16 extensions;
+ u16 initiator_offset;
+ u16 nic0_offset;
+ u16 target0_offset;
+ u16 nic1_offset;
+ u16 target1_offset;
+};
+
+struct acpi_ibft_initiator {
+ struct acpi_ibft_header header;
+ u8 sns_server[16];
+ u8 slp_server[16];
+ u8 primary_server[16];
+ u8 secondary_server[16];
+ u16 name_length;
+ u16 name_offset;
+};
+
+struct acpi_ibft_nic {
+ struct acpi_ibft_header header;
+ u8 ip_address[16];
+ u8 subnet_mask_prefix;
+ u8 origin;
+ u8 gateway[16];
+ u8 primary_dns[16];
+ u8 secondary_dns[16];
+ u8 dhcp[16];
+ u16 vlan;
+ u8 mac_address[6];
+ u16 pci_address;
+ u16 name_length;
+ u16 name_offset;
+};
+
+struct acpi_ibft_target {
+ struct acpi_ibft_header header;
+ u8 target_ip_address[16];
+ u16 target_ip_socket;
+ u8 target_boot_lun[8];
+ u8 chap_type;
+ u8 nic_association;
+ u16 target_name_length;
+ u16 target_name_offset;
+ u16 chap_name_length;
+ u16 chap_name_offset;
+ u16 chap_secret_length;
+ u16 chap_secret_offset;
+ u16 reverse_chap_name_length;
+ u16 reverse_chap_name_offset;
+ u16 reverse_chap_secret_length;
+ u16 reverse_chap_secret_offset;
+};
+
+/*******************************************************************************
+ *
+ * IVRS - I/O Virtualization Reporting Structure
+ * Version 1
+ *
+ * Conforms to "AMD I/O Virtualization Technology (IOMMU) Specification",
+ * Revision 1.26, February 2009.
+ *
+ ******************************************************************************/
+
+struct acpi_table_ivrs {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 info; /* Common virtualization info */
+ u64 reserved;
+};
+
+/* Values for Info field above */
+
+#define ACPI_IVRS_PHYSICAL_SIZE 0x00007F00 /* 7 bits, physical address size */
+#define ACPI_IVRS_VIRTUAL_SIZE 0x003F8000 /* 7 bits, virtual address size */
+#define ACPI_IVRS_ATS_RESERVED 0x00400000 /* ATS address translation range reserved */
+
+/* IVRS subtable header */
+
+struct acpi_ivrs_header {
+ u8 type; /* Subtable type */
+ u8 flags;
+ u16 length; /* Subtable length */
+ u16 device_id; /* ID of IOMMU */
+};
+
+/* Values for subtable Type above */
+
+enum acpi_ivrs_type {
+ ACPI_IVRS_TYPE_HARDWARE = 0x10,
+ ACPI_IVRS_TYPE_MEMORY1 = 0x20,
+ ACPI_IVRS_TYPE_MEMORY2 = 0x21,
+ ACPI_IVRS_TYPE_MEMORY3 = 0x22
+};
+
+/* Masks for Flags field above for IVHD subtable */
+
+#define ACPI_IVHD_TT_ENABLE (1)
+#define ACPI_IVHD_PASS_PW (1<<1)
+#define ACPI_IVHD_RES_PASS_PW (1<<2)
+#define ACPI_IVHD_ISOC (1<<3)
+#define ACPI_IVHD_IOTLB (1<<4)
+
+/* Masks for Flags field above for IVMD subtable */
+
+#define ACPI_IVMD_UNITY (1)
+#define ACPI_IVMD_READ (1<<1)
+#define ACPI_IVMD_WRITE (1<<2)
+#define ACPI_IVMD_EXCLUSION_RANGE (1<<3)
+
+/*
+ * IVRS subtables, correspond to Type in struct acpi_ivrs_header
+ */
+
+/* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */
+
+struct acpi_ivrs_hardware {
+ struct acpi_ivrs_header header;
+ u16 capability_offset; /* Offset for IOMMU control fields */
+ u64 base_address; /* IOMMU control registers */
+ u16 pci_segment_group;
+ u16 info; /* MSI number and unit ID */
+ u32 reserved;
+};
+
+/* Masks for Info field above */
+
+#define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */
+#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_iD */
+
+/*
+ * Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure.
+ * Upper two bits of the Type field are the (encoded) length of the structure.
+ * Currently, only 4 and 8 byte entries are defined. 16 and 32 byte entries
+ * are reserved for future use but not defined.
+ */
+struct acpi_ivrs_de_header {
+ u8 type;
+ u16 id;
+ u8 data_setting;
+};
+
+/* Length of device entry is in the top two bits of Type field above */
+
+#define ACPI_IVHD_ENTRY_LENGTH 0xC0
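A sketch of the decode implied by the comment above (helper name is hypothetical): the two ACPI_IVHD_ENTRY_LENGTH bits select 4, 8, 16 or 32 bytes, of which only the 4- and 8-byte forms are defined today:

static inline u32 ivhd_entry_length(u8 type)
{
        return 4 << ((type & ACPI_IVHD_ENTRY_LENGTH) >> 6);
}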
+
+/* Values for device entry Type field above */
+
+enum acpi_ivrs_device_entry_type {
+ /* 4-byte device entries, all use struct acpi_ivrs_device4 */
+
+ ACPI_IVRS_TYPE_PAD4 = 0,
+ ACPI_IVRS_TYPE_ALL = 1,
+ ACPI_IVRS_TYPE_SELECT = 2,
+ ACPI_IVRS_TYPE_START = 3,
+ ACPI_IVRS_TYPE_END = 4,
+
+ /* 8-byte device entries */
+
+ ACPI_IVRS_TYPE_PAD8 = 64,
+ ACPI_IVRS_TYPE_NOT_USED = 65,
+ ACPI_IVRS_TYPE_ALIAS_SELECT = 66, /* Uses struct acpi_ivrs_device8a */
+ ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */
+ ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */
+ ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */
+ ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */
+};
+
+/* Values for Data field above */
+
+#define ACPI_IVHD_INIT_PASS (1)
+#define ACPI_IVHD_EINT_PASS (1<<1)
+#define ACPI_IVHD_NMI_PASS (1<<2)
+#define ACPI_IVHD_SYSTEM_MGMT (3<<4)
+#define ACPI_IVHD_LINT0_PASS (1<<6)
+#define ACPI_IVHD_LINT1_PASS (1<<7)
+
+/* Types 0-4: 4-byte device entry */
+
+struct acpi_ivrs_device4 {
+ struct acpi_ivrs_de_header header;
+};
+
+/* Types 66-67: 8-byte device entry */
+
+struct acpi_ivrs_device8a {
+ struct acpi_ivrs_de_header header;
+ u8 reserved1;
+ u16 used_id;
+ u8 reserved2;
+};
+
+/* Types 70-71: 8-byte device entry */
+
+struct acpi_ivrs_device8b {
+ struct acpi_ivrs_de_header header;
+ u32 extended_data;
+};
+
+/* Values for extended_data above */
+
+#define ACPI_IVHD_ATS_DISABLED (1<<31)
+
+/* Type 72: 8-byte device entry */
+
+struct acpi_ivrs_device8c {
+ struct acpi_ivrs_de_header header;
+ u8 handle;
+ u16 used_id;
+ u8 variety;
+};
+
+/* Values for Variety field above */
+
+#define ACPI_IVHD_IOAPIC 1
+#define ACPI_IVHD_HPET 2
+
+/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
+
+struct acpi_ivrs_memory {
+ struct acpi_ivrs_header header;
+ u16 aux_data;
+ u64 reserved;
+ u64 start_address;
+ u64 memory_length;
+};
+
+/*******************************************************************************
+ *
+ * MCFG - PCI Memory Mapped Configuration table and sub-table
+ * Version 1
+ *
+ * Conforms to "PCI Firmware Specification", Revision 3.0, June 20, 2005
+ *
+ ******************************************************************************/
+
+struct acpi_table_mcfg {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 reserved[8];
+};
+
+/* Subtable */
+
+struct acpi_mcfg_allocation {
+ u64 address; /* Base address, processor-relative */
+ u16 pci_segment; /* PCI segment group number */
+ u8 start_bus_number; /* Starting PCI Bus number */
+ u8 end_bus_number; /* Final PCI Bus number */
+ u32 reserved;
+};
+
+/*******************************************************************************
+ *
+ * SPCR - Serial Port Console Redirection table
+ * Version 1
+ *
+ * Conforms to "Serial Port Console Redirection Table",
+ * Version 1.00, January 11, 2002
+ *
+ ******************************************************************************/
+
+struct acpi_table_spcr {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address serial_port;
+ u8 interrupt_type;
+ u8 pc_interrupt;
+ u32 interrupt;
+ u8 baud_rate;
+ u8 parity;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 terminal_type;
+ u8 reserved1;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u32 pci_flags;
+ u8 pci_segment;
+ u32 reserved2;
+};
+
+/* Masks for pci_flags field above */
+
+#define ACPI_SPCR_DO_NOT_DISABLE (1)
+
+/*******************************************************************************
+ *
+ * SPMI - Server Platform Management Interface table
+ * Version 5
+ *
+ * Conforms to "Intelligent Platform Management Interface Specification
+ * Second Generation v2.0", Document Revision 1.0, February 12, 2004 with
+ * June 12, 2009 markup.
+ *
+ ******************************************************************************/
+
+struct acpi_table_spmi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 interface_type;
+ u8 reserved; /* Must be 1 */
+ u16 spec_revision; /* Version of IPMI */
+ u8 interrupt_type;
+ u8 gpe_number; /* GPE assigned */
+ u8 reserved1;
+ u8 pci_device_flag;
+ u32 interrupt;
+ struct acpi_generic_address ipmi_register;
+ u8 pci_segment;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u8 reserved2;
+};
+
+/* Values for interface_type above */
+
+enum acpi_spmi_interface_types {
+ ACPI_SPMI_NOT_USED = 0,
+ ACPI_SPMI_KEYBOARD = 1,
+ ACPI_SPMI_SMI = 2,
+ ACPI_SPMI_BLOCK_TRANSFER = 3,
+ ACPI_SPMI_SMBUS = 4,
+ ACPI_SPMI_RESERVED = 5 /* 5 and above are reserved */
+};
+
+/*******************************************************************************
+ *
+ * TCPA - Trusted Computing Platform Alliance table
+ * Version 1
+ *
+ * Conforms to "TCG PC Specific Implementation Specification",
+ * Version 1.1, August 18, 2003
+ *
+ ******************************************************************************/
+
+struct acpi_table_tcpa {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 reserved;
+ u32 max_log_length; /* Maximum length for the event log area */
+ u64 log_address; /* Address of the event log area */
+};
+
+/*******************************************************************************
+ *
+ * UEFI - UEFI Boot optimization Table
+ * Version 1
+ *
+ * Conforms to "Unified Extensible Firmware Interface Specification",
+ * Version 2.3, May 8, 2009
+ *
+ ******************************************************************************/
+
+struct acpi_table_uefi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 identifier[16]; /* UUID identifier */
+ u16 data_offset; /* Offset of remaining data in table */
+};
+
+/*******************************************************************************
+ *
+ * WAET - Windows ACPI Emulated devices Table
+ * Version 1
+ *
+ * Conforms to "Windows ACPI Emulated Devices Table", version 1.0, April 6, 2009
+ *
+ ******************************************************************************/
+
+struct acpi_table_waet {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 flags;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_WAET_RTC_NO_ACK (1) /* RTC requires no int acknowledge */
+#define ACPI_WAET_TIMER_ONE_READ (1<<1) /* PM timer requires only one read */
+
+/*******************************************************************************
+ *
+ * WDAT - Watchdog Action Table
+ * Version 1
+ *
+ * Conforms to "Hardware Watchdog Timers Design Specification",
+ * Copyright 2006 Microsoft Corporation.
+ *
+ ******************************************************************************/
+
+struct acpi_table_wdat {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 header_length; /* Watchdog Header Length */
+ u16 pci_segment; /* PCI Segment number */
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u8 reserved[3];
+ u32 timer_period; /* Period of one timer count (msec) */
+ u32 max_count; /* Maximum counter value supported */
+ u32 min_count; /* Minimum counter value */
+ u8 flags;
+ u8 reserved2[3];
+ u32 entries; /* Number of watchdog entries that follow */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_WDAT_ENABLED (1)
+#define ACPI_WDAT_STOPPED 0x80
+
+/* WDAT Instruction Entries (actions) */
+
+struct acpi_wdat_entry {
+ u8 action;
+ u8 instruction;
+ u16 reserved;
+ struct acpi_generic_address register_region;
+ u32 value; /* Value used with Read/Write register */
+ u32 mask; /* Bitmask required for this register instruction */
+};
+
+/* Values for Action field above */
+
+enum acpi_wdat_actions {
+ ACPI_WDAT_RESET = 1,
+ ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4,
+ ACPI_WDAT_GET_COUNTDOWN = 5,
+ ACPI_WDAT_SET_COUNTDOWN = 6,
+ ACPI_WDAT_GET_RUNNING_STATE = 8,
+ ACPI_WDAT_SET_RUNNING_STATE = 9,
+ ACPI_WDAT_GET_STOPPED_STATE = 10,
+ ACPI_WDAT_SET_STOPPED_STATE = 11,
+ ACPI_WDAT_GET_REBOOT = 16,
+ ACPI_WDAT_SET_REBOOT = 17,
+ ACPI_WDAT_GET_SHUTDOWN = 18,
+ ACPI_WDAT_SET_SHUTDOWN = 19,
+ ACPI_WDAT_GET_STATUS = 32,
+ ACPI_WDAT_SET_STATUS = 33,
+ ACPI_WDAT_ACTION_RESERVED = 34 /* 34 and greater are reserved */
+};
+
+/* Values for Instruction field above */
+
+enum acpi_wdat_instructions {
+ ACPI_WDAT_READ_VALUE = 0,
+ ACPI_WDAT_READ_COUNTDOWN = 1,
+ ACPI_WDAT_WRITE_VALUE = 2,
+ ACPI_WDAT_WRITE_COUNTDOWN = 3,
+ ACPI_WDAT_INSTRUCTION_RESERVED = 4, /* 4 and greater are reserved */
+ ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */
+};
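The action entries follow the fixed-size table directly; a minimal walker (not part of the patch, names hypothetical) might look like:

static void wdat_for_each_entry(struct acpi_table_wdat *wdat)
{
        struct acpi_wdat_entry *entry = (struct acpi_wdat_entry *)(wdat + 1);
        u32 i;

        for (i = 0; i < wdat->entries; i++, entry++) {
                if (entry->action != ACPI_WDAT_RESET)
                        continue;
                /*
                 * Program entry->register_region using entry->value and
                 * entry->mask, honoring entry->instruction.
                 */
        }
}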
+
+/*******************************************************************************
+ *
+ * WDRT - Watchdog Resource Table
+ * Version 1
+ *
+ * Conforms to "Watchdog Timer Hardware Requirements for Windows Server 2003",
+ * Version 1.01, August 28, 2006
+ *
+ ******************************************************************************/
+
+struct acpi_table_wdrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ struct acpi_generic_address control_register;
+ struct acpi_generic_address count_register;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u8 pci_segment; /* PCI Segment number */
+ u16 max_count; /* Maximum counter value supported */
+ u8 units;
+};
+
+/* Reset to default packing */
+
+#pragma pack()
+
+#endif /* __ACTBL2_H__ */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 37ba576d06e..153f12dc337 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -288,7 +288,7 @@ typedef u32 acpi_physical_address;
/*
* Some compilers complain about unused variables. Sometimes we don't want to
* use all the variables (for example, _acpi_module_name). This allows us
- * to to tell the compiler in a per-variable manner that a variable
+ * to tell the compiler in a per-variable manner that a variable
* is unused
*/
#ifndef ACPI_UNUSED_VAR
@@ -338,7 +338,7 @@ typedef u32 acpi_physical_address;
/* PM Timer ticks per second (HZ) */
-#define PM_TIMER_FREQUENCY 3579545
+#define PM_TIMER_FREQUENCY 3579545
/*******************************************************************************
*
@@ -732,7 +732,8 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4
#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5
#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6
-#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 7
+#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7
+#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8
#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127
/*
@@ -921,7 +922,7 @@ typedef
void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
typedef
-void (*acpi_object_handler) (acpi_handle object, u32 function, void *data);
+void (*acpi_object_handler) (acpi_handle object, void *data);
typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function);
@@ -969,38 +970,60 @@ acpi_status(*acpi_walk_callback) (acpi_handle obj_handle,
#define ACPI_INTERRUPT_NOT_HANDLED 0x00
#define ACPI_INTERRUPT_HANDLED 0x01
-/* Length of _HID, _UID, _CID, and UUID values */
+/* Length of 32-bit EISAID values when converted back to a string */
+
+#define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */
+
+/* Length of UUID (string) values */
-#define ACPI_DEVICE_ID_LENGTH 0x09
-#define ACPI_MAX_CID_LENGTH 48
#define ACPI_UUID_LENGTH 16
-/* Common string version of device HIDs and UIDs */
+/* Structures used for device/processor HID, UID, CID */
struct acpica_device_id {
- char value[ACPI_DEVICE_ID_LENGTH];
+ u32 length; /* Length of string + null */
+ char *string;
};
-/* Common string version of device CIDs */
-
-struct acpi_compatible_id {
- char value[ACPI_MAX_CID_LENGTH];
+struct acpica_device_id_list {
+ u32 count; /* Number of IDs in Ids array */
+ u32 list_size; /* Size of list, including ID strings */
+ struct acpica_device_id ids[1]; /* ID array */
};
-struct acpi_compatible_id_list {
- u32 count;
- u32 size;
- struct acpi_compatible_id id[1];
+/*
+ * Structure returned from acpi_get_object_info.
+ * Optimized for both 32- and 64-bit builds
+ */
+struct acpi_device_info {
+ u32 info_size; /* Size of info, including ID strings */
+ u32 name; /* ACPI object Name */
+ acpi_object_type type; /* ACPI object Type */
+ u8 param_count; /* If a method, required parameter count */
+ u8 valid; /* Indicates which optional fields are valid */
+ u8 flags; /* Miscellaneous info */
+ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
+ u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */
+ u32 current_status; /* _STA value */
+ acpi_integer address; /* _ADR value */
+ struct acpica_device_id hardware_id; /* _HID value */
+ struct acpica_device_id unique_id; /* _UID value */
+ struct acpica_device_id_list compatible_id_list; /* _CID list <must be last> */
};
-/* Structure and flags for acpi_get_object_info */
+/* Values for Flags field above (acpi_get_object_info) */
+
+#define ACPI_PCI_ROOT_BRIDGE 0x01
-#define ACPI_VALID_STA 0x0001
-#define ACPI_VALID_ADR 0x0002
-#define ACPI_VALID_HID 0x0004
-#define ACPI_VALID_UID 0x0008
-#define ACPI_VALID_CID 0x0010
-#define ACPI_VALID_SXDS 0x0020
+/* Flags for Valid field above (acpi_get_object_info) */
+
+#define ACPI_VALID_STA 0x01
+#define ACPI_VALID_ADR 0x02
+#define ACPI_VALID_HID 0x04
+#define ACPI_VALID_UID 0x08
+#define ACPI_VALID_CID 0x10
+#define ACPI_VALID_SXDS 0x20
+#define ACPI_VALID_SXWS 0x40
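A sketch of the intended calling pattern (helper name is hypothetical): because the ID strings are optional, the Valid bitmask is checked before dereferencing them:

static const char *device_info_hid(struct acpi_device_info *info)
{
        if (info->valid & ACPI_VALID_HID)
                return info->hardware_id.string;
        return NULL;
}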
/* Flags for _STA method */
@@ -1011,29 +1034,6 @@ struct acpi_compatible_id_list {
#define ACPI_STA_DEVICE_OK 0x08 /* Synonym */
#define ACPI_STA_BATTERY_PRESENT 0x10
-#define ACPI_COMMON_OBJ_INFO \
- acpi_object_type type; /* ACPI object type */ \
- acpi_name name /* ACPI object Name */
-
-struct acpi_obj_info_header {
- ACPI_COMMON_OBJ_INFO;
-};
-
-/* Structure returned from Get Object Info */
-
-struct acpi_device_info {
- ACPI_COMMON_OBJ_INFO;
-
- u32 param_count; /* If a method, required parameter count */
- u32 valid; /* Indicates which fields below are valid */
- u32 current_status; /* _STA value */
- acpi_integer address; /* _ADR value if any */
- struct acpica_device_id hardware_id; /* _HID value if any */
- struct acpica_device_id unique_id; /* _UID value if any */
- u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
- struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */
-};
-
/* Context structs for address space handlers */
struct acpi_pci_id {
diff --git a/include/acpi/button.h b/include/acpi/button.h
new file mode 100644
index 00000000000..97eea0e4c01
--- /dev/null
+++ b/include/acpi/button.h
@@ -0,0 +1,25 @@
+#ifndef ACPI_BUTTON_H
+#define ACPI_BUTTON_H
+
+#include <linux/notifier.h>
+
+#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE)
+extern int acpi_lid_notifier_register(struct notifier_block *nb);
+extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
+extern int acpi_lid_open(void);
+#else
+static inline int acpi_lid_notifier_register(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int acpi_lid_open(void)
+{
+ return 1;
+}
+#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */
+
+#endif /* ACPI_BUTTON_H */
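A hypothetical consumer of the new interface (driver and function names are made up): register a notifier_block for lid transitions and query the current state with acpi_lid_open():

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <acpi/button.h>

static int my_lid_event(struct notifier_block *nb, unsigned long val,
                        void *data)
{
        pr_info("lid is now %s\n", acpi_lid_open() ? "open" : "closed");
        return NOTIFY_OK;
}

static struct notifier_block my_lid_nb = {
        .notifier_call = my_lid_event,
};

static int __init my_driver_init(void)
{
        return acpi_lid_notifier_register(&my_lid_nb);
}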
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 935c5d7fc86..6aadbf84ae7 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -57,7 +57,7 @@
/*
* Some compilers complain about unused variables. Sometimes we don't want to
* use all the variables (for example, _acpi_module_name). This allows us
- * to to tell the compiler warning in a per-variable manner that a variable
+ * to tell the compiler, in a per-variable manner, that a variable
* is unused.
*/
#define ACPI_UNUSED_VAR __attribute__ ((unused))
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index fcb8e4b159b..9d7febde10a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -149,10 +149,10 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
#define ACPI_FREE(a) kfree(a)
/* Used within ACPICA to show where it is safe to preempt execution */
-
+#include <linux/hardirq.h>
#define ACPI_PREEMPTION_POINT() \
do { \
- if (!irqs_disabled()) \
+ if (!in_atomic_preempt_off()) \
cond_resched(); \
} while (0)
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 1c1fa422d18..ca0f239f0e1 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -7,6 +7,7 @@
typedef unsigned long cputime_t;
#define cputime_zero (0UL)
+#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~0UL >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 4d3e48373e7..0c3dd860392 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -73,6 +73,19 @@
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
#endif
+#ifndef F_SETOWN_EX
+#define F_SETOWN_EX 12
+#define F_GETOWN_EX 13
+#endif
+
+#define F_OWNER_TID 0
+#define F_OWNER_PID 1
+#define F_OWNER_GID 2
+
+struct f_owner_ex {
+ int type;
+ pid_t pid;
+};
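Userspace sketch of the new command (assumes a libc that exposes these definitions; the thread id is fetched with syscall(2)): direct a descriptor's SIGIO-style signals at one thread instead of the whole process:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

static int set_owner_to_this_thread(int fd)
{
        struct f_owner_ex owner = {
                .type = F_OWNER_TID,
                .pid  = (pid_t)syscall(SYS_gettid),
        };

        return fcntl(fd, F_SETOWN_EX, &owner);
}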
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index d6c379dc64f..9cca3785cab 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -141,6 +141,8 @@ extern int __gpio_to_irq(unsigned gpio);
* but more typically is configured entirely from userspace.
*/
extern int gpio_export(unsigned gpio, bool direction_may_change);
+extern int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio);
extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
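A hypothetical driver fragment using the new call: after requesting and exporting a GPIO, also publish it under the owning device as a named sysfs link (all names below are made up):

#include <linux/gpio.h>
#include <linux/platform_device.h>

static int export_wp_gpio(struct platform_device *pdev, unsigned gpio)
{
        int err = gpio_request(gpio, "write-protect");

        if (err)
                return err;

        err = gpio_export(gpio, false);
        if (!err)
                err = gpio_export_link(&pdev->dev, "write_protect", gpio);
        if (err)
                gpio_free(gpio);
        return err;
}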
@@ -185,6 +187,12 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change)
return -ENOSYS;
}
+static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+{
+ return -ENOSYS;
+}
+
static inline void gpio_unexport(unsigned gpio)
{
}
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index eddbce0f9fb..e5f234a0854 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -2,34 +2,35 @@
#define _ASM_GENERIC_KMAP_TYPES_H
#ifdef __WITH_KM_FENCE
-# define D(n) __KM_FENCE_##n ,
+# define KMAP_D(n) __KM_FENCE_##n ,
#else
-# define D(n)
+# define KMAP_D(n)
#endif
enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_SYNC_ICACHE,
-D(14) KM_SYNC_DCACHE,
-D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16) KM_IRQ_PTE,
-D(17) KM_NMI,
-D(18) KM_NMI_PTE,
-D(19) KM_TYPE_NR
+KMAP_D(0) KM_BOUNCE_READ,
+KMAP_D(1) KM_SKB_SUNRPC_DATA,
+KMAP_D(2) KM_SKB_DATA_SOFTIRQ,
+KMAP_D(3) KM_USER0,
+KMAP_D(4) KM_USER1,
+KMAP_D(5) KM_BIO_SRC_IRQ,
+KMAP_D(6) KM_BIO_DST_IRQ,
+KMAP_D(7) KM_PTE0,
+KMAP_D(8) KM_PTE1,
+KMAP_D(9) KM_IRQ0,
+KMAP_D(10) KM_IRQ1,
+KMAP_D(11) KM_SOFTIRQ0,
+KMAP_D(12) KM_SOFTIRQ1,
+KMAP_D(13) KM_SYNC_ICACHE,
+KMAP_D(14) KM_SYNC_DCACHE,
+/* UML specific, for copy_*_user - used in do_op_one_page */
+KMAP_D(15) KM_UML_USERCOPY,
+KMAP_D(16) KM_IRQ_PTE,
+KMAP_D(17) KM_NMI,
+KMAP_D(18) KM_NMI_PTE,
+KMAP_D(19) KM_TYPE_NR
};
-#undef D
+#undef KMAP_D
#endif
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 3b69ad34189..5ee13b2fd22 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -34,6 +34,10 @@
#define MADV_REMOVE 9 /* remove these pages & resources */
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_HWPOISON 100 /* poison a page for testing */
+
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
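Userspace sketch for the new advice values (assumes a kernel and libc that expose them): opt an anonymous region into KSM merging right after mapping it:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

static void *alloc_mergeable(size_t len)
{
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p != MAP_FAILED)
                madvise(p, len, MADV_MERGEABLE);
        return p;
}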
/* compatibility flags */
#define MAP_FILE 0
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
index 7cab4de2bca..32c8bd6a196 100644
--- a/include/asm-generic/mman.h
+++ b/include/asm-generic/mman.h
@@ -11,6 +11,7 @@
#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
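Userspace sketch (assumes huge pages have been reserved by the administrator): request an anonymous mapping backed by huge pages with the new flag:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

static void *alloc_huge(size_t len)
{
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        return p == MAP_FAILED ? NULL : p;
}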
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d083561337f..b3bfabc258f 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -23,4 +23,20 @@ extern char __ctors_start[], __ctors_end[];
#define dereference_function_descriptor(p) (p)
#endif
+/* random extra sections (if any). Override
+ * in asm/sections.h */
+#ifndef arch_is_kernel_text
+static inline int arch_is_kernel_text(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
+#ifndef arch_is_kernel_data
+static inline int arch_is_kernel_data(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index c840719a8c5..942d30b5aab 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -82,6 +82,7 @@ typedef struct siginfo {
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
+ short _addr_lsb; /* LSB of the reported address */
} _sigfault;
/* SIGPOLL */
@@ -112,6 +113,7 @@ typedef struct siginfo {
#ifdef __ARCH_SI_TRAPNO
#define si_trapno _sifields._sigfault._trapno
#endif
+#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
@@ -192,7 +194,11 @@ typedef struct siginfo {
#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
#define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */
#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
-#define NSIGBUS 3
+/* hardware memory error consumed on a machine check: action required */
+#define BUS_MCEERR_AR (__SI_FAULT|4)
+/* hardware memory error detected in process but not consumed: action optional */
+#define BUS_MCEERR_AO (__SI_FAULT|5)
+#define NSIGBUS 5
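Userspace sketch of a recovery-aware SIGBUS handler (assumes a libc siginfo_t that carries the new si_addr_lsb field): it distinguishes the action-required and action-optional machine check codes and can derive the size of the poisoned region:

#define _GNU_SOURCE
#include <signal.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ucontext)
{
        if (si->si_code != BUS_MCEERR_AR && si->si_code != BUS_MCEERR_AO)
                return;

        /*
         * The poisoned region starts at si->si_addr and spans
         * 1UL << si->si_addr_lsb bytes; retire or remap it here.
         */
}

static void install_sigbus_handler(void)
{
        struct sigaction sa = {
                .sa_sigaction = sigbus_handler,
                .sa_flags     = SA_SIGINFO,
        };

        sigaction(SIGBUS, &sa, NULL);
}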
/*
* SIGTRAP si_codes
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index ea8087b55ff..5c122ae6bfa 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -1,7 +1,7 @@
/*
* Access to user system call parameters and results
*
- * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -32,9 +32,13 @@ struct pt_regs;
* If @task is not executing a system call, i.e. it's blocked
* inside the kernel for a fault or signal, returns -1.
*
+ * Note this returns int even on 64-bit machines. Only 32 bits of
+ * system call number can be meaningful. If the actual arch value
+ * is 64 bits, this truncates to 32 bits so 0xffffffff means -1.
+ *
* It's only valid to call this when @task is known to be blocked.
*/
-long syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
/**
* syscall_rollback - roll back registers after an aborted system call
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 88bada2ebc4..510df36dd5d 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -37,9 +37,6 @@
#ifndef parent_node
#define parent_node(node) ((void)(node),0)
#endif
-#ifndef node_to_cpumask
-#define node_to_cpumask(node) ((void)node, cpu_online_map)
-#endif
#ifndef cpumask_of_node
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#endif
@@ -55,18 +52,4 @@
#endif /* CONFIG_NUMA */
-/*
- * returns pointer to cpumask for specified node
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#ifndef node_to_cpumask_ptr
-
-#define node_to_cpumask_ptr(v, node) \
- cpumask_t _##v = node_to_cpumask(node); \
- const cpumask_t *v = &_##v
-
-#define node_to_cpumask_ptr_next(v, node) \
- _##v = node_to_cpumask(node)
-#endif
-
#endif /* _ASM_GENERIC_TOPOLOGY_H */
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 1125e5a1ee5..d76b66acea9 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -620,8 +620,8 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
#define __NR_rt_tgsigqueueinfo 240
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open 241
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open 241
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#undef __NR_syscalls
#define __NR_syscalls 242
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 29ca8f53ffb..b6e818f4b24 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -721,12 +721,12 @@
. = ALIGN(PAGE_SIZE); \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
INIT_TASK_DATA(inittask) \
+ NOSAVE_DATA \
+ PAGE_ALIGNED_DATA(pagealigned) \
CACHELINE_ALIGNED_DATA(cacheline) \
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
CONSTRUCTORS \
- NOSAVE_DATA \
- PAGE_ALIGNED_DATA(pagealigned) \
}
#define INIT_TEXT_SECTION(inittext_align) \
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 853508499d2..3f6e545609b 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -552,6 +552,7 @@
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8e1e92583fb..7e0cb1da92e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -185,6 +185,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,6 +222,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -667,4 +669,21 @@ struct drm_i915_get_pipe_from_crtc_id {
__u32 pipe;
};
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define __I915_MADV_PURGED 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ __u32 handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+ * or won't be and could be discarded under memory pressure.
+ */
+ __u32 madv;
+
+ /** Whether the backing store still exists. */
+ __u32 retained;
+};
+
#endif /* _I915_DRM_H_ */
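Userspace sketch of the new ioctl (assumes libdrm's copy of this header; the wrapper name is made up): mark a buffer object discardable and learn whether its pages were still resident:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_madvise_dontneed(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_madvise madv = {
                .handle = handle,
                .madv   = I915_MADV_DONTNEED,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
                return -1;
        return madv.retained;   /* 0 means the backing store was dropped */
}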
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 34321cfffea..dfcd920c3e5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -41,8 +41,6 @@
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_numa.h>
#include <asm/acpi.h>
-#include <linux/dmi.h>
-
enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PIC = 0,
@@ -219,10 +217,8 @@ static inline int acpi_video_display_switch_support(void)
#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
extern int acpi_blacklisted(void);
-#ifdef CONFIG_DMI
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern int acpi_osi_setup(char *str);
-#endif
#ifdef CONFIG_ACPI_NUMA
int acpi_get_pxm(acpi_handle handle);
@@ -292,7 +288,10 @@ void __init acpi_s4_no_nvs(void);
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
extern void acpi_early_init(void);
-#else /* CONFIG_ACPI */
+#else /* !CONFIG_ACPI */
+
+#define acpi_disabled 1
+
static inline void acpi_early_init(void) { }
static inline int early_acpi_boot_init(void)
@@ -331,5 +330,11 @@ static inline int acpi_check_mem_region(resource_size_t start,
return 0;
}
+struct acpi_table_header;
+static inline int acpi_table_parse(char *id,
+ int (*handler)(struct acpi_table_header *))
+{
+ return -1;
+}
#endif /* !CONFIG_ACPI */
#endif /*_LINUX_ACPI_H*/
diff --git a/arch/arm/include/asm/mach/mmc.h b/include/linux/amba/mmci.h
index b490ecc79de..6b4241748dd 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/include/linux/amba/mmci.h
@@ -1,17 +1,18 @@
/*
- * arch/arm/include/asm/mach/mmc.h
+ * include/linux/amba/mmci.h
*/
-#ifndef ASMARM_MACH_MMC_H
-#define ASMARM_MACH_MMC_H
+#ifndef AMBA_MMCI_H
+#define AMBA_MMCI_H
#include <linux/mmc/host.h>
-struct mmc_platform_data {
+struct mmci_platform_data {
unsigned int ocr_mask; /* available voltages */
u32 (*translate_vdd)(struct device *, unsigned int);
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;
+ unsigned long capabilities;
};
#endif
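A hedged board-file sketch of how the renamed structure, including the new capabilities field, might be populated; the variable name and values are illustrative only:

	#include <linux/amba/mmci.h>
	#include <linux/mmc/host.h>

	/* Illustrative platform data for an MMCI host on a hypothetical board. */
	static struct mmci_platform_data board_mmci_data = {
		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
		.gpio_wp	= -1,			/* no write-protect GPIO wired up */
		.gpio_cd	= -1,			/* no card-detect GPIO wired up */
		.capabilities	= MMC_CAP_4_BIT_DATA,	/* advertised via the new field */
	};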
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index dcad0ffd175..e4836c6b3dd 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -136,12 +136,12 @@ enum ssp_tx_level_trig {
/**
* enum SPI Clock Phase - clock phase (Motorola SPI interface only)
- * @SSP_CLK_RISING_EDGE: Receive data on rising edge
- * @SSP_CLK_FALLING_EDGE: Receive data on falling edge
+ * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity)
+ * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity)
*/
enum ssp_spi_clk_phase {
- SSP_CLK_RISING_EDGE,
- SSP_CLK_FALLING_EDGE
+ SSP_CLK_FIRST_EDGE,
+ SSP_CLK_SECOND_EDGE
};
/**
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index e0a0cdc2da4..69a21e0ebd3 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -8,6 +8,9 @@
#ifndef _LINUX_ANON_INODES_H
#define _LINUX_ANON_INODES_H
+struct file *anon_inode_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5fc2ef8d97f..a1c486a88e8 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,25 +58,60 @@ struct dma_chan_ref {
* array.
* @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
* dependency chain
- * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining.
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ * chain uses this operation's result as an input
*/
enum async_tx_flags {
ASYNC_TX_XOR_ZERO_DST = (1 << 0),
ASYNC_TX_XOR_DROP_DST = (1 << 1),
- ASYNC_TX_ACK = (1 << 3),
- ASYNC_TX_DEP_ACK = (1 << 4),
+ ASYNC_TX_ACK = (1 << 2),
+ ASYNC_TX_FENCE = (1 << 3),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+ enum async_tx_flags flags;
+ struct dma_async_tx_descriptor *depend_tx;
+ dma_async_tx_callback cb_fn;
+ void *cb_param;
+ void *scribble;
};
#ifdef CONFIG_DMA_ENGINE
#define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+ if (likely(tx)) {
+ struct dma_chan *chan = tx->chan;
+ struct dma_device *dma = chan->device;
+
+ dma->device_issue_pending(chan);
+ }
+}
#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
#include <asm/async_tx.h>
#else
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
__async_tx_find_channel(dep, type)
struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
- enum dma_transaction_type tx_type);
+__async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type);
#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
#else
static inline void async_tx_issue_pending_all(void)
@@ -84,10 +119,16 @@ static inline void async_tx_issue_pending_all(void)
do { } while (0);
}
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+ do { } while (0);
+}
+
static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
- enum dma_transaction_type tx_type, struct page **dst, int dst_count,
- struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type, struct page **dst,
+ int dst_count, struct page **src, int src_count,
+ size_t len)
{
return NULL;
}
@@ -99,46 +140,70 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
* @cb_fn_param: parameter to pass to the callback routine
*/
static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
{
- if (cb_fn)
- cb_fn(cb_fn_param);
+ if (submit->cb_fn)
+ submit->cb_fn(submit->cb_param);
}
-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
- enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_fn_param);
+typedef union {
+ unsigned long addr;
+ struct page *page;
+ dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+ struct dma_async_tx_descriptor *tx,
+ dma_async_tx_callback cb_fn, void *cb_param,
+ addr_conv_t *scribble)
+{
+ args->flags = flags;
+ args->depend_tx = tx;
+ args->cb_fn = cb_fn;
+ args->cb_param = cb_param;
+ args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_fn_param);
+ int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len,
- u32 *result, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
- unsigned int src_offset, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_fn_param);
+ unsigned int src_offset, size_t len,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
- size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_fn_param);
+ size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+ size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+ size_t len, enum sum_check_flags *pqres, struct page *spare,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+ struct page **ptrs, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_fn_param);
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+ struct page **ptrs, struct async_submit_ctl *submit);
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */
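To illustrate the new submission interface, here is a minimal sketch of chaining two operations through struct async_submit_ctl; the pages, callback and scribble buffer are assumed to exist and are named hypothetically:

	#include <linux/async_tx.h>

	/* Sketch: xor two source pages into 'dest', then run a callback once
	 * the dependent memcpy into 'copy' completes. */
	static void submit_example(struct page *dest, struct page **srcs,
				   struct page *copy, dma_async_tx_callback done,
				   void *done_arg, addr_conv_t *scribble)
	{
		struct async_submit_ctl submit;
		struct dma_async_tx_descriptor *tx;

		init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST,
				  NULL, NULL, NULL, scribble);
		tx = async_xor(dest, srcs, 0, 2, PAGE_SIZE, &submit);

		/* the memcpy depends on the xor result via 'tx' */
		init_async_submit(&submit, ASYNC_TX_ACK, tx, done, done_arg, scribble);
		tx = async_memcpy(copy, dest, 0, 0, PAGE_SIZE, &submit);
		async_tx_issue_pending(tx);
	}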
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0ee33c2e612..b449e738533 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -101,7 +101,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+ long nr_pages);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 2046b5b8af4..aece486ac73 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -120,7 +120,7 @@ extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
-extern int set_binfmt(struct linux_binfmt *new);
+extern void set_binfmt(struct linux_binfmt *new);
extern void free_bprm(struct linux_binprm *);
#endif /* __KERNEL__ */
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index bc3ab707369..dd97fb8408a 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -132,9 +132,6 @@ static inline void *alloc_remap(int nid, unsigned long size)
}
#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
-extern unsigned long __meminitdata nr_kernel_pages;
-extern unsigned long __meminitdata nr_all_pages;
-
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
@@ -145,6 +142,8 @@ extern void *alloc_large_system_hash(const char *tablename,
unsigned long limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
+ * shift passed via *_hash_shift */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c3021105edc..c8f2a5f70ed 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -7,7 +7,7 @@
*
* See here for the libcap library ("POSIX draft" compliance):
*
- * ftp://linux.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
+ * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
*/
#ifndef _LINUX_CAPABILITY_H
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 90bba9e6228..b62bb9294d0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -141,6 +141,38 @@ enum {
CGRP_WAIT_ON_RMDIR,
};
+/* which pidlist file are we talking about? */
+enum cgroup_filetype {
+ CGROUP_FILE_PROCS,
+ CGROUP_FILE_TASKS,
+};
+
+/*
+ * A pidlist is a list of pids that virtually represents the contents of one
+ * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
+ * a pair (one each for procs, tasks) for each pid namespace that's relevant
+ * to the cgroup.
+ */
+struct cgroup_pidlist {
+ /*
+ * used to find which pidlist is wanted. doesn't change as long as
+ * this particular list stays in the list.
+ */
+ struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
+ /* array of xids */
+ pid_t *list;
+ /* how many elements the above list has */
+ int length;
+ /* how many files are using the current array */
+ int use_count;
+ /* each of these stored in a list by its cgroup */
+ struct list_head links;
+ /* pointer to the cgroup we belong to, for list removal purposes */
+ struct cgroup *owner;
+ /* protects the other fields */
+ struct rw_semaphore mutex;
+};
+
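As a rough illustration of how the pidlist bookkeeping described above might be consumed, the following hypothetical helper walks a cgroup's pidlists looking for the entry matching a (type, namespace) key; it only uses the fields defined here and is not the actual cgroup.c implementation:

	/* Hypothetical lookup sketch; callers would hold cgrp->pidlist_mutex. */
	static struct cgroup_pidlist *pidlist_find(struct cgroup *cgrp,
						   enum cgroup_filetype type,
						   struct pid_namespace *ns)
	{
		struct cgroup_pidlist *l;

		list_for_each_entry(l, &cgrp->pidlists, links)
			if (l->key.type == type && l->key.ns == ns)
				return l;
		return NULL;
	}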
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
@@ -179,11 +211,12 @@ struct cgroup {
*/
struct list_head release_list;
- /* pids_mutex protects pids_list and cached pid arrays. */
- struct rw_semaphore pids_mutex;
-
- /* Linked list of struct cgroup_pids */
- struct list_head pids_list;
+ /*
+ * list of pidlists, up to two for each namespace (one for procs, one
+ * for tasks); created on demand.
+ */
+ struct list_head pidlists;
+ struct mutex pidlist_mutex;
/* For RCU-protected deletion */
struct rcu_head rcu_head;
@@ -227,6 +260,9 @@ struct css_set {
* during subsystem registration (at boot time).
*/
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+ /* For RCU-protected deletion */
+ struct rcu_head rcu_head;
};
/*
@@ -389,10 +425,11 @@ struct cgroup_subsys {
struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
- int (*can_attach)(struct cgroup_subsys *ss,
- struct cgroup *cgrp, struct task_struct *tsk);
+ int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct task_struct *tsk, bool threadgroup);
void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup *old_cgrp, struct task_struct *tsk);
+ struct cgroup *old_cgrp, struct task_struct *tsk,
+ bool threadgroup);
void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
int (*populate)(struct cgroup_subsys *ss,
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index b8125b2eb66..47dac5ea8d3 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -52,6 +52,7 @@ struct proc_event {
PROC_EVENT_EXEC = 0x00000002,
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
+ PROC_EVENT_SID = 0x00000080,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit */
PROC_EVENT_EXIT = 0x80000000
@@ -89,6 +90,11 @@ struct proc_event {
} e;
} id;
+ struct sid_proc_event {
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
+ } sid;
+
struct exit_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
@@ -102,6 +108,7 @@ struct proc_event {
void proc_fork_connector(struct task_struct *task);
void proc_exec_connector(struct task_struct *task);
void proc_id_connector(struct task_struct *task, int which_id);
+void proc_sid_connector(struct task_struct *task);
void proc_exit_connector(struct task_struct *task);
#else
static inline void proc_fork_connector(struct task_struct *task)
@@ -114,6 +121,9 @@ static inline void proc_id_connector(struct task_struct *task,
int which_id)
{}
+static inline void proc_sid_connector(struct task_struct *task)
+{}
+
static inline void proc_exit_connector(struct task_struct *task)
{}
#endif /* CONFIG_PROC_EVENTS */
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 7f627775c94..ddb7a97c78c 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -27,8 +27,8 @@
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * Please read Documentation/filesystems/configfs.txt before using the
- * configfs interface, ESPECIALLY the parts about reference counts and
+ * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * the configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 796df12091b..789cf5f920c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -3,444 +3,37 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number.
- *
- * The new cpumask_ ops take a "struct cpumask *"; the old ones
- * use cpumask_t.
- *
- * See detailed comments in the file linux/bitmap.h describing the
- * data type on which these cpumasks are based.
- *
- * For details of cpumask_scnprintf() and cpumask_parse_user(),
- * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
- * For details of cpulist_scnprintf() and cpulist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
- * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
- * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
- * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
- *
- * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
- * Note: The alternate operations with the suffix "_nr" are used
- * to limit the range of the loop to nr_cpu_ids instead of
- * NR_CPUS when NR_CPUS > 64 for performance reasons.
- * If NR_CPUS is <= 64 then most assembler bitmask
- * operators execute faster with a constant range, so
- * the operator will continue to use NR_CPUS.
- *
- * Another consideration is that nr_cpu_ids is initialized
- * to NR_CPUS and isn't lowered until the possible cpus are
- * discovered (including any disabled cpus). So early uses
- * will span the entire range of NR_CPUS.
- * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
- *
- * The obsolescent cpumask operations are:
- *
- * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
- * void cpus_setall(mask) set all bits
- * void cpus_clear(mask) clear all bits
- * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
- *
- * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
- * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
- * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
- * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2
- * void cpus_complement(dst, src) dst = ~src
- *
- * int cpus_equal(mask1, mask2) Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
- * int cpus_empty(mask) Is mask empty (no bits sets)?
- * int cpus_full(mask) Is mask full (all bits sets)?
- * int cpus_weight(mask) Hamming weigh - number of set bits
- * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS
- *
- * void cpus_shift_right(dst, src, n) Shift right
- * void cpus_shift_left(dst, src, n) Shift left
- *
- * int first_cpu(mask) Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
- * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
- *
- * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
- * (can be used as an lvalue)
- * CPU_MASK_ALL Initializer - all bits set
- * CPU_MASK_NONE Initializer - no bits set
- * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
- *
- * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
- * variables, and CPUMASK_PTR provides pointers to each field.
- *
- * The structure should be defined something like this:
- * struct my_cpumasks {
- * cpumask_t mask1;
- * cpumask_t mask2;
- * };
- *
- * Usage is then:
- * CPUMASK_ALLOC(my_cpumasks);
- * CPUMASK_PTR(mask1, my_cpumasks);
- * CPUMASK_PTR(mask2, my_cpumasks);
- *
- * --- DO NOT reference cpumask_t pointers until this check ---
- * if (my_cpumasks == NULL)
- * "kmalloc failed"...
- *
- * References are now pointers to the cpumask_t variables (*mask1, ...)
- *
- *if NR_CPUS > BITS_PER_LONG
- * CPUMASK_ALLOC(m) Declares and allocates struct m *m =
- * kmalloc(sizeof(*m), GFP_KERNEL)
- * CPUMASK_FREE(m) Macro for kfree(m)
- *else
- * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m
- * CPUMASK_FREE(m) Nop
- *endif
- * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v)
- * ------------------------------------------------------------------------
- *
- * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask
- * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
- * int cpulist_parse(buf, map) Parse ascii string as cpulist
- * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
- * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
- * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
- *
- * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS
- * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids
- *
- * int num_online_cpus() Number of online CPUs
- * int num_possible_cpus() Number of all possible CPUs
- * int num_present_cpus() Number of present CPUs
- *
- * int cpu_online(cpu) Is some cpu online?
- * int cpu_possible(cpu) Is some cpu possible?
- * int cpu_present(cpu) Is some cpu present (can schedule)?
- *
- * int any_online_cpu(mask) First online cpu in mask
- *
- * for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
- * for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
- * for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
- *
- * Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
- * to generate slightly worse code. Note for example the additional
- * 40 lines of assembly code compiling the "for each possible cpu"
- * loops buried in the disk_stat_read() macros calls when compiling
- * drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
- * one-line #define for cpu_isset(), instead of wrapping an inline
- * inside a macro, the way we do the other calls.
+ * set of CPU's in a system, one bit position per CPU number. In general,
+ * only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
-
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
- set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
- clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
- bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
- return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
- __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
- const cpumask_t *srcp, int nbits)
-{
- bitmap_complement(dstp->bits, srcp->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
- return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
-{
- return bitmap_full(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
- return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_right(dst, src, n) \
- __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
- __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
- * @bitmap: the bitmap
- *
- * There are a few places where cpumask_var_t isn't appropriate and
- * static cpumasks must be used (eg. very early boot), yet we don't
- * expose the definition of 'struct cpumask'.
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
*
- * This does the conversion, and can be used as a constant initializer.
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
*/
-#define to_cpumask(bitmap) \
- ((struct cpumask *)(1 ? (bitmap) \
- : (void *)sizeof(__check_is_bitmap(bitmap))))
-
-static inline int __check_is_bitmap(const unsigned long *bitmap)
-{
- return 1;
-}
-
-/*
- * Special-case data structure for "single bit set only" constant CPU masks.
- *
- * We pre-generate all the 64 (or 32) possible bit positions, with enough
- * padding to the left and the right, and return the constant pointer
- * appropriately offset.
- */
-extern const unsigned long
- cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
-{
- const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
- p -= cpu / BITS_PER_LONG;
- return to_cpumask(p);
-}
-
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-/*
- * In cases where we take the address of the cpumask immediately,
- * gcc optimizes it out (it's a constant) and there's no huge stack
- * variable created:
- */
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
-#if NR_CPUS <= BITS_PER_LONG
-
-#define CPU_MASK_ALL \
-(cpumask_t) { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-#define CPU_MASK_ALL_PTR (&CPU_MASK_ALL)
-
-#else
-
-#define CPU_MASK_ALL \
-(cpumask_t) { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-} }
-
-/* cpu_mask_all is in init/main.c */
-extern cpumask_t cpu_mask_all;
-#define CPU_MASK_ALL_PTR (&cpu_mask_all)
-
-#endif
-
-#define CPU_MASK_NONE \
-(cpumask_t) { { \
- [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
-} }
-
-#define CPU_MASK_CPU0 \
-(cpumask_t) { { \
- [0] = 1UL \
-} }
-
-#define cpus_addr(src) ((src).bits)
-
-#if NR_CPUS > BITS_PER_LONG
-#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define CPUMASK_FREE(m) kfree(m)
-#else
-#define CPUMASK_ALLOC(m) struct m _m, *m = &_m
-#define CPUMASK_FREE(m)
-#endif
-#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v)
-
-#define cpu_remap(oldbit, old, new) \
- __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
-static inline int __cpu_remap(int oldbit,
- const cpumask_t *oldp, const cpumask_t *newp, int nbits)
-{
- return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
-}
-
-#define cpus_remap(dst, src, old, new) \
- __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
-static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
- const cpumask_t *oldp, const cpumask_t *newp, int nbits)
-{
- bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
-}
-
-#define cpus_onto(dst, orig, relmap) \
- __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
-static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
- const cpumask_t *relmapp, int nbits)
-{
- bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
-}
-
-#define cpus_fold(dst, orig, sz) \
- __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
-static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
- int sz, int nbits)
-{
- bitmap_fold(dstp->bits, origp->bits, sz, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+#define cpumask_bits(maskp) ((maskp)->bits)
#if NR_CPUS == 1
-
#define nr_cpu_ids 1
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-#else /* NR_CPUS > 1 */
-
+#else
extern int nr_cpu_ids;
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-int __any_online_cpu(const cpumask_t *mask);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) __any_online_cpu(&(mask))
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu((cpu), (mask)), \
- (cpu) < NR_CPUS; )
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#endif
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#if NR_CPUS <= 64
-
-#define next_cpu_nr(n, src) next_cpu(n, src)
-#define cpus_weight_nr(cpumask) cpus_weight(cpumask)
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src))
-#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
-#define for_each_cpu_mask_nr(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu_nr((cpu), (mask)), \
- (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
+#define nr_cpumask_bits nr_cpu_ids
+#else
+#define nr_cpumask_bits NR_CPUS
+#endif
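A brief sketch of the pointer-based interface that the trimmed header now favours; iteration is bounded by nr_cpumask_bits/nr_cpu_ids rather than NR_CPUS, and the printing is illustrative only:

	/* Illustrative: iterate the online mask with the cpumask_* interface. */
	static void print_online_cpus(void)
	{
		unsigned int cpu;

		for_each_cpu(cpu, cpu_online_mask)
			pr_info("cpu %u online\n", cpu);
	}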
/*
* The following particular system cpumasks and operations manage
@@ -487,12 +80,6 @@ extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
-/* These strip const, as traditionally they weren't const. */
-#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
-#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
-#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
-#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
-
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
@@ -511,35 +98,6 @@ extern const struct cpumask *const cpu_active_mask;
#define cpu_active(cpu) ((cpu) == 0)
#endif
-#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-
-/* These are the new versions of the cpumask operators: passed by pointer.
- * The older versions will be implemented in terms of these, then deleted. */
-#define cpumask_bits(maskp) ((maskp)->bits)
-
-#if NR_CPUS <= BITS_PER_LONG
-#define CPU_BITS_ALL \
-{ \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-}
-
-#else /* NR_CPUS > BITS_PER_LONG */
-
-#define CPU_BITS_ALL \
-{ \
- [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
-}
-#endif /* NR_CPUS > BITS_PER_LONG */
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
-#else
-#define nr_cpumask_bits NR_CPUS
-#endif
-
/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
@@ -715,6 +273,18 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
}
/**
+ * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * test_and_clear_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+{
+ return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
+/**
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
@@ -1088,4 +658,241 @@ void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap) \
+ ((struct cpumask *)(1 ? (bitmap) \
+ : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+ return 1;
+}
+
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ p -= cpu / BITS_PER_LONG;
+ return to_cpumask(p);
+}
+
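For example, to_cpumask() lets early-boot code wrap a static NR_CPUS bitmap without exposing the definition of struct cpumask; a minimal sketch with a made-up mask name:

	/* Illustrative: a static NR_CPUS bitmap used through the cpumask API. */
	static DECLARE_BITMAP(boot_cpu_bits, NR_CPUS);

	static void mark_boot_cpu(unsigned int cpu)
	{
		cpumask_set_cpu(cpu, to_cpumask(boot_cpu_bits));
	}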
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL \
+{ \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/*
+ *
+ * From here down, all obsolete. Use cpumask_ variants!
+ *
+ */
+#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
+/* These strip const, as traditionally they weren't const. */
+#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
+#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
+#define cpu_present_map (*(cpumask_t *)cpu_present_mask)
+#define cpu_active_map (*(cpumask_t *)cpu_active_mask)
+
+#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
+
+#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
+
+#if NR_CPUS <= BITS_PER_LONG
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#else
+
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+} }
+
+#endif
+
+#define CPU_MASK_NONE \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+} }
+
+#define CPU_MASK_CPU0 \
+(cpumask_t) { { \
+ [0] = 1UL \
+} }
+
+#if NR_CPUS == 1
+#define first_cpu(src) ({ (void)(src); 0; })
+#define next_cpu(n, src) ({ (void)(src); 1; })
+#define any_online_cpu(mask) 0
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#else /* NR_CPUS > 1 */
+int __first_cpu(const cpumask_t *srcp);
+int __next_cpu(int n, const cpumask_t *srcp);
+int __any_online_cpu(const cpumask_t *mask);
+
+#define first_cpu(src) __first_cpu(&(src))
+#define next_cpu(n, src) __next_cpu((n), &(src))
+#define any_online_cpu(mask) __any_online_cpu(&(mask))
+#define for_each_cpu_mask(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = next_cpu((cpu), (mask)), \
+ (cpu) < NR_CPUS; )
+#endif /* NR_CPUS > 1 */
+
+#if NR_CPUS <= 64
+
+#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
+
+#else /* NR_CPUS > 64 */
+
+int __next_cpu_nr(int n, const cpumask_t *srcp);
+#define for_each_cpu_mask_nr(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = __next_cpu_nr((cpu), &(mask)), \
+ (cpu) < nr_cpu_ids; )
+
+#endif /* NR_CPUS > 64 */
+
+#define cpus_addr(src) ((src).bits)
+
+#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
+static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+ set_bit(cpu, dstp->bits);
+}
+
+#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
+static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+ clear_bit(cpu, dstp->bits);
+}
+
+#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
+static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
+static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - a type-checked wrapper generates worse code. */
+#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
+
+#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
+static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+ return test_and_set_bit(cpu, addr->bits);
+}
+
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_andnot(dst, src1, src2) \
+ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_equal(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_intersects(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+static inline int __cpus_subset(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define cpus_shift_left(dst, src, n) \
+ __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+static inline void __cpus_shift_left(cpumask_t *dstp,
+ const cpumask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
+
#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index fb371601a3b..4e3387a89cb 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -176,23 +176,7 @@ extern void __invalid_creds(const struct cred *, const char *, unsigned);
extern void __validate_process_creds(struct task_struct *,
const char *, unsigned);
-static inline bool creds_are_invalid(const struct cred *cred)
-{
- if (cred->magic != CRED_MAGIC)
- return true;
- if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
- return true;
-#ifdef CONFIG_SECURITY_SELINUX
- if (selinux_is_enabled()) {
- if ((unsigned long) cred->security < PAGE_SIZE)
- return true;
- if ((*(u32 *)cred->security & 0xffffff00) ==
- (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
- return true;
- }
-#endif
- return false;
-}
+extern bool creds_are_invalid(const struct cred *cred);
static inline void __validate_creds(const struct cred *cred,
const char *file, unsigned line)
diff --git a/include/linux/dca.h b/include/linux/dca.h
index 9c20c7e87d0..d27a7a05718 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -20,6 +20,9 @@
*/
#ifndef DCA_H
#define DCA_H
+
+#include <linux/pci.h>
+
/* DCA Provider API */
/* DCA Notifier Interface */
@@ -36,6 +39,12 @@ struct dca_provider {
int id;
};
+struct dca_domain {
+ struct list_head node;
+ struct list_head dca_providers;
+ struct pci_bus *pci_rc;
+};
+
struct dca_ops {
int (*add_requester) (struct dca_provider *, struct device *);
int (*remove_requester) (struct dca_provider *, struct device *);
@@ -47,7 +56,7 @@ struct dca_ops {
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
void free_dca_provider(struct dca_provider *dca);
int register_dca_provider(struct dca_provider *dca, struct device *dev);
-void unregister_dca_provider(struct dca_provider *dca);
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
static inline void *dca_priv(struct dca_provider *dca)
{
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index eb5c2ba2f81..fc1b930f246 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -9,7 +9,7 @@
* 2 as published by the Free Software Foundation.
*
* debugfs is for people to use instead of /proc or /sys.
- * See Documentation/DocBook/kernel-api for more details.
+ * See Documentation/DocBook/filesystems for more details.
*/
#ifndef _DEBUGFS_H_
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ffefba81c81..2b9f2ac7ed6 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -48,19 +48,20 @@ enum dma_status {
/**
* enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
+ * automatically set as dma devices are registered.
*/
enum dma_transaction_type {
DMA_MEMCPY,
DMA_XOR,
- DMA_PQ_XOR,
- DMA_DUAL_XOR,
- DMA_PQ_UPDATE,
- DMA_ZERO_SUM,
- DMA_PQ_ZERO_SUM,
+ DMA_PQ,
+ DMA_XOR_VAL,
+ DMA_PQ_VAL,
DMA_MEMSET,
- DMA_MEMCPY_CRC32C,
DMA_INTERRUPT,
DMA_PRIVATE,
+ DMA_ASYNC_TX,
DMA_SLAVE,
};
@@ -70,18 +71,25 @@ enum dma_transaction_type {
/**
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
- * control completion, and communicate status.
+ * control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
- * this transaction
+ * this transaction
* @DMA_CTRL_ACK - the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any
- * dependency chains
+ * acknowledges receipt, i.e. has had a chance to establish any dependency
+ * chains
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
* @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
* @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
* (if not set, do the source dma-unmapping as page)
* @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
* (if not set, do the destination dma-unmapping as page)
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ * sources that were the result of a previous operation, in the case of a PQ
+ * operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ * on the result of this operation
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -90,9 +98,32 @@ enum dma_ctrl_flags {
DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
+ DMA_PREP_PQ_DISABLE_P = (1 << 6),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 7),
+ DMA_PREP_CONTINUE = (1 << 8),
+ DMA_PREP_FENCE = (1 << 9),
};
/**
+ * enum sum_check_bits - bit position of sum_check_flags
+ */
+enum sum_check_bits {
+ SUM_CHECK_P = 0,
+ SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+ SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+ SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
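To show how these result bits are meant to be consumed, a small sketch of checking the outcome of a validation (*_val) operation; the surrounding RAID context is assumed:

	/* Sketch: interpret the result written by a *_val operation. */
	static void check_parity_result(enum sum_check_flags pqres)
	{
		if (pqres & SUM_CHECK_P_RESULT)
			pr_err("P (xor) parity mismatch\n");
		if (pqres & SUM_CHECK_Q_RESULT)
			pr_err("Q (reed-solomon) syndrome mismatch\n");
	}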
+
+/**
* dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
* See linux/cpumask.h
*/
@@ -180,8 +211,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
* @flags: flags to augment operation preparation, control completion, and
* communicate status
* @phys: physical address of the descriptor
- * @tx_list: driver common field for operations that require multiple
- * descriptors
* @chan: target channel for this operation
* @tx_submit: set the prepared descriptor(s) to be executed by the engine
* @callback: routine to call after this operation is complete
@@ -195,7 +224,6 @@ struct dma_async_tx_descriptor {
dma_cookie_t cookie;
enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
dma_addr_t phys;
- struct list_head tx_list;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
@@ -213,6 +241,11 @@ struct dma_async_tx_descriptor {
* @global_node: list_head for global dma_device_list
* @cap_mask: one or more dma_capability flags
* @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @device_alloc_chan_resources: allocate resources and return the
@@ -220,7 +253,9 @@ struct dma_async_tx_descriptor {
* @device_free_chan_resources: release DMA channel's resources
* @device_prep_dma_memcpy: prepares a memcpy operation
* @device_prep_dma_xor: prepares a xor operation
- * @device_prep_dma_zero_sum: prepares a zero_sum operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pq validation (zero_sum) operation
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
@@ -235,7 +270,13 @@ struct dma_device {
struct list_head channels;
struct list_head global_node;
dma_cap_mask_t cap_mask;
- int max_xor;
+ unsigned short max_xor;
+ unsigned short max_pq;
+ u8 copy_align;
+ u8 xor_align;
+ u8 pq_align;
+ u8 fill_align;
+ #define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
struct device *dev;
@@ -249,9 +290,17 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
- size_t len, u32 *result, unsigned long flags);
+ size_t len, enum sum_check_flags *result, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags);
@@ -270,6 +319,96 @@ struct dma_device {
void (*device_issue_pending)(struct dma_chan *chan);
};
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+ size_t mask;
+
+ if (!align)
+ return true;
+ mask = (1 << align) - 1;
+ if (mask & (off1 | off2 | len))
+ return false;
+ return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
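A worked example of the alignment test: an alignment shift of 2 demands 4-byte alignment, so any offset or length with the low two bits set fails the check. The values below are arbitrary and assume the driver set copy_align at registration time:

	/* Sketch: with copy_align = 2 a transfer must be 4-byte aligned. */
	static bool example_alignment_check(struct dma_device *dev)
	{
		/* assuming dev->copy_align == 2 */
		bool ok_aligned = is_dma_copy_aligned(dev, 0, 64, 256);	/* true */
		bool bad_offset = is_dma_copy_aligned(dev, 1, 64, 256);	/* false */

		return ok_aligned && !bad_offset;
	}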
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+ dma->max_pq = maxpq;
+ if (has_pq_continue)
+ dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+ return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+ enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+ return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+ return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+ return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+ if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma);
+ else if (dmaf_p_disabled_continue(flags))
+ return dma_dev_to_maxpq(dma) - 1;
+ else if (dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma) - 3;
+ BUG();
+}
+
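For instance, a device that advertises 8 PQ sources without native continuation support can only be offered 5 sources when DMA_PREP_CONTINUE is set, or 7 when P is also disabled; a hedged sketch of the arithmetic for a hypothetical device:

	/* Sketch: how dma_set_maxpq() and dma_maxpq() interact. */
	static void maxpq_example(struct dma_device *dma)
	{
		dma_set_maxpq(dma, 8, 0);	/* 8 sources, no PQ-continue support */

		WARN_ON(dma_maxpq(dma, 0) != 8);
		WARN_ON(dma_maxpq(dma, DMA_PREP_CONTINUE) != 5);	/* 8 - 3 */
		WARN_ON(dma_maxpq(dma, DMA_PREP_CONTINUE |
				       DMA_PREP_PQ_DISABLE_P) != 7);	/* 8 - 1 */
	}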
/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
@@ -299,7 +438,11 @@ static inline void net_dmaengine_put(void)
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put()
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
#define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
#else
static inline void async_dmaengine_get(void)
{
@@ -312,7 +455,7 @@ async_dma_find_channel(enum dma_transaction_type type)
{
return NULL;
}
-#endif
+#endif /* CONFIG_ASYNC_TX_DMA */
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 3b85ba6479f..94dd10366a7 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -27,6 +27,7 @@
#ifdef CONFIG_EVENTFD
+struct file *eventfd_file_create(unsigned int count, int flags);
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
@@ -40,6 +41,11 @@ int eventfd_signal(struct eventfd_ctx *ctx, int n);
* Ugly ugly ugly error layer to support modules that uses eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
+static inline struct file *eventfd_file_create(unsigned int count, int flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
return ERR_PTR(-ENOSYS);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f847df9e99b..a34bdf5a9d2 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -133,6 +133,7 @@ struct dentry;
#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */
#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */
#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */
+#define FB_ACCEL_PXA3XX 99 /* PXA3xx */
#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */
#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 192d1e43c43..7e1d4dec83e 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -134,20 +134,6 @@ struct fw_card {
u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};
-static inline struct fw_card *fw_card_get(struct fw_card *card)
-{
- kref_get(&card->kref);
-
- return card;
-}
-
-void fw_card_release(struct kref *kref);
-
-static inline void fw_card_put(struct fw_card *card)
-{
- kref_put(&card->kref, fw_card_release);
-}
-
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index 45ff1849151..1d747f72298 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -31,10 +31,32 @@ struct flex_array {
};
};
-#define FLEX_ARRAY_INIT(size, total) { { {\
- .element_size = (size), \
- .total_nr_elements = (total), \
-} } }
+/* Number of bytes left in base struct flex_array, excluding metadata */
+#define FLEX_ARRAY_BASE_BYTES_LEFT \
+ (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))
+
+/* Number of pointers in base to struct flex_array_part pages */
+#define FLEX_ARRAY_NR_BASE_PTRS \
+ (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))
+
+/* Number of elements of size that fit in struct flex_array_part */
+#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \
+ (FLEX_ARRAY_PART_SIZE / size)
+
+/*
+ * Defines a statically allocated flex array and ensures its parameters are
+ * valid.
+ */
+#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \
+ struct flex_array __arrayname = { { { \
+ .element_size = (__element_size), \
+ .total_nr_elements = (__total), \
+ } } }; \
+ static inline void __arrayname##_invalid_parameter(void) \
+ { \
+ BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \
+ FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
+ }
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
@@ -44,6 +66,8 @@ void flex_array_free(struct flex_array *fa);
void flex_array_free_parts(struct flex_array *fa);
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags);
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+int flex_array_shrink(struct flex_array *fa);
#endif /* _FLEX_ARRAY_H */
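As a usage sketch for the flex_array interface above — the element type, array size and wrapper names below are hypothetical, not taken from the patch — the dynamic-allocation path declared in this header fits together like this:

#include <linux/errno.h>
#include <linux/flex_array.h>
#include <linux/gfp.h>

struct foo {
	int a;
	int b;
};

static struct flex_array *foo_array;

static int foo_init(void)
{
	/* room for 1024 elements; part pages are only allocated on demand */
	foo_array = flex_array_alloc(sizeof(struct foo), 1024, GFP_KERNEL);
	if (!foo_array)
		return -ENOMEM;
	return 0;
}

static int foo_store(unsigned int nr, struct foo *src)
{
	/* copies *src into slot nr, allocating its part page if needed */
	return flex_array_put(foo_array, nr, src, GFP_KERNEL);
}

static struct foo *foo_lookup(unsigned int nr)
{
	return flex_array_get(foo_array, nr);
}

DEFINE_FLEX_ARRAY() covers the static case instead: it emits the initialized struct flex_array plus a dummy inline function whose BUILD_BUG_ON() rejects element/total combinations that cannot fit in the part pointers of the base structure.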
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 90162fb3bf0..2adaa2529f1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -595,6 +595,7 @@ struct address_space_operations {
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
+ int (*error_remove_page)(struct address_space *, struct page *);
};
/*
@@ -640,7 +641,6 @@ struct block_device {
struct super_block * bd_super;
int bd_openers;
struct mutex bd_mutex; /* open/close mutex */
- struct semaphore bd_mount_sem;
struct list_head bd_inodes;
void * bd_holder;
int bd_holders;
@@ -1066,8 +1066,8 @@ struct file_lock {
struct fasync_struct * fl_fasync; /* for lease break notifications */
unsigned long fl_break_time; /* for nonblocking lease breaks */
- struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
- struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
union {
struct nfs_lock_info nfs_fl;
struct nfs4_lock_info nfs4_fl;
@@ -1315,11 +1315,11 @@ struct super_block {
unsigned long s_blocksize;
unsigned char s_blocksize_bits;
unsigned char s_dirt;
- unsigned long long s_maxbytes; /* Max file size */
+ loff_t s_maxbytes; /* Max file size */
struct file_system_type *s_type;
const struct super_operations *s_op;
- struct dquot_operations *dq_op;
- struct quotactl_ops *s_qcop;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
unsigned long s_magic;
@@ -2156,6 +2156,7 @@ extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern void generic_delete_inode(struct inode *inode);
extern void generic_drop_inode(struct inode *inode);
+extern int generic_detach_inode(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
@@ -2334,6 +2335,7 @@ extern void get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
+extern struct super_block *get_active_super(struct block_device *bdev);
extern struct super_block *user_get_super(dev_t);
extern void drop_super(struct super_block *sb);
@@ -2381,7 +2383,8 @@ extern int buffer_migrate_page(struct address_space *,
#define buffer_migrate_page NULL
#endif
-extern int inode_change_ok(struct inode *, struct iattr *);
+extern int inode_change_ok(const struct inode *, struct iattr *);
+extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern int __must_check inode_setattr(struct inode *, struct iattr *);
extern void file_update_time(struct file *file);
@@ -2467,7 +2470,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
+int proc_nr_files(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
int __init get_filesystem_list(char *buf);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3c0924a18da..cd3d2abaf30 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -19,7 +19,7 @@
extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
@@ -94,7 +94,7 @@ static inline void ftrace_start(void) { }
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
#endif
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34956c8fdeb..8ec17997d94 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
-struct inode;
-struct mm_struct;
-struct task_struct;
-union ktime;
-
/* Second argument to futex syscall */
@@ -129,6 +124,11 @@ struct robust_list_head {
#define FUTEX_BITSET_MATCH_ANY 0xffffffff
#ifdef __KERNEL__
+struct inode;
+struct mm_struct;
+struct task_struct;
+union ktime;
+
long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 109d179adb9..297df45ffd0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -151,7 +151,7 @@ struct gendisk {
struct disk_part_tbl *part_tbl;
struct hd_struct part0;
- struct block_device_operations *fops;
+ const struct block_device_operations *fops;
struct request_queue *queue;
void *private_data;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7c777a0da17..557bdad320b 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -220,7 +220,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
((1 << ZONES_SHIFT) - 1);
if (__builtin_constant_p(bit))
- BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+ MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
else {
#ifdef CONFIG_DEBUG_VM
BUG_ON((GFP_ZONE_BAD >> bit) & 1);
@@ -326,7 +326,6 @@ void free_pages_exact(void *virt, size_t size);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
-extern void free_cold_page(struct page *page);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
@@ -336,18 +335,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);
-extern bool oom_killer_disabled;
-
-static inline void oom_killer_disable(void)
-{
- oom_killer_disabled = true;
-}
-
-static inline void oom_killer_enable(void)
-{
- oom_killer_disabled = false;
-}
-
extern gfp_t gfp_allowed_mask;
static inline void set_gfp_allowed_mask(gfp_t mask)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index e10c49a5b96..059bd189d35 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -12,6 +12,8 @@
#include <linux/types.h>
#include <linux/errno.h>
+struct device;
+
/*
* Some platforms don't support the GPIO programming interface.
*
@@ -89,6 +91,15 @@ static inline int gpio_export(unsigned gpio, bool direction_may_change)
return -EINVAL;
}
+static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+{
+ /* GPIO can never have been exported */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+
static inline void gpio_unexport(unsigned gpio)
{
/* GPIO can never have been exported */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index a0ebdace7ba..10f62841674 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -494,6 +494,7 @@ struct hid_device { /* device report descriptor */
/* hiddev event handler */
int (*hiddev_connect)(struct hid_device *, unsigned int);
+ void (*hiddev_disconnect)(struct hid_device *);
void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field,
struct hid_usage *, __s32);
void (*hiddev_report_event) (struct hid_device *, struct hid_report *);
@@ -691,6 +692,7 @@ struct hid_device *hid_allocate_device(void);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
+void hid_disconnect(struct hid_device *hid);
/**
* hid_map_usage - map usage input bits
@@ -800,6 +802,7 @@ static inline int __must_check hid_hw_start(struct hid_device *hdev,
*/
static inline void hid_hw_stop(struct hid_device *hdev)
{
+ hid_disconnect(hdev);
hdev->ll_driver->stop(hdev);
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5cbc620bdfe..16937995abd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -3,15 +3,15 @@
#include <linux/fs.h>
+struct ctl_table;
+struct user_struct;
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
-struct ctl_table;
-struct user_struct;
-
int PageHuge(struct page *page);
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -20,11 +20,13 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
}
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
+int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
+int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+ struct page **, struct vm_area_struct **,
+ unsigned long *, int *, int, unsigned int flags);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range(struct vm_area_struct *,
@@ -110,6 +112,21 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
#endif /* !CONFIG_HUGETLB_PAGE */
+#define HUGETLB_ANON_FILE "anon_hugepage"
+
+enum {
+ /*
+ * The file will be used as an shm file so shmfs accounting rules
+ * apply
+ */
+ HUGETLB_SHMFS_INODE = 1,
+ /*
+ * The file is being created on the internal vfs mount and shmfs
+ * accounting rules do not apply
+ */
+ HUGETLB_ANONHUGE_INODE = 2,
+};
+
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_config {
uid_t uid;
@@ -148,7 +165,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
- struct user_struct **user);
+ struct user_struct **user, int creat_flags);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);
@@ -170,7 +187,11 @@ static inline void set_file_hugepages(struct file *file)
#define is_file_hugepages(file) 0
#define set_file_hugepages(file) BUG()
-#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS)
+static inline struct file *hugetlb_file_setup(const char *name, size_t size,
+ int acctflag, struct user_struct **user, int creat_flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
#endif /* !CONFIG_HUGETLBFS */
@@ -185,7 +206,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
- int hugetlb_next_nid;
+ int next_nid_to_alloc;
+ int next_nid_to_free;
unsigned int order;
unsigned long mask;
unsigned long max_huge_pages;
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index c9087de5c6c..e844a0b1869 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -28,17 +28,6 @@
identify a legacy client. If you don't need them, just don't set them. */
/*
- * ---- Driver types -----------------------------------------------------
- */
-
-#define I2C_DRIVERID_MSP3400 1
-#define I2C_DRIVERID_TUNER 2
-#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
-#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
-#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
-#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
-
-/*
* ---- Adapter types ----------------------------------------------------
*/
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f4784c0fe97..57d41b0abce 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -98,7 +98,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
/**
* struct i2c_driver - represent an I2C device driver
- * @id: Unique driver ID (optional)
* @class: What kind of i2c device we instantiate (for detect)
* @attach_adapter: Callback for bus addition (for legacy drivers)
* @detach_adapter: Callback for bus removal (for legacy drivers)
@@ -135,7 +134,6 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* not allowed.
*/
struct i2c_driver {
- int id;
unsigned int class;
/* Notifies the driver that a new bus has appeared or is about to be
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
new file mode 100644
index 00000000000..fc5db826b48
--- /dev/null
+++ b/include/linux/i2c/adp5588.h
@@ -0,0 +1,92 @@
+/*
+ * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _ADP5588_H
+#define _ADP5588_H
+
+#define DEV_ID 0x00 /* Device ID */
+#define CFG 0x01 /* Configuration Register1 */
+#define INT_STAT 0x02 /* Interrupt Status Register */
+#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
+#define Key_EVENTA 0x04 /* Key Event Register A */
+#define Key_EVENTB 0x05 /* Key Event Register B */
+#define Key_EVENTC 0x06 /* Key Event Register C */
+#define Key_EVENTD 0x07 /* Key Event Register D */
+#define Key_EVENTE 0x08 /* Key Event Register E */
+#define Key_EVENTF 0x09 /* Key Event Register F */
+#define Key_EVENTG 0x0A /* Key Event Register G */
+#define Key_EVENTH 0x0B /* Key Event Register H */
+#define Key_EVENTI 0x0C /* Key Event Register I */
+#define Key_EVENTJ 0x0D /* Key Event Register J */
+#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
+#define UNLOCK1 0x0F /* Unlock Key1 */
+#define UNLOCK2 0x10 /* Unlock Key2 */
+#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
+#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
+#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
+#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
+#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
+#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
+#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
+#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
+#define GPI_EM1 0x20 /* GPI Event Mode 1 */
+#define GPI_EM2 0x21 /* GPI Event Mode 2 */
+#define GPI_EM3 0x22 /* GPI Event Mode 3 */
+#define GPIO_DIR1 0x23 /* GPIO Data Direction */
+#define GPIO_DIR2 0x24 /* GPIO Data Direction */
+#define GPIO_DIR3 0x25 /* GPIO Data Direction */
+#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
+#define Debounce_DIS1 0x29 /* Debounce Disable */
+#define Debounce_DIS2 0x2A /* Debounce Disable */
+#define Debounce_DIS3 0x2B /* Debounce Disable */
+#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
+#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
+#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
+#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
+#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
+#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
+#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
+#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
+#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
+#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
+#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
+#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
+#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
+#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
+#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
+#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
+#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
+#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
+
+#define ADP5588_DEVICE_ID_MASK 0xF
+
+/* Put one of these structures in i2c_board_info platform_data */
+
+#define ADP5588_KEYMAPSIZE 80
+
+struct adp5588_kpad_platform_data {
+ int rows; /* Number of rows */
+ int cols; /* Number of columns */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+ unsigned en_keylock:1; /* Enable Key Lock feature */
+ unsigned short unlock_key1; /* Unlock Key 1 */
+ unsigned short unlock_key2; /* Unlock Key 2 */
+};
+
+#endif
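The comment above asks board code to hang one of these structures off i2c_board_info.platform_data. A hypothetical board-file fragment — the keymap entries, I2C address and IRQ number are invented for illustration, and the "adp5588-keys" device name is assumed to match the corresponding keypad driver — could look like:

#include <linux/i2c.h>
#include <linux/i2c/adp5588.h>
#include <linux/input.h>

static const unsigned short board_adp5588_keymap[ADP5588_KEYMAPSIZE] = {
	[0] = KEY_A,		/* illustrative entries only */
	[1] = KEY_B,
	[2] = KEY_ENTER,
};

static struct adp5588_kpad_platform_data board_adp5588_pdata = {
	.rows		= 4,
	.cols		= 4,
	.keymap		= board_adp5588_keymap,
	.keymapsize	= ARRAY_SIZE(board_adp5588_keymap),
	.repeat		= 1,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("adp5588-keys", 0x34),	/* address is board specific */
		.irq		= 42,			/* hypothetical keypad IRQ */
		.platform_data	= &board_adp5588_pdata,
	},
};

The array would then be handed to i2c_register_board_info() from the board's init code.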
diff --git a/include/linux/i2c/mcs5000_ts.h b/include/linux/i2c/mcs5000_ts.h
new file mode 100644
index 00000000000..5a117b5ca15
--- /dev/null
+++ b/include/linux/i2c/mcs5000_ts.h
@@ -0,0 +1,24 @@
+/*
+ * mcs5000_ts.h
+ *
+ * Copyright (C) 2009 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MCS5000_TS_H
+#define __LINUX_MCS5000_TS_H
+
+/* platform data for the MELFAS MCS-5000 touchscreen driver */
+struct mcs5000_ts_platform_data {
+ void (*cfg_pin)(void);
+ int x_size;
+ int y_size;
+};
+
+#endif /* __LINUX_MCS5000_TS_H */
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 7907a72403e..60c3360ef6a 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -7,6 +7,7 @@
* the Free Software Foundation.
*/
+#include <linux/types.h>
/*
* Standard commands.
@@ -30,6 +31,35 @@
#define I8042_CMD_MUX_PFX 0x0090
#define I8042_CMD_MUX_SEND 0x1090
+struct serio;
+
+#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
+
+void i8042_lock_chip(void);
+void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
+bool i8042_check_port_owner(const struct serio *);
+
+#else
+
+static inline void i8042_lock_chip(void)
+{
+}
+
+static inline void i8042_unlock_chip(void)
+{
+}
+
+static inline int i8042_command(unsigned char *param, int command)
+{
+ return -ENOSYS;
+}
+
+static inline bool i8042_check_port_owner(const struct serio *serio)
+{
+ return false;
+}
+
+#endif
#endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9e7f2e8fc66..21a6f5d9af2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -106,13 +106,13 @@ extern struct group_info init_groups;
extern struct cred init_cred;
-#ifdef CONFIG_PERF_COUNTERS
-# define INIT_PERF_COUNTERS(tsk) \
- .perf_counter_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \
- .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#ifdef CONFIG_PERF_EVENTS
+# define INIT_PERF_EVENTS(tsk) \
+ .perf_event_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
-# define INIT_PERF_COUNTERS(tsk)
+# define INIT_PERF_EVENTS(tsk)
#endif
/*
@@ -178,7 +178,7 @@ extern struct cred init_cred;
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
- INIT_PERF_COUNTERS(tsk) \
+ INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
diff --git a/include/linux/input.h b/include/linux/input.h
index 8b3bc3e0d14..0ccfc30cd40 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1123,7 +1123,7 @@ struct input_dev {
struct mutex mutex;
unsigned int users;
- int going_away;
+ bool going_away;
struct device dev;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 482dc91fd53..4f0a72a9740 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int dmar_ir_support(void);
+
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8e9e151f811..b78cf819495 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -84,7 +84,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* struct irqaction - per interrupt action descriptor
* @handler: interrupt handler function
* @flags: flags (see IRQF_* above)
- * @mask: no comment as it is useless and about to be removed
* @name: name of the device
* @dev_id: cookie to identify the device
* @next: pointer to the next irqaction for shared interrupts
@@ -97,7 +96,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
struct irqaction {
irq_handler_t handler;
unsigned long flags;
- cpumask_t mask;
const char *name;
void *dev_id;
struct irqaction *next;
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 786e7b8cece..83aa81297ea 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -184,5 +184,9 @@ extern void __devm_release_region(struct device *dev, struct resource *parent,
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern int iomem_is_exclusive(u64 addr);
+extern int
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *));
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 228f6c94b69..76a0759e88e 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -28,7 +28,6 @@ struct iova {
/* holds all the iova translations for a domain */
struct iova_domain {
- spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index a1187a0c99b..331530cd3cc 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -556,7 +556,7 @@ struct transaction_s
* This transaction is being forced and some process is
* waiting for it to finish.
*/
- int t_synchronous_commit:1;
+ unsigned int t_synchronous_commit:1;
};
/**
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2b5b1e0899a..d3cd23f3003 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -146,7 +146,7 @@ extern int _cond_resched(void);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
#define abs(x) ({ \
- int __x = (x); \
+ long __x = (x); \
(__x < 0) ? -__x : __x; \
})
@@ -246,14 +246,16 @@ extern int printk_ratelimit(void);
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
+extern int printk_delay_msec;
+
/*
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#define printk_once(x...) ({ \
- static int __print_once = 1; \
+ static bool __print_once = true; \
\
if (__print_once) { \
- __print_once = 0; \
+ __print_once = false; \
printk(x); \
} \
})
@@ -676,13 +678,17 @@ struct sysinfo {
};
/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
+
+/* Force a compilation error if condition is constant and true */
+#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
/* Force a compilation error if condition is true, but also produce a
result (of value 0 and type size_t), so the expression can be used
e.g. in a structure initializer (or where-ever else comma expressions
aren't permitted). */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
/* Trap pasters of __FUNCTION__ at compile-time */
#define __FUNCTION__ (__func__)
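BUILD_BUG_ON_ZERO()'s comment stresses that it yields a value (0, of type size_t) rather than a statement, so it can live inside an expression. kernel.h itself relies on that for its array-size checking; restated from memory as a condensed sketch (treat the exact helper spelling as illustrative):

/*
 * __must_be_array() evaluates to 0 for a real array and breaks the
 * build for a pointer, so it can be added onto the size computation.
 */
#define __must_be_array(a) \
	BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&(a)[0])))

#define ARRAY_SIZE(arr) \
	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

static int pins[8];

static void example(void)
{
	size_t n = ARRAY_SIZE(pins);	/* n == 8 */

	/* With "int *p", ARRAY_SIZE(p) would hit the negative bit-field
	 * width inside BUILD_BUG_ON_ZERO() and fail to compile. */
	(void)n;
}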
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index dc2fd545db0..e880d4cf9e2 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -144,10 +144,15 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
int name##_end[0];
#define kmemcheck_annotate_bitfield(ptr, name) \
- do if (ptr) { \
- int _n = (long) &((ptr)->name##_end) \
+ do { \
+ int _n; \
+ \
+ if (!ptr) \
+ break; \
+ \
+ _n = (long) &((ptr)->name##_end) \
- (long) &((ptr)->name##_begin); \
- BUILD_BUG_ON(_n < 0); \
+ MAYBE_BUILD_BUG_ON(_n < 0); \
\
kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
} while (0)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
new file mode 100644
index 00000000000..a485c14ecd5
--- /dev/null
+++ b/include/linux/ksm.h
@@ -0,0 +1,79 @@
+#ifndef __LINUX_KSM_H
+#define __LINUX_KSM_H
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork().
+ */
+
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/vmstat.h>
+
+#ifdef CONFIG_KSM
+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags);
+int __ksm_enter(struct mm_struct *mm);
+void __ksm_exit(struct mm_struct *mm);
+
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ return __ksm_enter(mm);
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ __ksm_exit(mm);
+}
+
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ */
+static inline int PageKsm(struct page *page)
+{
+ return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+}
+
+/*
+ * But we have to avoid the checking which page_add_anon_rmap() performs.
+ */
+static inline void page_add_ksm_rmap(struct page *page)
+{
+ if (atomic_inc_and_test(&page->_mapcount)) {
+ page->mapping = (void *) PAGE_MAPPING_ANON;
+ __inc_zone_page_state(page, NR_ANON_PAGES);
+ }
+}
+#else /* !CONFIG_KSM */
+
+static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags)
+{
+ return 0;
+}
+
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+static inline int PageKsm(struct page *page)
+{
+ return 0;
+}
+
+/* No stub required for page_add_ksm_rmap(page) */
+#endif /* !CONFIG_KSM */
+
+#endif
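The VM_MERGEABLE areas that ksm.h tracks are nominated from userspace through madvise(). A minimal userspace sketch — the advice value 12 is the MADV_MERGEABLE number introduced together with this interface, everything else is illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* KSM may merge identical pages in this area */
#endif

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0x5a, len);	/* identical pages are merge candidates */

	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* ksmd scans VM_MERGEABLE areas in the background from here on */
	return 0;
}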
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index fcf5fbe6a50..79603a6c356 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -44,6 +44,8 @@ struct ps2dev {
void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
+void ps2_begin_command(struct ps2dev *ps2dev);
+void ps2_end_command(struct ps2dev *ps2dev);
int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 691f59171c6..5126cceb6ae 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -57,6 +57,7 @@
#ifdef __ASSEMBLY__
+#ifndef LINKER_SCRIPT
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
@@ -66,6 +67,7 @@
ALIGN; \
name:
#endif
+#endif /* LINKER_SCRIPT */
#ifndef WEAK
#define WEAK(name) \
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index ad651f4e45a..3cc2f2c53e4 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -32,8 +32,17 @@ struct lis3lv02d_platform_data {
#define LIS3_IRQ2_DATA_READY (4 << 3)
#define LIS3_IRQ2_CLICK (7 << 3)
#define LIS3_IRQ_OPEN_DRAIN (1 << 6)
-#define LIS3_IRQ_ACTIVE_HIGH (1 << 7)
+#define LIS3_IRQ_ACTIVE_LOW (1 << 7)
unsigned char irq_cfg;
+
+#define LIS3_WAKEUP_X_LO (1 << 0)
+#define LIS3_WAKEUP_X_HI (1 << 1)
+#define LIS3_WAKEUP_Y_LO (1 << 2)
+#define LIS3_WAKEUP_Y_HI (1 << 3)
+#define LIS3_WAKEUP_Z_LO (1 << 4)
+#define LIS3_WAKEUP_Z_HI (1 << 5)
+ unsigned char wakeup_flags;
+ unsigned char wakeup_thresh;
};
#endif /* __LIS3LV02D_H_ */
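The new wakeup_flags/wakeup_thresh fields sit alongside irq_cfg in the platform data, so a board that wants the accelerometer to wake it on motion fills them in when declaring the device. A hypothetical configuration (threshold value and axis choice are illustrative only):

#include <linux/lis3lv02d.h>

/* wake on a high-threshold event on the X or Y axis */
static struct lis3lv02d_platform_data board_lis3_pdata = {
	.irq_cfg	= LIS3_IRQ_OPEN_DRAIN,
	.wakeup_flags	= LIS3_WAKEUP_X_HI | LIS3_WAKEUP_Y_HI,
	.wakeup_thresh	= 10,	/* device-specific units, made up here */
};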
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index c325b187966..a34dea46b62 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -338,49 +338,6 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
}
}
-static inline int __nlm_cmp_addr4(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
- const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
- return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int __nlm_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
- const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
-}
-#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
-static inline int __nlm_cmp_addr6(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- return 0;
-}
-#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
-
-/*
- * Compare two host addresses
- *
- * Return TRUE if the addresses are the same; otherwise FALSE.
- */
-static inline int nlm_cmp_addr(const struct sockaddr *sap1,
- const struct sockaddr *sap2)
-{
- if (sap1->sa_family == sap2->sa_family) {
- switch (sap1->sa_family) {
- case AF_INET:
- return __nlm_cmp_addr4(sap1, sap2);
- case AF_INET6:
- return __nlm_cmp_addr6(sap1, sap2);
- }
- }
- return 0;
-}
-
/*
* Compare two NLM locks.
* When the second lock is of type F_UNLCK, this acts like a wildcard.
@@ -395,7 +352,7 @@ static inline int nlm_compare_locks(const struct file_lock *fl1,
&&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK);
}
-extern struct lock_manager_operations nlmsvc_lock_operations;
+extern const struct lock_manager_operations nlmsvc_lock_operations;
#endif /* __KERNEL__ */
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 536ca12442c..78c3bed1c3f 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -104,7 +104,7 @@
#define DL_UNITDATA_IND 0x3108
#define DL_INFORMATION_IND 0x0008
-/* intern layer 2 managment */
+/* intern layer 2 management */
#define MDL_ASSIGN_REQ 0x1804
#define MDL_ASSIGN_IND 0x1904
#define MDL_REMOVE_REQ 0x1A04
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 1923327b986..76285e01b39 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -12,7 +12,9 @@
#define SYSFS_MAGIC 0x62656572
#define SECURITYFS_MAGIC 0x73636673
#define SELINUX_MAGIC 0xf97cff8c
+#define RAMFS_MAGIC 0x858458f6 /* some random number */
#define TMPFS_MAGIC 0x01021994
+#define HUGETLBFS_MAGIC 0x958458f6 /* some random number */
#define SQUASHFS_MAGIC 0x73717368
#define EFS_SUPER_MAGIC 0x414A53
#define EXT2_SUPER_MAGIC 0xEF53
@@ -53,4 +55,8 @@
#define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA
#define STACK_END_MAGIC 0x57AC6E9D
+
+#define DEVPTS_SUPER_MAGIC 0x1cd1
+#define SOCKFS_MAGIC 0x534F434B
+
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e46a0734ab6..bf9213b2db8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -118,6 +118,9 @@ static inline bool mem_cgroup_disabled(void)
extern bool mem_cgroup_oom_called(struct task_struct *task);
void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask, int nid,
+ int zid);
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;
@@ -276,6 +279,13 @@ static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
{
}
+static inline
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask, int nid, int zid)
+{
+ return 0;
+}
+
#endif /* CONFIG_CGROUP_MEM_CONT */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d95f72e79b8..fed969281a4 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -191,14 +191,6 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
#endif /* ! CONFIG_MEMORY_HOTPLUG */
-/*
- * Walk through all memory which is registered as resource.
- * arg is (start_pfn, nr_pages, private_arg_pointer)
- */
-extern int walk_memory_resource(unsigned long start_pfn,
- unsigned long nr_pages, void *arg,
- int (*func)(unsigned long, unsigned long, void *));
-
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 9be484d1128..7c08052e332 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -47,22 +47,16 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
}
/*
- * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree
- * the amount of memory specified by pool_data
+ * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the
+ * amount of memory specified by pool_data
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
-void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data);
void mempool_kfree(void *element, void *pool_data);
static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
{
return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
(void *) size);
}
-static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size)
-{
- return mempool_create(min_nr, mempool_kzalloc, mempool_kfree,
- (void *) size);
-}
/*
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
index 115dbe96508..c63b65c9442 100644
--- a/include/linux/mfd/da903x.h
+++ b/include/linux/mfd/da903x.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_PMIC_DA903X_H
#define __LINUX_PMIC_DA903X_H
-/* Unified sub device IDs for DA9030/DA9034 */
+/* Unified sub device IDs for DA9030/DA9034/DA9035 */
enum {
DA9030_ID_LED_1,
DA9030_ID_LED_2,
@@ -57,6 +57,8 @@ enum {
DA9034_ID_LDO13,
DA9034_ID_LDO14,
DA9034_ID_LDO15,
+
+ DA9035_ID_BUCK3,
};
/*
diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h
new file mode 100644
index 00000000000..b18cbb027bc
--- /dev/null
+++ b/include/linux/mfd/wm831x/pmu.h
@@ -0,0 +1,189 @@
+/*
+ * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_PMU_H__
+#define __MFD_WM831X_PMU_H__
+
+/*
+ * R16387 (0x4003) - Power State
+ */
+#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */
+#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */
+#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */
+#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */
+#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */
+#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */
+#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */
+#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */
+#define WM831X_REF_LP 0x1000 /* REF_LP */
+#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */
+#define WM831X_REF_LP_SHIFT 12 /* REF_LP */
+#define WM831X_REF_LP_WIDTH 1 /* REF_LP */
+#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */
+#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */
+#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */
+#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */
+#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */
+#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */
+#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */
+#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */
+#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */
+#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */
+#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */
+#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */
+#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */
+#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */
+#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */
+#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */
+#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */
+
+/*
+ * R16397 (0x400D) - System Status
+ */
+#define WM831X_THW_STS 0x8000 /* THW_STS */
+#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */
+#define WM831X_THW_STS_SHIFT 15 /* THW_STS */
+#define WM831X_THW_STS_WIDTH 1 /* THW_STS */
+#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */
+#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */
+#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */
+#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */
+#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */
+#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */
+#define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */
+#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */
+#define WM831X_PWR_USB 0x0100 /* PWR_USB */
+#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */
+#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */
+#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */
+#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */
+#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */
+#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */
+
+/*
+ * R16456 (0x4048) - Charger Control 1
+ */
+#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */
+#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */
+#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */
+#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */
+#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */
+#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */
+#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */
+#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */
+#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */
+#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */
+#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */
+#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */
+#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */
+#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */
+#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */
+#define WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */
+#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */
+#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */
+#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */
+#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */
+#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */
+#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */
+#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */
+
+/*
+ * R16457 (0x4049) - Charger Control 2
+ */
+#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */
+#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */
+#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */
+#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */
+#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */
+#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */
+#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */
+#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */
+#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */
+#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */
+#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */
+#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */
+#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */
+#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */
+#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */
+#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */
+
+/*
+ * R16458 (0x404A) - Charger Status
+ */
+#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */
+#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */
+#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */
+#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */
+#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */
+#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */
+#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */
+#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */
+#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */
+#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */
+#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */
+#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */
+#define WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */
+#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */
+#define WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */
+#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */
+#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */
+#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */
+#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */
+#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */
+#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */
+#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */
+#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */
+#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */
+#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */
+#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */
+
+#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT)
+
+/*
+ * R16459 (0x404B) - Backup Charger Control
+ */
+#define WM831X_BKUP_CHG_ENA 0x8000 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */
+#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */
+#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */
+#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */
+#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */
+#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */
+#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */
+
+#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9a72cc78e6b..24c395694f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -25,6 +25,7 @@ extern unsigned long max_mapnr;
#endif
extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;
@@ -103,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
+#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -283,6 +285,14 @@ static inline int is_vmalloc_addr(const void *x)
return 0;
#endif
}
+#ifdef CONFIG_MMU
+extern int is_vmalloc_or_module_addr(const void *x);
+#else
+static inline int is_vmalloc_or_module_addr(const void *x)
+{
+ return 0;
+}
+#endif
static inline struct page *compound_head(struct page *page)
{
@@ -685,11 +695,12 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_SIGBUS 0x0002
#define VM_FAULT_MAJOR 0x0004
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
+#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)
/*
* Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
@@ -700,17 +711,8 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(void);
-#ifdef CONFIG_SHMEM
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
-#else
-static inline int shmem_lock(struct file *file, int lock,
- struct user_struct *user)
-{
- return 0;
-}
-#endif
+int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
-
int shmem_zero_setup(struct vm_area_struct *);
#ifndef CONFIG_MMU
@@ -790,8 +792,14 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
-extern int vmtruncate(struct inode * inode, loff_t offset);
-extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
+extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern int vmtruncate(struct inode *inode, loff_t offset);
+extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_page(struct address_space *mapping, struct page *page);
+
+int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -815,6 +823,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+struct page *get_dump_page(unsigned long addr);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -1058,6 +1067,8 @@ extern void setup_per_cpu_pageset(void);
static inline void setup_per_cpu_pageset(void) {}
#endif
+extern void zone_pcp_update(struct zone *zone);
+
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
@@ -1226,7 +1237,8 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
#define FOLL_GET 0x04 /* do get_page on page */
-#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
+#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
+#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
@@ -1274,7 +1286,7 @@ int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif /* __HAVE_ARCH_GATE_AREA */
-int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages);
@@ -1303,5 +1315,12 @@ void vmemmap_populate_print_last(void);
extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
size_t size);
extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+
+extern void memory_failure(unsigned long pfn, int trapno);
+extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int sysctl_memory_failure_early_kill;
+extern int sysctl_memory_failure_recovery;
+extern atomic_long_t mce_bad_pages;
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 7fbb9726755..8835b877b8d 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -5,7 +5,7 @@
* page_is_file_cache - should the page be on a file LRU or anon LRU?
* @page: the page to test
*
- * Returns LRU_FILE if @page is page cache page backed by a regular filesystem,
+ * Returns 1 if @page is page cache page backed by a regular filesystem,
* or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
* Used by functions that manipulate the LRU lists, to sort a page
* onto the right LRU list.
@@ -16,11 +16,7 @@
*/
static inline int page_is_file_cache(struct page *page)
{
- if (PageSwapBacked(page))
- return 0;
-
- /* The page is page cache backed by a normal filesystem. */
- return LRU_FILE;
+ return !PageSwapBacked(page);
}
static inline void
@@ -39,21 +35,36 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
mem_cgroup_del_lru_list(page, l);
}
+/**
+ * page_lru_base_type - which LRU list type should a page be on?
+ * @page: the page to test
+ *
+ * Used for LRU list index arithmetic.
+ *
+ * Returns the base LRU type - file or anon - @page should be on.
+ */
+static inline enum lru_list page_lru_base_type(struct page *page)
+{
+ if (page_is_file_cache(page))
+ return LRU_INACTIVE_FILE;
+ return LRU_INACTIVE_ANON;
+}
+
static inline void
del_page_from_lru(struct zone *zone, struct page *page)
{
- enum lru_list l = LRU_BASE;
+ enum lru_list l;
list_del(&page->lru);
if (PageUnevictable(page)) {
__ClearPageUnevictable(page);
l = LRU_UNEVICTABLE;
} else {
+ l = page_lru_base_type(page);
if (PageActive(page)) {
__ClearPageActive(page);
l += LRU_ACTIVE;
}
- l += page_is_file_cache(page);
}
__dec_zone_state(zone, NR_LRU_BASE + l);
mem_cgroup_del_lru_list(page, l);
@@ -68,14 +79,14 @@ del_page_from_lru(struct zone *zone, struct page *page)
*/
static inline enum lru_list page_lru(struct page *page)
{
- enum lru_list lru = LRU_BASE;
+ enum lru_list lru;
if (PageUnevictable(page))
lru = LRU_UNEVICTABLE;
else {
+ lru = page_lru_base_type(page);
if (PageActive(page))
lru += LRU_ACTIVE;
- lru += page_is_file_cache(page);
}
return lru;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0042090a4d7..21d6aa45206 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -240,6 +240,8 @@ struct mm_struct {
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ struct linux_binfmt *binfmt;
+
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
@@ -259,11 +261,10 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
-
- /* aio bits */
+#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct hlist_head ioctx_list;
-
+#endif
#ifdef CONFIG_MM_OWNER
/*
* "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 403aa505f27..2ee22e8af11 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -40,6 +40,8 @@ struct mmc_csd {
};
struct mmc_ext_csd {
+ u8 rev;
+ unsigned int sa_timeout; /* Units: 100ns */
unsigned int hs_max_dtr;
unsigned int sectors;
};
@@ -62,7 +64,8 @@ struct sdio_cccr {
low_speed:1,
wide_bus:1,
high_power:1,
- high_speed:1;
+ high_speed:1,
+ disable_cd:1;
};
struct sdio_cis {
@@ -94,6 +97,8 @@ struct mmc_card {
#define MMC_STATE_READONLY (1<<1) /* card is read-only */
#define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */
#define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */
+ unsigned int quirks; /* card quirks */
+#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
@@ -129,6 +134,11 @@ struct mmc_card {
#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LENIENT_FN0;
+}
+
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 7ac8b500d55..e4898e9eeb5 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -139,6 +139,7 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
extern void mmc_release_host(struct mmc_host *host);
+extern int mmc_try_claim_host(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3e7615e9087..81bb4235859 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -51,6 +51,35 @@ struct mmc_ios {
};
struct mmc_host_ops {
+ /*
+ * Hosts that support power saving can use the 'enable' and 'disable'
+ * methods to exit and enter power saving states. 'enable' is called
+ * when the host is claimed and 'disable' is called (or scheduled with
+ * a delay) when the host is released. The 'disable' is scheduled if
+ * the disable delay set by 'mmc_set_disable_delay()' is non-zero,
+ * otherwise 'disable' is called immediately. 'disable' may be
+ * scheduled repeatedly, to permit ever greater power saving at the
+ * expense of ever greater latency to re-enable. Rescheduling is
+ * determined by the return value of the 'disable' method. A positive
+ * value gives the delay in milliseconds.
+ *
+ * In the case where a host function (like set_ios) may be called
+ * with or without the host claimed, enabling and disabling can be
+ * done directly and will nest correctly. Call 'mmc_host_enable()' and
+ * 'mmc_host_lazy_disable()' for this purpose, but note that these
+ * functions must be paired.
+ *
+ * Alternatively, 'mmc_host_enable()' may be paired with
+ * 'mmc_host_disable()' which calls 'disable' immediately. In this
+ * case the 'disable' method will be called with 'lazy' set to 0.
+ * This is mainly useful for error paths.
+ *
+ * Because lazy disable may be called from a work queue, the 'disable'
+ * method must claim the host when 'lazy' != 0, which will work
+ * correctly because recursion is detected and handled.
+ */
+ int (*enable)(struct mmc_host *host);
+ int (*disable)(struct mmc_host *host, int lazy);
void (*request)(struct mmc_host *host, struct mmc_request *req);
/*
* Avoid calling these three functions too often or in a "fast path",
@@ -118,6 +147,9 @@ struct mmc_host {
#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */
#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */
#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */
+#define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */
+#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
+#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
@@ -142,9 +174,18 @@ struct mmc_host {
unsigned int removed:1; /* host is being removed */
#endif
+ /* Only used with MMC_CAP_DISABLE */
+ int enabled; /* host is enabled */
+ int nesting_cnt; /* "enable" nesting count */
+ int en_dis_recurs; /* detect recursion */
+ unsigned int disable_delay; /* disable delay in msecs */
+ struct delayed_work disable; /* disabling work */
+
struct mmc_card *card; /* device attached to this host */
wait_queue_head_t wq;
+ struct task_struct *claimer; /* task that has host claimed */
+ int claim_cnt; /* "claim" nesting count */
struct delayed_work detect;
@@ -183,6 +224,9 @@ static inline void *mmc_priv(struct mmc_host *host)
extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
extern int mmc_resume_host(struct mmc_host *);
+extern void mmc_power_save_host(struct mmc_host *host);
+extern void mmc_power_restore_host(struct mmc_host *host);
+
extern void mmc_detect_change(struct mmc_host *, unsigned long delay);
extern void mmc_request_done(struct mmc_host *, struct mmc_request *);
@@ -197,5 +241,19 @@ struct regulator;
int mmc_regulator_get_ocrmask(struct regulator *supply);
int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
+int mmc_card_awake(struct mmc_host *host);
+int mmc_card_sleep(struct mmc_host *host);
+int mmc_card_can_sleep(struct mmc_host *host);
+
+int mmc_host_enable(struct mmc_host *host);
+int mmc_host_disable(struct mmc_host *host);
+int mmc_host_lazy_disable(struct mmc_host *host);
+
+static inline void mmc_set_disable_delay(struct mmc_host *host,
+ unsigned int disable_delay)
+{
+ host->disable_delay = disable_delay;
+}
+
#endif
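
The enable/disable comment in struct mmc_host_ops above translates into roughly the following driver-side pattern. This is a minimal sketch, assuming a hypothetical foo_mmc host driver that gates a functional clock; the foo_* names, the private struct and the 100 ms delay are all invented for illustration and are not part of this patch.

#include <linux/clk.h>
#include <linux/mmc/host.h>

struct foo_mmc_host {				/* hypothetical private data */
	struct clk *fclk;
};

static int foo_mmc_enable(struct mmc_host *mmc)
{
	struct foo_mmc_host *host = mmc_priv(mmc);

	clk_enable(host->fclk);			/* leave the power-saving state */
	return 0;
}

static int foo_mmc_disable(struct mmc_host *mmc, int lazy)
{
	struct foo_mmc_host *host = mmc_priv(mmc);

	clk_disable(host->fclk);		/* enter a power-saving state */
	/*
	 * A positive return value asks the core to call 'disable' again
	 * after that many milliseconds, so a deeper state can be entered
	 * later at the cost of a longer re-enable latency.
	 */
	return lazy ? 100 : 0;
}

static const struct mmc_host_ops foo_mmc_ops = {
	.enable		= foo_mmc_enable,
	.disable	= foo_mmc_disable,
	/* .request and friends omitted from this sketch */
};

Such a host would advertise the capability in its probe() and pick a lazy-disable delay:

	mmc->caps |= MMC_CAP_DISABLE;
	mmc_set_disable_delay(mmc, 100);	/* schedule 'disable' 100 ms after release */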
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 14b81f3e523..c02c8db7370 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -31,6 +31,7 @@
#define MMC_ALL_SEND_CID 2 /* bcr R2 */
#define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */
#define MMC_SET_DSR 4 /* bc [31:16] RCA */
+#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */
#define MMC_SWITCH 6 /* ac [31:0] See below R1b */
#define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */
#define MMC_SEND_EXT_CSD 8 /* adtc R1 */
@@ -127,6 +128,7 @@
#define R1_STATUS(x) (x & 0xFFFFE000)
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
+#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
#define R1_APP_CMD (1 << 5) /* sr, c */
/*
@@ -254,6 +256,7 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE 196 /* RO */
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */
+#define EXT_CSD_S_A_TIMEOUT 217
/*
* EXT_CSD field definitions
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 451bdfc8583..ac3ab683fec 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -67,6 +67,7 @@ struct sdio_func {
#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev)
#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d)
+#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
/*
* SDIO function device driver
@@ -81,6 +82,8 @@ struct sdio_driver {
struct device_driver drv;
};
+#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
+
/**
* SDIO_DEVICE - macro used to describe a specific SDIO device
* @vend: the 16 bit manufacturer code
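
dev_to_sdio_func() and to_sdio_driver() are plain container_of() helpers. A hedged sketch of the kind of bus-level code that would use them; example_sdio_dev_report() is invented for illustration and is not part of this patch.

static int example_sdio_dev_report(struct device *dev, void *data)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct sdio_driver *drv = dev->driver ? to_sdio_driver(dev->driver) : NULL;

	dev_info(&func->dev, "function %u bound to %s\n",
		 func->num, drv ? drv->name : "no driver");
	return 0;
}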
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
new file mode 100644
index 00000000000..70fffeba749
--- /dev/null
+++ b/include/linux/mmu_context.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_MMU_CONTEXT_H
+#define _LINUX_MMU_CONTEXT_H
+
+struct mm_struct;
+
+void use_mm(struct mm_struct *mm);
+void unuse_mm(struct mm_struct *mm);
+
+#endif
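
use_mm()/unuse_mm() let a kernel thread temporarily operate on a user address space (the aio retry path is the existing in-kernel user). A minimal sketch of the pattern; the helper name is invented.

#include <linux/mmu_context.h>

/* Hypothetical: perform user-memory accesses from a kernel thread. */
static void example_with_user_mm(struct mm_struct *mm)
{
	use_mm(mm);	/* adopt 'mm' as the current address space */
	/* copy_from_user()/copy_to_user() against 'mm' are valid here */
	unuse_mm(mm);	/* revert to the plain kernel address space */
}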
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b77486d152c..4e02ee2b071 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -62,6 +62,15 @@ struct mmu_notifier_ops {
unsigned long address);
/*
+ * change_pte is called when the pte mapping of a page is changed:
+ * for example, when ksm remaps a pte to point to a new shared page.
+ */
+ void (*change_pte)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+ pte_t pte);
+
+ /*
* Before this is invoked any secondary MMU is still ok to
* read/write to the page previously pointed to by the Linux
* pte because the page hasn't been freed yet and it won't be
@@ -154,6 +163,8 @@ extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long address);
+extern void __mmu_notifier_change_pte(struct mm_struct *mm,
+ unsigned long address, pte_t pte);
extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
@@ -175,6 +186,13 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
+static inline void mmu_notifier_change_pte(struct mm_struct *mm,
+ unsigned long address, pte_t pte)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_change_pte(mm, address, pte);
+}
+
static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
@@ -236,6 +254,16 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__young; \
})
+#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
+({ \
+ struct mm_struct *___mm = __mm; \
+ unsigned long ___address = __address; \
+ pte_t ___pte = __pte; \
+ \
+ set_pte_at(___mm, ___address, __ptep, ___pte); \
+ mmu_notifier_change_pte(___mm, ___address, ___pte); \
+})
+
#else /* CONFIG_MMU_NOTIFIER */
static inline void mmu_notifier_release(struct mm_struct *mm)
@@ -248,6 +276,11 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
+static inline void mmu_notifier_change_pte(struct mm_struct *mm,
+ unsigned long address, pte_t pte)
+{
+}
+
static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
@@ -273,6 +306,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define ptep_clear_flush_notify ptep_clear_flush
+#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
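
On the producer side, set_pte_at_notify() is intended to replace set_pte_at() at the points where a pte is re-pointed (e.g. by KSM), so that registered notifiers see the new pte through ->change_pte() instead of a full invalidate. A hedged consumer-side sketch follows; the example_* names are invented and the body is deliberately trivial, where a real secondary MMU would re-point its shadow mapping.

/* Hypothetical secondary-MMU user of the new callback. */
static void example_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			       unsigned long address, pte_t pte)
{
	/*
	 * A real implementation would re-point its shadow mapping at the
	 * new page instead of tearing the mapping down and refaulting.
	 */
	pr_debug("pte remapped at %#lx\n", address);
}

static const struct mmu_notifier_ops example_notifier_ops = {
	.change_pte = example_change_pte,
};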
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 88959853737..6f7561730d8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,6 +38,7 @@
#define MIGRATE_UNMOVABLE 0
#define MIGRATE_RECLAIMABLE 1
#define MIGRATE_MOVABLE 2
+#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */
#define MIGRATE_RESERVE 3
#define MIGRATE_ISOLATE 4 /* can't allocate from here */
#define MIGRATE_TYPES 5
@@ -94,11 +95,15 @@ enum zone_stat_item {
NR_SLAB_RECLAIMABLE,
NR_SLAB_UNRECLAIMABLE,
NR_PAGETABLE, /* used for pagetables */
+ NR_KERNEL_STACK,
+ /* Second 128 byte cacheline */
NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_BOUNCE,
NR_VMSCAN_WRITE,
- /* Second 128 byte cacheline */
NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
+ NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
+ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
+ NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
#ifdef CONFIG_NUMA
NUMA_HIT, /* allocated in intended node */
NUMA_MISS, /* allocated in non intended node */
@@ -165,7 +170,9 @@ struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
- struct list_head list; /* the list of pages */
+
+ /* Lists of pages, one per migrate type stored on the pcp-lists */
+ struct list_head lists[MIGRATE_PCPTYPES];
};
struct per_cpu_pageset {
@@ -269,6 +276,11 @@ struct zone_reclaim_stat {
*/
unsigned long recent_rotated[2];
unsigned long recent_scanned[2];
+
+ /*
+ * accumulated for batching
+ */
+ unsigned long nr_saved_scan[NR_LRU_LISTS];
};
struct zone {
@@ -323,7 +335,6 @@ struct zone {
spinlock_t lru_lock;
struct zone_lru {
struct list_head list;
- unsigned long nr_saved_scan; /* accumulated for batching */
} lru[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
@@ -744,21 +755,20 @@ static inline int is_dma(struct zone *zone)
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
-struct file;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- struct file *, void __user *, size_t *, loff_t *);
+ void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- struct file *, void __user *, size_t *, loff_t *);
+ void __user *, size_t *, loff_t *);
extern int numa_zonelist_order_handler(struct ctl_table *, int,
- struct file *, void __user *, size_t *, loff_t *);
+ void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
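
per_cpu_pages now keeps one free list per migratetype rather than a single mixed list, and only the first MIGRATE_PCPTYPES types live on the pcp lists. The fragment below is a minimal illustrative sketch of how allocator-side code might index those lists; example_pcp_take() is invented and does not reproduce the actual page_alloc.c changes.

static struct page *example_pcp_take(struct per_cpu_pages *pcp, int migratetype)
{
	struct list_head *list;
	struct page *page;

	if (migratetype >= MIGRATE_PCPTYPES)
		return NULL;	/* types beyond the pcp range handled elsewhere */

	list = &pcp->lists[migratetype];
	if (list_empty(list))
		return NULL;

	page = list_first_entry(list, struct page, lru);
	list_del(&page->lru);
	pcp->count--;
	return page;
}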
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1bf5900ffe4..f58e9d836f3 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -399,6 +399,17 @@ struct i2c_device_id {
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
+/* spi */
+
+#define SPI_NAME_SIZE 32
+#define SPI_MODULE_PREFIX "spi:"
+
+struct spi_device_id {
+ char name[SPI_NAME_SIZE];
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
/* dmi */
enum dmi_field {
DMI_NONE,
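
struct spi_device_id, together with the "spi:" modalias prefix, is what lets SPI drivers participate in module autoloading. A hypothetical driver-side table is sketched below; the chip names are invented, and the driver would also point its .id_table at this array.

static const struct spi_device_id example_spi_ids[] = {
	{ "examplechip",      0 },
	{ "examplechip-rev2", 1 },	/* driver_data can select a variant */
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);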
diff --git a/include/linux/module.h b/include/linux/module.h
index 1c755b2f937..482efc865ac 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -128,7 +128,10 @@ extern struct module __this_module;
*/
#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
-/* Author, ideally of form NAME[, NAME]*[ and NAME] */
+/*
+ * Author(s), use "Name <email>" or just "Name", for multiple
+ * authors use multiple MODULE_AUTHOR() statements/lines.
+ */
#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
/* What your module does. */
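
For example (names invented for illustration):

MODULE_AUTHOR("Jane Developer <jane@example.org>");
MODULE_AUTHOR("Another Developer");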
@@ -308,10 +311,14 @@ struct module
#endif
#ifdef CONFIG_KALLSYMS
- /* We keep the symbol and string tables for kallsyms. */
- Elf_Sym *symtab;
- unsigned int num_symtab;
- char *strtab;
+ /*
+ * We keep the symbol and string tables for kallsyms.
+ * The core_* fields below are temporary, loader-only (they
+ * could really be discarded after module init).
+ */
+ Elf_Sym *symtab, *core_symtab;
+ unsigned int num_symtab, core_num_syms;
+ char *strtab, *core_strtab;
/* Section attributes */
struct module_sect_attrs *sect_attrs;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 4030ebada49..7a232a9bdd6 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -121,6 +121,7 @@ typedef enum {
NAND_ECC_SOFT,
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
+ NAND_ECC_HW_OOB_FIRST,
} nand_ecc_modes_t;
/*
@@ -271,13 +272,13 @@ struct nand_ecc_ctrl {
uint8_t *calc_ecc);
int (*read_page_raw)(struct mtd_info *mtd,
struct nand_chip *chip,
- uint8_t *buf);
+ uint8_t *buf, int page);
void (*write_page_raw)(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf);
int (*read_page)(struct mtd_info *mtd,
struct nand_chip *chip,
- uint8_t *buf);
+ uint8_t *buf, int page);
int (*read_subpage)(struct mtd_info *mtd,
struct nand_chip *chip,
uint32_t offs, uint32_t len,
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 090da505425..052ea8ca243 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -21,6 +21,12 @@ struct mtd_info;
int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
/*
+ * Detect and correct a 1 bit error for eccsize byte block
+ */
+int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
+ unsigned int eccsize);
+
+/*
* Detect and correct a 1 bit error for 256 byte block
*/
int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 8ed87337438..4e49f335067 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -214,4 +214,12 @@ unsigned onenand_block(struct onenand_chip *this, loff_t addr);
loff_t onenand_addr(struct onenand_chip *this, int block);
int flexonenand_region(struct mtd_info *mtd, loff_t addr);
+struct mtd_partition;
+
+struct onenand_platform_data {
+ void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+};
+
#endif /* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 86a6bbef646..acadbf53a69 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -207,6 +207,9 @@
#define ONENAND_ECC_2BIT (1 << 1)
#define ONENAND_ECC_2BIT_ALL (0xAAAA)
#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010)
+#define ONENAND_ECC_3BIT (1 << 2)
+#define ONENAND_ECC_4BIT (1 << 3)
+#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010)
/*
* One-Time Programmable (OTP)
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d870ae2faed..ec0f607b364 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -40,7 +40,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
* - follow links at the end
* - require a directory
* - ending slashes ok even for nonexistent files
- * - internal "there are more path compnents" flag
+ * - internal "there are more path components" flag
* - locked when lookup done with dcache_lock held
* - dentry cache is untrusted; force a real lookup
*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 080f6ba9e73..ab5d3126831 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -187,6 +187,7 @@ extern struct sock *netlink_kernel_create(struct net *net,
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 33b283601f6..c4c06020810 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -234,7 +234,7 @@ enum nfs_opnum4 {
Needs to be updated if more operations are defined in future.*/
#define FIRST_NFS4_OP OP_ACCESS
-#define LAST_NFS4_OP OP_RELEASE_LOCKOWNER
+#define LAST_NFS4_OP OP_RECLAIM_COMPLETE
enum nfsstat4 {
NFS4_OK = 0,
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 2b49d676d0c..510ffdd5020 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -56,8 +56,11 @@ extern struct svc_version nfsd_version2, nfsd_version3,
extern u32 nfsd_supported_minorversion;
extern struct mutex nfsd_mutex;
extern struct svc_serv *nfsd_serv;
+extern spinlock_t nfsd_drc_lock;
+extern unsigned int nfsd_drc_max_mem;
+extern unsigned int nfsd_drc_mem_used;
-extern struct seq_operations nfs_exports_op;
+extern const struct seq_operations nfs_exports_op;
/*
* Function prototypes.
@@ -163,7 +166,7 @@ extern int nfsd_max_blksize;
extern unsigned int max_delegations;
int nfs4_state_init(void);
void nfsd4_free_slabs(void);
-void nfs4_state_start(void);
+int nfs4_state_start(void);
void nfs4_state_shutdown(void);
time_t nfs4_lease_time(void);
void nfs4_reset_lease(time_t leasetime);
@@ -171,7 +174,7 @@ int nfs4_reset_recoverydir(char *recdir);
#else
static inline int nfs4_state_init(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
-static inline void nfs4_state_start(void) { }
+static inline int nfs4_state_start(void) { return 0; }
static inline void nfs4_state_shutdown(void) { }
static inline time_t nfs4_lease_time(void) { return 0; }
static inline void nfs4_reset_lease(time_t leasetime) { }
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 57ab2ed0845..b38d1132418 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -60,6 +60,12 @@ typedef struct {
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
+struct nfsd4_cb_sequence {
+ /* args/res */
+ u32 cbs_minorversion;
+ struct nfs4_client *cbs_clp;
+};
+
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -81,38 +87,35 @@ struct nfs4_delegation {
/* client delegation callback info */
struct nfs4_cb_conn {
/* SETCLIENTID info */
- u32 cb_addr;
- unsigned short cb_port;
+ struct sockaddr_storage cb_addr;
+ size_t cb_addrlen;
u32 cb_prog;
u32 cb_minorversion;
u32 cb_ident; /* minorversion 0 only */
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
struct rpc_clnt * cb_client;
- struct rpc_cred * cb_cred;
};
-/* Maximum number of slots per session. 128 is useful for long haul TCP */
-#define NFSD_MAX_SLOTS_PER_SESSION 128
-/* Maximum number of pages per slot cache entry */
-#define NFSD_PAGES_PER_SLOT 1
+/* Maximum number of slots per session. 160 is useful for long haul TCP */
+#define NFSD_MAX_SLOTS_PER_SESSION 160
/* Maximum number of operations per session compound */
#define NFSD_MAX_OPS_PER_COMPOUND 16
-
-struct nfsd4_cache_entry {
- __be32 ce_status;
- struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */
- struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1];
- int ce_cachethis;
- short ce_resused;
- int ce_opcnt;
- int ce_rpchdrlen;
-};
+/* Maximum per-slot session cache size */
+#define NFSD_SLOT_CACHE_SIZE 1024
+/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
+#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32
+#define NFSD_MAX_MEM_PER_SESSION \
+ (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)
struct nfsd4_slot {
- bool sl_inuse;
- u32 sl_seqid;
- struct nfsd4_cache_entry sl_cache_entry;
+ bool sl_inuse;
+ bool sl_cachethis;
+ u16 sl_opcnt;
+ u32 sl_seqid;
+ __be32 sl_status;
+ u32 sl_datalen;
+ char sl_data[];
};
struct nfsd4_channel_attrs {
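
Worked out, the per-session bound above is NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE = 32 * 1024 bytes = 32 KiB of cached replies (NFSD_MAX_MEM_PER_SESSION), which is presumably what gets accounted against the new nfsd_drc_max_mem/nfsd_drc_mem_used globals declared in nfsd.h earlier in this patch.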
@@ -126,6 +129,25 @@ struct nfsd4_channel_attrs {
u32 rdma_attrs;
};
+struct nfsd4_create_session {
+ clientid_t clientid;
+ struct nfs4_sessionid sessionid;
+ u32 seqid;
+ u32 flags;
+ struct nfsd4_channel_attrs fore_channel;
+ struct nfsd4_channel_attrs back_channel;
+ u32 callback_prog;
+ u32 uid;
+ u32 gid;
+};
+
+/* The single slot clientid cache structure */
+struct nfsd4_clid_slot {
+ u32 sl_seqid;
+ __be32 sl_status;
+ struct nfsd4_create_session sl_cr_ses;
+};
+
struct nfsd4_session {
struct kref se_ref;
struct list_head se_hash; /* hash by sessionid */
@@ -135,7 +157,7 @@ struct nfsd4_session {
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_channel_attrs se_bchannel;
- struct nfsd4_slot se_slots[]; /* forward channel slots */
+ struct nfsd4_slot *se_slots[]; /* forward channel slots */
};
static inline void
@@ -180,7 +202,7 @@ struct nfs4_client {
char cl_recdir[HEXDIR_LEN]; /* recovery dir */
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
- __be32 cl_addr; /* client ipaddress */
+ struct sockaddr_storage cl_addr; /* client ipaddress */
u32 cl_flavor; /* setclientid pseudoflavor */
char *cl_principal; /* setclientid principal name */
struct svc_cred cl_cred; /* setclientid principal */
@@ -192,9 +214,17 @@ struct nfs4_client {
/* for nfs41 */
struct list_head cl_sessions;
- struct nfsd4_slot cl_slot; /* create_session slot */
+ struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
u32 cl_exchange_flags;
struct nfs4_sessionid cl_sessionid;
+
+ /* for nfs41 callbacks */
+ /* We currently support a single back channel with a single slot */
+ unsigned long cl_cb_slot_busy;
+ u32 cl_cb_seq_nr;
+ struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
+ struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
+ /* wait here for slots */
};
/* struct nfs4_client_reset
@@ -345,6 +375,7 @@ extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
extern void put_nfs4_client(struct nfs4_client *clp);
extern void nfs4_free_stateowner(struct kref *kref);
+extern int set_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 2bacf753506..73164c2b3d2 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -51,7 +51,7 @@ struct nfsd4_compound_state {
/* For sessions DRC */
struct nfsd4_session *session;
struct nfsd4_slot *slot;
- __be32 *statp;
+ __be32 *datap;
size_t iovlen;
u32 minorversion;
u32 status;
@@ -366,18 +366,6 @@ struct nfsd4_exchange_id {
int spa_how;
};
-struct nfsd4_create_session {
- clientid_t clientid;
- struct nfs4_sessionid sessionid;
- u32 seqid;
- u32 flags;
- struct nfsd4_channel_attrs fore_channel;
- struct nfsd4_channel_attrs back_channel;
- u32 callback_prog;
- u32 uid;
- u32 gid;
-};
-
struct nfsd4_sequence {
struct nfs4_sessionid sessionid; /* request/response */
u32 seqid; /* request/response */
@@ -479,13 +467,12 @@ struct nfsd4_compoundres {
static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
{
struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
- return args->opcnt == 1;
+ return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
}
static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
{
- return !resp->cstate.slot->sl_cache_entry.ce_cachethis ||
- nfsd4_is_solo_sequence(resp);
+ return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp);
}
#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
diff --git a/include/linux/oom.h b/include/linux/oom.h
index a7979baf1e3..6aac5fe4f6f 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -30,5 +30,16 @@ extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
+extern bool oom_killer_disabled;
+
+static inline void oom_killer_disable(void)
+{
+ oom_killer_disabled = true;
+}
+
+static inline void oom_killer_enable(void)
+{
+ oom_killer_disabled = false;
+}
#endif /* __KERNEL__*/
#endif /* _INCLUDE_LINUX_OOM_H */
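
oom_killer_disable()/oom_killer_enable() simply flip the new oom_killer_disabled flag; the expected usage is a bracket around code that must not trigger OOM kills (hibernation-style memory shrinking). A hedged sketch, with an invented function name and the real work elided:

static void example_critical_reclaim_section(void)
{
	oom_killer_disable();	/* the OOM killer must stay quiet here */
	/* ... shrink memory / write the image ... */
	oom_killer_enable();
}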
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 2b87acfc5f8..6b202b17395 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -51,6 +51,9 @@
* PG_buddy is set to indicate that the page is free and in the buddy system
* (see mm/page_alloc.c).
*
+ * PG_hwpoison indicates that a page got corrupted in hardware and contains
+ * data with incorrect ECC bits that triggered a machine check. Accessing is
+ * not safe since it may cause another machine check. Don't touch!
*/
/*
@@ -102,6 +105,9 @@ enum pageflags {
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PG_uncached, /* Page has been mapped as uncached */
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ PG_hwpoison, /* hardware poisoned page. Don't touch */
+#endif
__NR_PAGEFLAGS,
/* Filesystems */
@@ -158,6 +164,9 @@ static inline int TestSetPage##uname(struct page *page) \
static inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &page->flags); }
+#define __TESTCLEARFLAG(uname, lname) \
+static inline int __TestClearPage##uname(struct page *page) \
+ { return __test_and_clear_bit(PG_##lname, &page->flags); }
#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
@@ -184,6 +193,9 @@ static inline void __ClearPage##uname(struct page *page) { }
#define TESTCLEARFLAG_FALSE(uname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
+#define __TESTCLEARFLAG_FALSE(uname) \
+static inline int __TestClearPage##uname(struct page *page) { return 0; }
+
struct page; /* forward declaration */
TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
@@ -250,11 +262,11 @@ PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
#define MLOCK_PAGES 1
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
- TESTSCFLAG(Mlocked, mlocked)
+ TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
#define MLOCK_PAGES 0
-PAGEFLAG_FALSE(Mlocked)
- SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
+ TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -263,6 +275,15 @@ PAGEFLAG(Uncached, uncached)
PAGEFLAG_FALSE(Uncached)
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+PAGEFLAG(HWPoison, hwpoison)
+TESTSETFLAG(HWPoison, hwpoison)
+#define __PG_HWPOISON (1UL << PG_hwpoison)
+#else
+PAGEFLAG_FALSE(HWPoison)
+#define __PG_HWPOISON 0
+#endif
+
static inline int PageUptodate(struct page *page)
{
int ret = test_bit(PG_uptodate, &(page)->flags);
@@ -387,7 +408,7 @@ static inline void __ClearPageTail(struct page *page)
1 << PG_private | 1 << PG_private_2 | \
1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
- 1 << PG_unevictable | __PG_MLOCKED)
+ 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -396,8 +417,8 @@ static inline void __ClearPageTail(struct page *page)
*/
#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
-#endif /* !__GENERATING_BOUNDS_H */
-
+#define PAGE_FLAGS_PRIVATE \
+ (1 << PG_private | 1 << PG_private_2)
/**
* page_has_private - Determine if page has private stuff
* @page: The page to be checked
@@ -405,8 +426,11 @@ static inline void __ClearPageTail(struct page *page)
* Determine if a page has private stuff, indicating that release routines
* should be invoked upon it.
*/
-#define page_has_private(page) \
- ((page)->flags & ((1 << PG_private) | \
- (1 << PG_private_2)))
+static inline int page_has_private(struct page *page)
+{
+ return !!(page->flags & PAGE_FLAGS_PRIVATE);
+}
+
+#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index ada779f2417..4b938d4f3ac 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -38,6 +38,7 @@ enum {
PCG_LOCK, /* page cgroup is locked */
PCG_CACHE, /* charged as cache */
PCG_USED, /* this object is in use. */
+ PCG_ACCT_LRU, /* page has been accounted for */
};
#define TESTPCGFLAG(uname, lname) \
@@ -52,11 +53,23 @@ static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
{ clear_bit(PCG_##lname, &pc->flags); }
+#define TESTCLEARPCGFLAG(uname, lname) \
+static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
+ { return test_and_clear_bit(PCG_##lname, &pc->flags); }
+
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
+CLEARPCGFLAG(Cache, CACHE)
+SETPCGFLAG(Cache, CACHE)
TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
+SETPCGFLAG(Used, USED)
+
+SETPCGFLAG(AcctLRU, ACCT_LRU)
+CLEARPCGFLAG(AcctLRU, ACCT_LRU)
+TESTPCGFLAG(AcctLRU, ACCT_LRU)
+TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3b6b788fe2b..da1fda8623e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -543,6 +543,7 @@
#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_SB900_SMBUS 0x780b
#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
@@ -2526,6 +2527,16 @@
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 740caad09a4..368bd70f1d2 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -1,5 +1,9 @@
/*
- * Performance counters:
+ * NOTE: this file will be removed in a future kernel release; it is
+ * provided as a courtesy copy for user-space code that relies on the
+ * old (pre-rename) symbols and constants.
+ *
+ * Performance events:
*
* Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -131,19 +135,19 @@ enum perf_counter_sample_format {
* as specified by attr.read_format:
*
* struct read_format {
- * { u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 id; } && PERF_FORMAT_ID
- * } && !PERF_FORMAT_GROUP
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
*
- * { u64 nr;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 value;
- * { u64 id; } && PERF_FORMAT_ID
- * } cntr[nr];
- * } && PERF_FORMAT_GROUP
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
* };
*/
enum perf_counter_read_format {
@@ -314,9 +318,9 @@ enum perf_event_type {
/*
* struct {
- * struct perf_event_header header;
- * u64 id;
- * u64 lost;
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
* };
*/
PERF_EVENT_LOST = 2,
@@ -364,10 +368,10 @@ enum perf_event_type {
/*
* struct {
- * struct perf_event_header header;
- * u32 pid, tid;
+ * struct perf_event_header header;
+ * u32 pid, tid;
*
- * struct read_format values;
+ * struct read_format values;
* };
*/
PERF_EVENT_READ = 8,
@@ -383,23 +387,23 @@ enum perf_event_type {
* { u64 id; } && PERF_SAMPLE_ID
* { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 period; } && PERF_SAMPLE_PERIOD
+ * { u64 period; } && PERF_SAMPLE_PERIOD
*
* { struct read_format values; } && PERF_SAMPLE_READ
*
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
*
- * #
- * # The RAW record below is opaque data wrt the ABI
- * #
- * # That is, the ABI doesn't make any promises wrt to
- * # the stability of its content, it may vary depending
- * # on event, hardware, kernel version and phase of
- * # the moon.
- * #
- * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
- * #
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
*
* { u32 size;
* char data[size];}&& PERF_SAMPLE_RAW
@@ -422,437 +426,16 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
-#define PERF_FLAG_FD_NO_GROUP (1U << 0)
-#define PERF_FLAG_FD_OUTPUT (1U << 1)
+#define PERF_FLAG_FD_NO_GROUP (1U << 0)
+#define PERF_FLAG_FD_OUTPUT (1U << 1)
-#ifdef __KERNEL__
/*
- * Kernel-internal data types and definitions:
- */
-
-#ifdef CONFIG_PERF_COUNTERS
-# include <asm/perf_counter.h>
-#endif
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/fs.h>
-#include <linux/pid_namespace.h>
-#include <asm/atomic.h>
-
-#define PERF_MAX_STACK_DEPTH 255
-
-struct perf_callchain_entry {
- __u64 nr;
- __u64 ip[PERF_MAX_STACK_DEPTH];
-};
-
-struct perf_raw_record {
- u32 size;
- void *data;
-};
-
-struct task_struct;
-
-/**
- * struct hw_perf_counter - performance counter hardware details:
+ * In case some app still references the old symbols:
*/
-struct hw_perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
- union {
- struct { /* hardware */
- u64 config;
- unsigned long config_base;
- unsigned long counter_base;
- int idx;
- };
- union { /* software */
- atomic64_t count;
- struct hrtimer hrtimer;
- };
- };
- atomic64_t prev_count;
- u64 sample_period;
- u64 last_period;
- atomic64_t period_left;
- u64 interrupts;
-
- u64 freq_count;
- u64 freq_interrupts;
- u64 freq_stamp;
-#endif
-};
-
-struct perf_counter;
-
-/**
- * struct pmu - generic performance monitoring unit
- */
-struct pmu {
- int (*enable) (struct perf_counter *counter);
- void (*disable) (struct perf_counter *counter);
- void (*read) (struct perf_counter *counter);
- void (*unthrottle) (struct perf_counter *counter);
-};
-
-/**
- * enum perf_counter_active_state - the states of a counter
- */
-enum perf_counter_active_state {
- PERF_COUNTER_STATE_ERROR = -2,
- PERF_COUNTER_STATE_OFF = -1,
- PERF_COUNTER_STATE_INACTIVE = 0,
- PERF_COUNTER_STATE_ACTIVE = 1,
-};
-
-struct file;
-struct perf_mmap_data {
- struct rcu_head rcu_head;
- int nr_pages; /* nr of data pages */
- int writable; /* are we writable */
- int nr_locked; /* nr pages mlocked */
+#define __NR_perf_counter_open __NR_perf_event_open
- atomic_t poll; /* POLL_ for wakeups */
- atomic_t events; /* event limit */
+#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE
+#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE
- atomic_long_t head; /* write position */
- atomic_long_t done_head; /* completed head */
-
- atomic_t lock; /* concurrent writes */
- atomic_t wakeup; /* needs a wakeup */
- atomic_t lost; /* nr records lost */
-
- long watermark; /* wakeup watermark */
-
- struct perf_counter_mmap_page *user_page;
- void *data_pages[0];
-};
-
-struct perf_pending_entry {
- struct perf_pending_entry *next;
- void (*func)(struct perf_pending_entry *);
-};
-
-/**
- * struct perf_counter - performance counter kernel representation:
- */
-struct perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
- struct list_head list_entry;
- struct list_head event_entry;
- struct list_head sibling_list;
- int nr_siblings;
- struct perf_counter *group_leader;
- struct perf_counter *output;
- const struct pmu *pmu;
-
- enum perf_counter_active_state state;
- atomic64_t count;
-
- /*
- * These are the total time in nanoseconds that the counter
- * has been enabled (i.e. eligible to run, and the task has
- * been scheduled in, if this is a per-task counter)
- * and running (scheduled onto the CPU), respectively.
- *
- * They are computed from tstamp_enabled, tstamp_running and
- * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
- */
- u64 total_time_enabled;
- u64 total_time_running;
-
- /*
- * These are timestamps used for computing total_time_enabled
- * and total_time_running when the counter is in INACTIVE or
- * ACTIVE state, measured in nanoseconds from an arbitrary point
- * in time.
- * tstamp_enabled: the notional time when the counter was enabled
- * tstamp_running: the notional time when the counter was scheduled on
- * tstamp_stopped: in INACTIVE state, the notional time when the
- * counter was scheduled off.
- */
- u64 tstamp_enabled;
- u64 tstamp_running;
- u64 tstamp_stopped;
-
- struct perf_counter_attr attr;
- struct hw_perf_counter hw;
-
- struct perf_counter_context *ctx;
- struct file *filp;
-
- /*
- * These accumulate total time (in nanoseconds) that children
- * counters have been enabled and running, respectively.
- */
- atomic64_t child_total_time_enabled;
- atomic64_t child_total_time_running;
-
- /*
- * Protect attach/detach and child_list:
- */
- struct mutex child_mutex;
- struct list_head child_list;
- struct perf_counter *parent;
-
- int oncpu;
- int cpu;
-
- struct list_head owner_entry;
- struct task_struct *owner;
-
- /* mmap bits */
- struct mutex mmap_mutex;
- atomic_t mmap_count;
- struct perf_mmap_data *data;
-
- /* poll related */
- wait_queue_head_t waitq;
- struct fasync_struct *fasync;
-
- /* delayed work for NMIs and such */
- int pending_wakeup;
- int pending_kill;
- int pending_disable;
- struct perf_pending_entry pending;
-
- atomic_t event_limit;
-
- void (*destroy)(struct perf_counter *);
- struct rcu_head rcu_head;
-
- struct pid_namespace *ns;
- u64 id;
-#endif
-};
-
-/**
- * struct perf_counter_context - counter context structure
- *
- * Used as a container for task counters and CPU counters as well:
- */
-struct perf_counter_context {
- /*
- * Protect the states of the counters in the list,
- * nr_active, and the list:
- */
- spinlock_t lock;
- /*
- * Protect the list of counters. Locking either mutex or lock
- * is sufficient to ensure the list doesn't change; to change
- * the list you need to lock both the mutex and the spinlock.
- */
- struct mutex mutex;
-
- struct list_head counter_list;
- struct list_head event_list;
- int nr_counters;
- int nr_active;
- int is_active;
- int nr_stat;
- atomic_t refcount;
- struct task_struct *task;
-
- /*
- * Context clock, runs when context enabled.
- */
- u64 time;
- u64 timestamp;
-
- /*
- * These fields let us detect when two contexts have both
- * been cloned (inherited) from a common ancestor.
- */
- struct perf_counter_context *parent_ctx;
- u64 parent_gen;
- u64 generation;
- int pin_count;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct perf_counter_cpu_context - per cpu counter context structure
- */
-struct perf_cpu_context {
- struct perf_counter_context ctx;
- struct perf_counter_context *task_ctx;
- int active_oncpu;
- int max_pertask;
- int exclusive;
-
- /*
- * Recursion avoidance:
- *
- * task, softirq, irq, nmi context
- */
- int recursion[4];
-};
-
-struct perf_output_handle {
- struct perf_counter *counter;
- struct perf_mmap_data *data;
- unsigned long head;
- unsigned long offset;
- int nmi;
- int sample;
- int locked;
- unsigned long flags;
-};
-
-#ifdef CONFIG_PERF_COUNTERS
-
-/*
- * Set by architecture code:
- */
-extern int perf_max_counters;
-
-extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
-
-extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu);
-extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern int perf_counter_init_task(struct task_struct *child);
-extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_free_task(struct task_struct *task);
-extern void set_perf_counter_pending(void);
-extern void perf_counter_do_pending(void);
-extern void perf_counter_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
-extern int perf_counter_task_disable(void);
-extern int perf_counter_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
- struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx, int cpu);
-extern void perf_counter_update_userpage(struct perf_counter *counter);
-
-struct perf_sample_data {
- u64 type;
-
- u64 ip;
- struct {
- u32 pid;
- u32 tid;
- } tid_entry;
- u64 time;
- u64 addr;
- u64 id;
- u64 stream_id;
- struct {
- u32 cpu;
- u32 reserved;
- } cpu_entry;
- u64 period;
- struct perf_callchain_entry *callchain;
- struct perf_raw_record *raw;
-};
-
-extern void perf_output_sample(struct perf_output_handle *handle,
- struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_counter *counter);
-extern void perf_prepare_sample(struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_counter *counter,
- struct pt_regs *regs);
-
-extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
- struct perf_sample_data *data,
- struct pt_regs *regs);
-
-/*
- * Return 1 for a software counter, 0 for a hardware counter
- */
-static inline int is_software_counter(struct perf_counter *counter)
-{
- return (counter->attr.type != PERF_TYPE_RAW) &&
- (counter->attr.type != PERF_TYPE_HARDWARE) &&
- (counter->attr.type != PERF_TYPE_HW_CACHE);
-}
-
-extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
-
-extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
-
-static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
- if (atomic_read(&perf_swcounter_enabled[event]))
- __perf_swcounter_event(event, nr, nmi, regs, addr);
-}
-
-extern void __perf_counter_mmap(struct vm_area_struct *vma);
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & VM_EXEC)
- __perf_counter_mmap(vma);
-}
-
-extern void perf_counter_comm(struct task_struct *tsk);
-extern void perf_counter_fork(struct task_struct *tsk);
-
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
-
-extern int sysctl_perf_counter_paranoid;
-extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_sample_rate;
-
-extern void perf_counter_init(void);
-extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
- void *record, int entry_size);
-
-#ifndef perf_misc_flags
-#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
- PERF_EVENT_MISC_KERNEL)
-#define perf_instruction_pointer(regs) instruction_pointer(regs)
-#endif
-
-extern int perf_output_begin(struct perf_output_handle *handle,
- struct perf_counter *counter, unsigned int size,
- int nmi, int sample);
-extern void perf_output_end(struct perf_output_handle *handle);
-extern void perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len);
-#else
-static inline void
-perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
-static inline void
-perf_counter_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu) { }
-static inline void
-perf_counter_task_tick(struct task_struct *task, int cpu) { }
-static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
-static inline void perf_counter_exit_task(struct task_struct *child) { }
-static inline void perf_counter_free_task(struct task_struct *task) { }
-static inline void perf_counter_do_pending(void) { }
-static inline void perf_counter_print_debug(void) { }
-static inline void perf_disable(void) { }
-static inline void perf_enable(void) { }
-static inline int perf_counter_task_disable(void) { return -EINVAL; }
-static inline int perf_counter_task_enable(void) { return -EINVAL; }
-
-static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi,
- struct pt_regs *regs, u64 addr) { }
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
-static inline void perf_counter_comm(struct task_struct *tsk) { }
-static inline void perf_counter_fork(struct task_struct *tsk) { }
-static inline void perf_counter_init(void) { }
-
-#endif
-
-#define perf_output_put(handle, x) \
- perf_output_copy((handle), &(x), sizeof(x))
-
-#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
new file mode 100644
index 00000000000..acefaf71e6d
--- /dev/null
+++ b/include/linux/perf_event.h
@@ -0,0 +1,858 @@
+/*
+ * Performance events:
+ *
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_PERF_EVENT_H
+#define _LINUX_PERF_EVENT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+
+ PERF_TYPE_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
+ * syscall:
+ */
+enum perf_hw_id {
+ /*
+ * Common hardware events, generalized by the kernel:
+ */
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+
+ PERF_COUNT_HW_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized hardware cache events:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
+ */
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+
+ PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+
+ PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+
+ PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
+};
+
+/*
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+
+ PERF_COUNT_SW_MAX, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_sample_format {
+ PERF_SAMPLE_IP = 1U << 0,
+ PERF_SAMPLE_TID = 1U << 1,
+ PERF_SAMPLE_TIME = 1U << 2,
+ PERF_SAMPLE_ADDR = 1U << 3,
+ PERF_SAMPLE_READ = 1U << 4,
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ PERF_SAMPLE_ID = 1U << 6,
+ PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_PERIOD = 1U << 8,
+ PERF_SAMPLE_STREAM_ID = 1U << 9,
+ PERF_SAMPLE_RAW = 1U << 10,
+
+ PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
+};
+
+/*
+ * The format of the data returned by read() on a perf event fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
+ */
+enum perf_event_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_GROUP = 1U << 3,
+
+ PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+};
+
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+
+/*
+ * Hardware event_id to monitor via a performance monitoring event:
+ */
+struct perf_event_attr {
+
+ /*
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+
+ /*
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+
+ /*
+ * Type specific configuration information.
+ */
+ __u64 config;
+
+ union {
+ __u64 sample_period;
+ __u64 sample_freq;
+ };
+
+ __u64 sample_type;
+ __u64 read_format;
+
+ __u64 disabled : 1, /* off by default */
+ inherit : 1, /* children inherit it */
+ pinned : 1, /* must always be on PMU */
+ exclusive : 1, /* only group on PMU */
+ exclude_user : 1, /* don't count user */
+ exclude_kernel : 1, /* ditto kernel */
+ exclude_hv : 1, /* ditto hypervisor */
+ exclude_idle : 1, /* don't count when idle */
+ mmap : 1, /* include mmap data */
+ comm : 1, /* include comm data */
+ freq : 1, /* use freq, not period */
+ inherit_stat : 1, /* per task counts */
+ enable_on_exec : 1, /* next exec enables */
+ task : 1, /* trace fork/exit */
+ watermark : 1, /* wakeup_watermark */
+
+ __reserved_1 : 49;
+
+ union {
+ __u32 wakeup_events; /* wakeup every n events */
+ __u32 wakeup_watermark; /* bytes before wakeup */
+ };
+ __u32 __reserved_2;
+
+ __u64 __reserved_3;
+};
+
+/*
+ * Ioctls that can be done on a perf event fd:
+ */
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+
+enum perf_event_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1U << 0,
+};
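
Taken together, perf_event_attr and the ioctls above are enough to count a hardware event from user space. The program below is a minimal hedged sketch (error handling trimmed); it assumes this header is installed and that the C library's <sys/syscall.h> exposes __NR_perf_event_open on the target architecture, since there is no dedicated libc wrapper.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* pid = 0 (self), cpu = -1 (any), no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));	/* default read_format: one u64 */
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}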
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page {
+ __u32 version; /* version number of this structure */
+ __u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw events in user-space.
+ *
+ * u32 seq;
+ * s64 count;
+ *
+ * do {
+ * seq = pc->lock;
+ *
+ * barrier()
+ * if (pc->index) {
+ * count = pmc_read(pc->index - 1);
+ * count += pc->offset;
+ * } else
+ * goto regular_read;
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ *
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ __u32 lock; /* seqlock for synchronization */
+ __u32 index; /* hardware event identifier */
+ __s64 offset; /* add to hardware event value */
+ __u64 time_enabled; /* time event active */
+ __u64 time_running; /* time event on cpu */
+
+ /*
+ * Hole for extension of the self monitor capabilities
+ */
+
+ __u64 __reserved[123]; /* align to 1k */
+
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space reading the @data_head value should issue an rmb(), on
+ * SMP capable platforms, after reading this value -- see
+ * perf_event_wakeup().
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data. In this case
+ * the kernel will not over-write unread data.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+};
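
The comment in perf_event_mmap_page above spells out the seqlock-style protocol for self-monitoring reads; one hedged way to express it in user space is sketched below. It assumes 'pc' points at the mapped page, and that barrier() and pmc_read() are supplied by architecture-specific user code (e.g. a compiler barrier and RDPMC on x86); neither is provided by this header.

static unsigned long long
example_self_read(volatile struct perf_event_mmap_page *pc)
{
	unsigned long long count;
	unsigned int seq;

	do {
		seq = pc->lock;
		barrier();

		if (pc->index == 0)
			return 0;	/* not on a hw counter; fall back to read() */

		count = pmc_read(pc->index - 1);	/* arch-specific, assumed */
		count += pc->offset;

		barrier();
	} while (pc->lock != seq);

	return count;
}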
+
+#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_KERNEL (1 << 0)
+#define PERF_RECORD_MISC_USER (2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+enum perf_event_type {
+
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * };
+ */
+ PERF_RECORD_MMAP = 1,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * };
+ */
+ PERF_RECORD_LOST = 2,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * };
+ */
+ PERF_RECORD_COMM = 3,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * };
+ */
+ PERF_RECORD_EXIT = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * u64 stream_id;
+ * };
+ */
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * };
+ */
+ PERF_RECORD_FORK = 7,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ *
+ * struct read_format values;
+ * };
+ */
+ PERF_RECORD_READ = 8,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
+ *
+ * { struct read_format values; } && PERF_SAMPLE_READ
+ *
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ *
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
+ *
+ * { u32 size;
+ * char data[size];}&& PERF_SAMPLE_RAW
+ * };
+ */
+ PERF_RECORD_SAMPLE = 9,
+
+ PERF_RECORD_MAX, /* non-ABI */
+};
+
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
+
+#define PERF_FLAG_FD_NO_GROUP (1U << 0)
+#define PERF_FLAG_FD_OUTPUT (1U << 1)
+
+#ifdef __KERNEL__
+/*
+ * Kernel-internal data types and definitions:
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+# include <asm/perf_event.h>
+#endif
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/pid_namespace.h>
+#include <asm/atomic.h>
+
+#define PERF_MAX_STACK_DEPTH 255
+
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[PERF_MAX_STACK_DEPTH];
+};
+
+struct perf_raw_record {
+ u32 size;
+ void *data;
+};
+
+struct task_struct;
+
+/**
+ * struct hw_perf_event - performance event hardware details:
+ */
+struct hw_perf_event {
+#ifdef CONFIG_PERF_EVENTS
+ union {
+ struct { /* hardware */
+ u64 config;
+ unsigned long config_base;
+ unsigned long event_base;
+ int idx;
+ };
+ union { /* software */
+ atomic64_t count;
+ struct hrtimer hrtimer;
+ };
+ };
+ atomic64_t prev_count;
+ u64 sample_period;
+ u64 last_period;
+ atomic64_t period_left;
+ u64 interrupts;
+
+ u64 freq_count;
+ u64 freq_interrupts;
+ u64 freq_stamp;
+#endif
+};
+
+struct perf_event;
+
+/**
+ * struct pmu - generic performance monitoring unit
+ */
+struct pmu {
+ int (*enable) (struct perf_event *event);
+ void (*disable) (struct perf_event *event);
+ void (*read) (struct perf_event *event);
+ void (*unthrottle) (struct perf_event *event);
+};
+
+/**
+ * enum perf_event_active_state - the states of an event
+ */
+enum perf_event_active_state {
+ PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_OFF = -1,
+ PERF_EVENT_STATE_INACTIVE = 0,
+ PERF_EVENT_STATE_ACTIVE = 1,
+};
+
+struct file;
+
+struct perf_mmap_data {
+ struct rcu_head rcu_head;
+ int nr_pages; /* nr of data pages */
+ int writable; /* are we writable */
+ int nr_locked; /* nr pages mlocked */
+
+ atomic_t poll; /* POLL_ for wakeups */
+ atomic_t events; /* event_id limit */
+
+ atomic_long_t head; /* write position */
+ atomic_long_t done_head; /* completed head */
+
+ atomic_t lock; /* concurrent writes */
+ atomic_t wakeup; /* needs a wakeup */
+ atomic_t lost; /* nr records lost */
+
+ long watermark; /* wakeup watermark */
+
+ struct perf_event_mmap_page *user_page;
+ void *data_pages[0];
+};
+
+struct perf_pending_entry {
+ struct perf_pending_entry *next;
+ void (*func)(struct perf_pending_entry *);
+};
+
+/**
+ * struct perf_event - performance event kernel representation:
+ */
+struct perf_event {
+#ifdef CONFIG_PERF_EVENTS
+ struct list_head group_entry;
+ struct list_head event_entry;
+ struct list_head sibling_list;
+ int nr_siblings;
+ struct perf_event *group_leader;
+ struct perf_event *output;
+ const struct pmu *pmu;
+
+ enum perf_event_active_state state;
+ atomic64_t count;
+
+ /*
+ * These are the total time in nanoseconds that the event
+ * has been enabled (i.e. eligible to run, and the task has
+ * been scheduled in, if this is a per-task event)
+ * and running (scheduled onto the CPU), respectively.
+ *
+ * They are computed from tstamp_enabled, tstamp_running and
+ * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
+ */
+ u64 total_time_enabled;
+ u64 total_time_running;
+
+ /*
+ * These are timestamps used for computing total_time_enabled
+ * and total_time_running when the event is in INACTIVE or
+ * ACTIVE state, measured in nanoseconds from an arbitrary point
+ * in time.
+ * tstamp_enabled: the notional time when the event was enabled
+ * tstamp_running: the notional time when the event was scheduled on
+ * tstamp_stopped: in INACTIVE state, the notional time when the
+ * event was scheduled off.
+ */
+ u64 tstamp_enabled;
+ u64 tstamp_running;
+ u64 tstamp_stopped;
+
+ struct perf_event_attr attr;
+ struct hw_perf_event hw;
+
+ struct perf_event_context *ctx;
+ struct file *filp;
+
+ /*
+ * These accumulate total time (in nanoseconds) that children
+ * events have been enabled and running, respectively.
+ */
+ atomic64_t child_total_time_enabled;
+ atomic64_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+ */
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_event *parent;
+
+ int oncpu;
+ int cpu;
+
+ struct list_head owner_entry;
+ struct task_struct *owner;
+
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+ struct perf_mmap_data *data;
+
+ /* poll related */
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+
+ /* delayed work for NMIs and such */
+ int pending_wakeup;
+ int pending_kill;
+ int pending_disable;
+ struct perf_pending_entry pending;
+
+ atomic_t event_limit;
+
+ void (*destroy)(struct perf_event *);
+ struct rcu_head rcu_head;
+
+ struct pid_namespace *ns;
+ u64 id;
+#endif
+};
+
+/**
+ * struct perf_event_context - event context structure
+ *
+ * Used as a container for task events and CPU events as well:
+ */
+struct perf_event_context {
+ /*
+ * Protect the states of the events in the list,
+ * nr_active, and the list:
+ */
+ spinlock_t lock;
+ /*
+ * Protect the list of events. Locking either mutex or lock
+ * is sufficient to ensure the list doesn't change; to change
+ * the list you need to lock both the mutex and the spinlock.
+ */
+ struct mutex mutex;
+
+ struct list_head group_list;
+ struct list_head event_list;
+ int nr_events;
+ int nr_active;
+ int is_active;
+ int nr_stat;
+ atomic_t refcount;
+ struct task_struct *task;
+
+ /*
+ * Context clock, runs when context enabled.
+ */
+ u64 time;
+ u64 timestamp;
+
+ /*
+ * These fields let us detect when two contexts have both
+ * been cloned (inherited) from a common ancestor.
+ */
+ struct perf_event_context *parent_ctx;
+ u64 parent_gen;
+ u64 generation;
+ int pin_count;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct perf_event_cpu_context - per cpu event context structure
+ */
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int active_oncpu;
+ int max_pertask;
+ int exclusive;
+
+ /*
+ * Recursion avoidance:
+ *
+ * task, softirq, irq, nmi context
+ */
+ int recursion[4];
+};
+
+struct perf_output_handle {
+ struct perf_event *event;
+ struct perf_mmap_data *data;
+ unsigned long head;
+ unsigned long offset;
+ int nmi;
+ int sample;
+ int locked;
+ unsigned long flags;
+};
+
+#ifdef CONFIG_PERF_EVENTS
+
+/*
+ * Set by architecture code:
+ */
+extern int perf_max_events;
+
+extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+
+extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu);
+extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern int perf_event_init_task(struct task_struct *child);
+extern void perf_event_exit_task(struct task_struct *child);
+extern void perf_event_free_task(struct task_struct *task);
+extern void set_perf_event_pending(void);
+extern void perf_event_do_pending(void);
+extern void perf_event_print_debug(void);
+extern void __perf_disable(void);
+extern bool __perf_enable(void);
+extern void perf_disable(void);
+extern void perf_enable(void);
+extern int perf_event_task_disable(void);
+extern int perf_event_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_event *group_leader,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx, int cpu);
+extern void perf_event_update_userpage(struct perf_event *event);
+
+struct perf_sample_data {
+ u64 type;
+
+ u64 ip;
+ struct {
+ u32 pid;
+ u32 tid;
+ } tid_entry;
+ u64 time;
+ u64 addr;
+ u64 id;
+ u64 stream_id;
+ struct {
+ u32 cpu;
+ u32 reserved;
+ } cpu_entry;
+ u64 period;
+ struct perf_callchain_entry *callchain;
+ struct perf_raw_record *raw;
+};
+
+extern void perf_output_sample(struct perf_output_handle *handle,
+ struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void perf_prepare_sample(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs);
+
+extern int perf_event_overflow(struct perf_event *event, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+/*
+ * Return 1 for a software event, 0 for a hardware event
+ */
+static inline int is_software_event(struct perf_event *event)
+{
+ return (event->attr.type != PERF_TYPE_RAW) &&
+ (event->attr.type != PERF_TYPE_HARDWARE) &&
+ (event->attr.type != PERF_TYPE_HW_CACHE);
+}
+
+extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+ if (atomic_read(&perf_swevent_enabled[event_id]))
+ __perf_sw_event(event_id, nr, nmi, regs, addr);
+}
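The wrapper above keeps disabled software events cheap: the out-of-line __perf_sw_event() is only reached when at least one event of that class is enabled. As an illustration only (not part of this patch; the surrounding fault-handler variables are assumed), a typical call site looks like:

	/* e.g. from an architecture page-fault handler */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);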
+
+extern void __perf_event_mmap(struct vm_area_struct *vma);
+
+static inline void perf_event_mmap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __perf_event_mmap(vma);
+}
+
+extern void perf_event_comm(struct task_struct *tsk);
+extern void perf_event_fork(struct task_struct *tsk);
+
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+
+extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_mlock;
+extern int sysctl_perf_event_sample_rate;
+
+extern void perf_event_init(void);
+extern void perf_tp_event(int event_id, u64 addr, u64 count,
+ void *record, int entry_size);
+
+#ifndef perf_misc_flags
+#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
+ PERF_RECORD_MISC_KERNEL)
+#define perf_instruction_pointer(regs) instruction_pointer(regs)
+#endif
+
+extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event, unsigned int size,
+ int nmi, int sample);
+extern void perf_output_end(struct perf_output_handle *handle);
+extern void perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len);
+#else
+static inline void
+perf_event_task_sched_in(struct task_struct *task, int cpu) { }
+static inline void
+perf_event_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu) { }
+static inline void
+perf_event_task_tick(struct task_struct *task, int cpu) { }
+static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline void perf_event_exit_task(struct task_struct *child) { }
+static inline void perf_event_free_task(struct task_struct *task) { }
+static inline void perf_event_do_pending(void) { }
+static inline void perf_event_print_debug(void) { }
+static inline void perf_disable(void) { }
+static inline void perf_enable(void) { }
+static inline int perf_event_task_disable(void) { return -EINVAL; }
+static inline int perf_event_task_enable(void) { return -EINVAL; }
+
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr) { }
+
+static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+static inline void perf_event_comm(struct task_struct *tsk) { }
+static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_init(void) { }
+
+#endif
+
+#define perf_output_put(handle, x) \
+ perf_output_copy((handle), &(x), sizeof(x))
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index 1ef5a078183..e5126cff9b2 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -38,6 +38,7 @@
#define PNPIPE_IFINDEX 2
#define PNADDR_ANY 0
+#define PNADDR_BROADCAST 0xFC
#define PNPORT_RESOURCE_ROUTING 0
/* Values for PNPIPE_ENCAP option */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index b063c7328ba..fddfafaed02 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -360,6 +360,7 @@ struct pnp_driver {
unsigned int flags;
int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
void (*remove) (struct pnp_dev *dev);
+ void (*shutdown) (struct pnp_dev *dev);
int (*suspend) (struct pnp_dev *dev, pm_message_t state);
int (*resume) (struct pnp_dev *dev);
struct device_driver driver;
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 6729f7dcd60..7fc194aef8c 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -65,6 +65,9 @@
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
+/********** lib/flex_array.c **********/
+#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
+
/********** security/ **********/
#define KEY_DESTROY 0xbd
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 594c494ac3f..b5d096d3a9b 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -39,6 +39,13 @@ enum {
};
enum {
+ POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
+ POWER_SUPPLY_CHARGE_TYPE_NONE,
+ POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
+ POWER_SUPPLY_CHARGE_TYPE_FAST,
+};
+
+enum {
POWER_SUPPLY_HEALTH_UNKNOWN = 0,
POWER_SUPPLY_HEALTH_GOOD,
POWER_SUPPLY_HEALTH_OVERHEAT,
@@ -58,9 +65,19 @@ enum {
POWER_SUPPLY_TECHNOLOGY_LiMn,
};
+enum {
+ POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0,
+ POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL,
+ POWER_SUPPLY_CAPACITY_LEVEL_LOW,
+ POWER_SUPPLY_CAPACITY_LEVEL_NORMAL,
+ POWER_SUPPLY_CAPACITY_LEVEL_HIGH,
+ POWER_SUPPLY_CAPACITY_LEVEL_FULL,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -89,6 +106,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_ENERGY_AVG,
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
@@ -126,6 +144,7 @@ struct power_supply {
enum power_supply_property psp,
union power_supply_propval *val);
void (*external_power_changed)(struct power_supply *psy);
+ void (*set_charged)(struct power_supply *psy);
/* For APM emulation, think legacy userspace. */
int use_for_apm;
@@ -165,8 +184,10 @@ struct power_supply_info {
int use_for_apm;
};
+extern struct power_supply *power_supply_get_by_name(char *name);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_set_battery_charged(struct power_supply *psy);
#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
extern int power_supply_is_system_supplied(void);
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index b00df4c79c6..931150566ad 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -85,7 +85,9 @@
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
-#define PR_TASK_PERF_COUNTERS_DISABLE 31
-#define PR_TASK_PERF_COUNTERS_ENABLE 32
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
+
+#define PR_MCE_KILL 33
#endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index e6e77d31c41..379eaed72d4 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -78,10 +78,19 @@ struct proc_dir_entry {
struct list_head pde_openers; /* who did ->open, but not ->release */
};
+enum kcore_type {
+ KCORE_TEXT,
+ KCORE_VMALLOC,
+ KCORE_RAM,
+ KCORE_VMEMMAP,
+ KCORE_OTHER,
+};
+
struct kcore_list {
- struct kcore_list *next;
+ struct list_head list;
unsigned long addr;
size_t size;
+ int type;
};
struct vmcore {
@@ -233,11 +242,12 @@ static inline void dup_mm_exe_file(struct mm_struct *oldmm,
#endif /* CONFIG_PROC_FS */
#if !defined(CONFIG_PROC_KCORE)
-static inline void kclist_add(struct kcore_list *new, void *addr, size_t size)
+static inline void
+kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
}
#else
-extern void kclist_add(struct kcore_list *, void *, size_t);
+extern void kclist_add(struct kcore_list *, void *, size_t, int type);
#endif
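With the extra type argument, each registered region is now tagged with one of the kcore_type values above. A caller sketch, illustrative only (VMALLOC_START/VMALLOC_END come from the caller's architecture headers):

	static struct kcore_list kcore_vmalloc;

	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);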
union proc_op {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 26361c4c037..3ebb2315364 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -135,8 +135,8 @@ static inline int sb_any_quota_active(struct super_block *sb)
/*
* Operations supported for diskquotas.
*/
-extern struct dquot_operations dquot_operations;
-extern struct quotactl_ops vfs_quotactl_ops;
+extern const struct dquot_operations dquot_operations;
+extern const struct quotactl_ops vfs_quotactl_ops;
#define sb_dquot_ops (&dquot_operations)
#define sb_quotactl_ops (&vfs_quotactl_ops)
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 277f4b964df..490c5b37b6d 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -125,6 +125,8 @@ struct regulator_bulk_data {
/* regulator get and put */
struct regulator *__must_check regulator_get(struct device *dev,
const char *id);
+struct regulator *__must_check regulator_get_exclusive(struct device *dev,
+ const char *id);
void regulator_put(struct regulator *regulator);
/* regulator output control and status */
@@ -144,6 +146,8 @@ void regulator_bulk_free(int num_consumers,
int regulator_count_voltages(struct regulator *regulator);
int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_is_supported_voltage(struct regulator *regulator,
+ int min_uV, int max_uV);
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
int regulator_get_voltage(struct regulator *regulator);
int regulator_set_current_limit(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index ce1be708ca1..31f2055eae2 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -37,7 +37,8 @@ enum regulator_status {
*
* @enable: Configure the regulator as enabled.
* @disable: Configure the regulator as disabled.
- * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
+ * @is_enabled: Return 1 if the regulator is enabled, 0 if not.
+ * May also return negative errno.
*
* @set_voltage: Set the voltage for the regulator within the range specified.
* The driver should select the voltage closest to min_uV.
@@ -162,6 +163,8 @@ struct regulator_desc {
struct regulator_dev {
struct regulator_desc *desc;
int use_count;
+ int open_count;
+ int exclusive;
/* lists we belong to */
struct list_head list; /* list of all regulators */
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 91b4da31f1b..e94a4a1c7c8 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -5,6 +5,9 @@
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
+ * Copyright (c) 2009 Nokia Corporation
+ * Roger Quadros <ext-roger.quadros@nokia.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
@@ -16,9 +19,30 @@
struct regulator_init_data;
+/**
+ * struct fixed_voltage_config - fixed_voltage_config structure
+ * @supply_name: Name of the regulator supply
+ * @microvolts: Output voltage of regulator
+ * @gpio: GPIO to use for enable control
+ * set to -EINVAL if not used
+ * @enable_high: Polarity of enable GPIO
+ * 1 = Active high, 0 = Active low
+ * @enabled_at_boot: Whether regulator has been enabled at
+ * boot or not. 1 = Yes, 0 = No.
+ * This is used to keep the regulator in
+ * its default state.
+ * @init_data: regulator_init_data
+ *
+ * This structure contains fixed voltage regulator configuration
+ * information that must be passed by platform code to the fixed
+ * voltage regulator driver.
+ */
struct fixed_voltage_config {
const char *supply_name;
int microvolts;
+ int gpio;
+ unsigned enable_high:1;
+ unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
};
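This structure is filled in by board code and handed to the fixed-voltage regulator driver through platform data. A minimal sketch under stated assumptions: the names are hypothetical, the REGULATOR_SUPPLY() helper is the one added to <linux/regulator/machine.h> later in this patch, and "reg-fixed-voltage" is assumed to be the platform driver name.

#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>

static struct regulator_consumer_supply wlan_consumers[] = {
	REGULATOR_SUPPLY("vdd_wlan", "wlan.0"),	/* hypothetical consumer */
};

static struct regulator_init_data wlan_reg_init_data = {
	.constraints = {
		.always_on		= 1,
	},
	.num_consumer_supplies		= ARRAY_SIZE(wlan_consumers),
	.consumer_supplies		= wlan_consumers,
};

static struct fixed_voltage_config wlan_reg_config = {
	.supply_name	= "vdd_wlan",
	.microvolts	= 1800000,	/* 1.8V */
	.gpio		= -EINVAL,	/* no enable GPIO */
	.enabled_at_boot = 1,
	.init_data	= &wlan_reg_init_data,
};

static struct platform_device wlan_reg_device = {
	.name	= "reg-fixed-voltage",	/* driver name assumed */
	.id	= 0,
	.dev	= {
		.platform_data = &wlan_reg_config,
	},
};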
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index bac64fa390f..87f5f176d4e 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -41,7 +41,7 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
/**
- * struct regulator_state - regulator state during low power syatem states
+ * struct regulator_state - regulator state during low power system states
*
* This describes a regulators state during a system wide low power state.
*
@@ -117,25 +117,37 @@ struct regulation_constraints {
/* mode to set on startup */
unsigned int initial_mode;
- /* constriant flags */
+ /* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
- unsigned apply_uV:1; /* apply uV constraint iff min == max */
+ unsigned apply_uV:1; /* apply uV constraint if min == max */
};
/**
* struct regulator_consumer_supply - supply -> device mapping
*
- * This maps a supply name to a device.
+ * This maps a supply name to a device. Only one of dev or dev_name
+ * can be specified. Using dev_name is the preferred form, since it
+ * supports buses, such as I2C, which make struct device available
+ * late.
*
* @dev: Device structure for the consumer.
+ * @dev_name: Result of dev_name() for the consumer.
* @supply: Name for the supply.
*/
struct regulator_consumer_supply {
struct device *dev; /* consumer */
+ const char *dev_name; /* dev_name() for consumer */
const char *supply; /* consumer supply - e.g. "vcc" */
};
+/* Initialize struct regulator_consumer_supply */
+#define REGULATOR_SUPPLY(_name, _dev_name) \
+{ \
+ .supply = _name, \
+ .dev_name = _dev_name, \
+}
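As the comment above notes, dev_name-based mapping is the preferred form, and the helper keeps the supply tables compact. Illustrative only, with a hypothetical device name:

static struct regulator_consumer_supply sdhc_consumers[] = {
	REGULATOR_SUPPLY("vmmc", "sdhci.0"),	/* "sdhci.0" is hypothetical */
};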
+
/**
* struct regulator_init_data - regulator platform initialisation data.
*
@@ -166,6 +178,12 @@ struct regulator_init_data {
int regulator_suspend_prepare(suspend_state_t state);
+#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
+#else
+static inline void regulator_has_full_constraints(void)
+{
+}
+#endif
#endif
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
index 44563192bf1..de9a7fae20b 100644
--- a/include/linux/regulator/max1586.h
+++ b/include/linux/regulator/max1586.h
@@ -36,7 +36,7 @@
* max1586_subdev_data - regulator data
* @id: regulator Id (either MAX1586_V3 or MAX1586_V6)
* @name: regulator cute name (example for V3: "vcc_core")
- * @platform_data: regulator init data (contraints, supplies, ...)
+ * @platform_data: regulator init data (constraints, supplies, ...)
*/
struct max1586_subdev_data {
int id;
@@ -46,7 +46,7 @@ struct max1586_subdev_data {
/**
* max1586_platform_data - platform data for max1586
- * @num_subdevs: number of regultors used (may be 1 or 2)
+ * @num_subdevs: number of regulators used (may be 1 or 2)
* @subdevs: regulator used
* At most, there will be a regulator for V3 and one for V6 voltages.
* @v3_gain: gain on the V3 voltage output multiplied by 1e6.
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 953fc055e87..14a86bc7102 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -140,7 +140,7 @@ struct rchan_callbacks
* cause relay_open() to create a single global buffer rather
* than the default set of per-cpu buffers.
*
- * See Documentation/filesystems/relayfs.txt for more info.
+ * See Documentation/filesystems/relay.txt for more info.
*/
struct dentry *(*create_buf_file)(const char *filename,
struct dentry *parent,
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 511f42fc681..731af71cddc 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -35,6 +35,10 @@ struct res_counter {
*/
unsigned long long limit;
/*
+ * the limit that usage is allowed to exceed
+ */
+ unsigned long long soft_limit;
+ /*
* the number of unsuccessful attempts to consume the resource
*/
unsigned long long failcnt;
@@ -87,6 +91,7 @@ enum {
RES_MAX_USAGE,
RES_LIMIT,
RES_FAILCNT,
+ RES_SOFT_LIMIT,
};
/*
@@ -109,7 +114,8 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
int __must_check res_counter_charge_locked(struct res_counter *counter,
unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
- unsigned long val, struct res_counter **limit_fail_at);
+ unsigned long val, struct res_counter **limit_fail_at,
+ struct res_counter **soft_limit_at);
/*
* uncharge - tell that some portion of the resource is released
@@ -122,7 +128,8 @@ int __must_check res_counter_charge(struct res_counter *counter,
*/
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+ bool *was_soft_limit_excess);
static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
{
@@ -132,6 +139,36 @@ static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
return false;
}
+static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
+{
+ if (cnt->usage < cnt->soft_limit)
+ return true;
+
+ return false;
+}
+
+/**
+ * res_counter_soft_limit_excess - get the difference between usage and the soft limit
+ * @cnt: The counter
+ *
+ * Returns 0 if usage is less than or equal to the soft limit;
+ * otherwise, returns the difference between usage and the soft limit.
+ */
+static inline unsigned long long
+res_counter_soft_limit_excess(struct res_counter *cnt)
+{
+ unsigned long long excess;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ if (cnt->usage <= cnt->soft_limit)
+ excess = 0;
+ else
+ excess = cnt->usage - cnt->soft_limit;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return excess;
+}
+
/*
* Helper function to detect if the cgroup is within its limit or
* not. It's currently called from cgroup_rss_prepare()
@@ -147,6 +184,17 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
return ret;
}
+static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ ret = res_counter_soft_limit_check_locked(cnt);
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return ret;
+}
+
static inline void res_counter_reset_max(struct res_counter *cnt)
{
unsigned long flags;
@@ -180,4 +228,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
return ret;
}
+static inline int
+res_counter_set_soft_limit(struct res_counter *cnt,
+ unsigned long long soft_limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ cnt->soft_limit = soft_limit;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return 0;
+}
+
#endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bf116d0dbf2..cb0ba703260 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -71,21 +71,29 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
-#ifdef CONFIG_DEBUG_VM
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
-#else
-static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+static inline void page_dup_rmap(struct page *page)
{
atomic_inc(&page->_mapcount);
}
-#endif
/*
* Called from mm/vmscan.c to handle paging out
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
-int try_to_unmap(struct page *, int ignore_refs);
+enum ttu_flags {
+ TTU_UNMAP = 0, /* unmap mode */
+ TTU_MIGRATION = 1, /* migration mode */
+ TTU_MUNLOCK = 2, /* munlock mode */
+ TTU_ACTION_MASK = 0xff,
+
+ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
+ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
+ TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
+
+int try_to_unmap(struct page *, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -112,6 +120,13 @@ int page_mkclean(struct page *);
*/
int try_to_munlock(struct page *);
+/*
+ * Called by memory-failure.c to kill processes.
+ */
+struct anon_vma *page_lock_anon_vma(struct page *page);
+void page_unlock_anon_vma(struct anon_vma *anon_vma);
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 115af05ecab..75e6e60bf58 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,7 +100,7 @@ struct robust_list_head;
struct bio;
struct fs_struct;
struct bts_context;
-struct perf_counter_context;
+struct perf_event_context;
/*
* List of flags we want to share for kernel threads,
@@ -140,6 +140,10 @@ extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
+extern unsigned long nr_iowait_cpu(void);
+extern unsigned long this_cpu_load(void);
+
+
extern void calc_global_load(void);
extern u64 cpu_nr_migrations(int cpu);
@@ -305,7 +309,7 @@ extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern int softlockup_thresh;
@@ -327,7 +331,7 @@ extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos);
#endif
@@ -422,6 +426,15 @@ static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
return max(mm->hiwater_rss, get_mm_rss(mm));
}
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+ struct mm_struct *mm)
+{
+ unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+ if (*maxrss < hiwater_rss)
+ *maxrss = hiwater_rss;
+}
+
static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
return max(mm->hiwater_vm, mm->total_vm);
@@ -434,7 +447,9 @@ extern int get_dumpable(struct mm_struct *mm);
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
+
#define MMF_DUMPABLE_BITS 2
+#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
@@ -444,6 +459,7 @@ extern int get_dumpable(struct mm_struct *mm);
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8
+
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 7
#define MMF_DUMP_FILTER_MASK \
@@ -457,6 +473,10 @@ extern int get_dumpable(struct mm_struct *mm);
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
+ /* leave room for more dump flags */
+#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
+
+#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
struct sighand_struct {
atomic_t count;
@@ -473,6 +493,13 @@ struct pacct_struct {
unsigned long ac_minflt, ac_majflt;
};
+struct cpu_itimer {
+ cputime_t expires;
+ cputime_t incr;
+ u32 error;
+ u32 incr_error;
+};
+
/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
@@ -567,9 +594,12 @@ struct signal_struct {
struct pid *leader_pid;
ktime_t it_real_incr;
- /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
- cputime_t it_prof_expires, it_virt_expires;
- cputime_t it_prof_incr, it_virt_incr;
+ /*
+ * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
+ * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
+ * values are defined to be 0 and 1 respectively
+ */
+ struct cpu_itimer it[2];
/*
* Thread group totals for process CPU timers.
@@ -601,6 +631,7 @@ struct signal_struct {
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
+ unsigned long maxrss, cmaxrss;
struct task_io_accounting ioac;
/*
@@ -632,6 +663,8 @@ struct signal_struct {
unsigned audit_tty;
struct tty_audit_buf *tty_audit_buf;
#endif
+
+ int oom_adj; /* OOM kill score adjustment (bit shift) */
};
/* Context switch must be unlocked if interrupts are to be enabled */
@@ -701,7 +734,7 @@ struct user_struct {
#endif
#endif
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
};
@@ -1214,7 +1247,6 @@ struct task_struct {
* a short time
*/
unsigned char fpu_counter;
- s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
@@ -1239,7 +1271,6 @@ struct task_struct {
struct mm_struct *mm, *active_mm;
/* task state */
- struct linux_binfmt *binfmt;
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
@@ -1451,10 +1482,10 @@ struct task_struct {
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
-#ifdef CONFIG_PERF_COUNTERS
- struct perf_counter_context *perf_counter_ctxp;
- struct mutex perf_counter_mutex;
- struct list_head perf_counter_list;
+#ifdef CONFIG_PERF_EVENTS
+ struct perf_event_context *perf_event_ctxp;
+ struct mutex perf_event_mutex;
+ struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
@@ -1507,6 +1538,7 @@ struct task_struct {
/* bitmask of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
+ unsigned long stack_start;
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1702,6 +1734,7 @@ extern cputime_t task_gtime(struct task_struct *p);
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
#define PF_SIGNALED 0x00000400 /* killed by a signal */
@@ -1713,7 +1746,7 @@ extern cputime_t task_gtime(struct task_struct *p);
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
-#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
+#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
@@ -1721,6 +1754,7 @@ extern cputime_t task_gtime(struct task_struct *p);
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
+#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
@@ -1784,10 +1818,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return 0;
}
#endif
+
+#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
return set_cpus_allowed_ptr(p, &new_mask);
}
+#endif
/*
* Architectures can set this to 1 if they have specified
@@ -1870,7 +1907,7 @@ extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
int sched_nr_latency_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length,
+ void __user *buffer, size_t *length,
loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_DEBUG
@@ -1888,7 +1925,7 @@ extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
int sched_rt_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
extern unsigned int sysctl_sched_compat_yield;
@@ -2023,6 +2060,7 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
+extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
@@ -2300,7 +2338,10 @@ static inline int signal_pending(struct task_struct *p)
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
-extern int __fatal_signal_pending(struct task_struct *p);
+static inline int __fatal_signal_pending(struct task_struct *p)
+{
+ return unlikely(sigismember(&p->pending.signal, SIGKILL));
+}
static inline int fatal_signal_pending(struct task_struct *p)
{
diff --git a/include/linux/security.h b/include/linux/security.h
index d050b66ab9e..239e40d0450 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -133,7 +133,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
return PAGE_ALIGN(mmap_min_addr);
return hint;
}
-extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp,
+extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#ifdef CONFIG_SECURITY
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 0c6a86b7959..8366d8f12e5 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -35,6 +35,44 @@ struct seq_operations {
#define SEQ_SKIP 1
+/**
+ * seq_get_buf - get buffer to write arbitrary data to
+ * @m: the seq_file handle
+ * @bufp: the beginning of the buffer is stored here
+ *
+ * Return the number of bytes available in the buffer, or zero if
+ * there's no space.
+ */
+static inline size_t seq_get_buf(struct seq_file *m, char **bufp)
+{
+ BUG_ON(m->count > m->size);
+ if (m->count < m->size)
+ *bufp = m->buf + m->count;
+ else
+ *bufp = NULL;
+
+ return m->size - m->count;
+}
+
+/**
+ * seq_commit - commit data to the buffer
+ * @m: the seq_file handle
+ * @num: the number of bytes to commit
+ *
+ * Commit @num bytes of data written to a buffer previously acquired
+ * by seq_get_buf(). To signal an error condition, or that the data
+ * didn't fit in the available space, pass a negative @num value.
+ */
+static inline void seq_commit(struct seq_file *m, int num)
+{
+ if (num < 0) {
+ m->count = m->size;
+ } else {
+ BUG_ON(m->count + num > m->size);
+ m->count += num;
+ }
+}
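seq_get_buf() and seq_commit() form a reserve/commit pair for callers that want to format data into the seq_file buffer themselves. A hedged sketch of a ->show() method using them (the function name and output are hypothetical):

static int example_seq_show(struct seq_file *m, void *v)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int len = -1;

	if (buf)
		len = scnprintf(buf, size, "example output line\n");

	/*
	 * A negative len marks the buffer as full, so seq_read() will
	 * retry with a larger buffer.
	 */
	seq_commit(m, len);
	return 0;
}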
+
char *mangle_path(char *s, char *p, char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d58e460844d..fe661afe071 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -477,7 +477,7 @@ static inline int uart_handle_break(struct uart_port *port)
/**
* uart_handle_dcd_change - handle a change of carrier detect state
- * @port: uart_port structure for the open port
+ * @uport: uart_port structure for the open port
* @status: new carrier detect status, nonzero if active
*/
static inline void
@@ -503,7 +503,7 @@ uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
/**
* uart_handle_cts_change - handle a change of clear-to-send state
- * @port: uart_port structure for the open port
+ * @uport: uart_port structure for the open port
* @status: new clear to send status, nonzero if active
*/
static inline void
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
new file mode 100644
index 00000000000..9a6f7607174
--- /dev/null
+++ b/include/linux/sfi.h
@@ -0,0 +1,206 @@
+/* sfi.h Simple Firmware Interface */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _LINUX_SFI_H
+#define _LINUX_SFI_H
+
+/* Table signatures reserved by the SFI specification */
+#define SFI_SIG_SYST "SYST"
+#define SFI_SIG_FREQ "FREQ"
+#define SFI_SIG_IDLE "IDLE"
+#define SFI_SIG_CPUS "CPUS"
+#define SFI_SIG_MTMR "MTMR"
+#define SFI_SIG_MRTC "MRTC"
+#define SFI_SIG_MMAP "MMAP"
+#define SFI_SIG_APIC "APIC"
+#define SFI_SIG_XSDT "XSDT"
+#define SFI_SIG_WAKE "WAKE"
+#define SFI_SIG_SPIB "SPIB"
+#define SFI_SIG_I2CB "I2CB"
+#define SFI_SIG_GPEM "GPEM"
+
+#define SFI_SIGNATURE_SIZE 4
+#define SFI_OEM_ID_SIZE 6
+#define SFI_OEM_TABLE_ID_SIZE 8
+
+#define SFI_SYST_SEARCH_BEGIN 0x000E0000
+#define SFI_SYST_SEARCH_END 0x000FFFFF
+
+#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
+ ((ptable->header.len - sizeof(struct sfi_table_header)) / \
+ (sizeof(entry_type)))
+/*
+ * Table structures must be byte-packed to match the SFI specification,
+ * as they are provided by the BIOS.
+ */
+struct sfi_table_header {
+ char sig[SFI_SIGNATURE_SIZE];
+ u32 len;
+ u8 rev;
+ u8 csum;
+ char oem_id[SFI_OEM_ID_SIZE];
+ char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
+} __packed;
+
+struct sfi_table_simple {
+ struct sfi_table_header header;
+ u64 pentry[1];
+} __packed;
+
+/* Comply with UEFI spec 2.1 */
+struct sfi_mem_entry {
+ u32 type;
+ u64 phys_start;
+ u64 virt_start;
+ u64 pages;
+ u64 attrib;
+} __packed;
+
+struct sfi_cpu_table_entry {
+ u32 apic_id;
+} __packed;
+
+struct sfi_cstate_table_entry {
+ u32 hint; /* MWAIT hint */
+ u32 latency; /* latency in ms */
+} __packed;
+
+struct sfi_apic_table_entry {
+ u64 phys_addr; /* phy base addr for APIC reg */
+} __packed;
+
+struct sfi_freq_table_entry {
+ u32 freq_mhz; /* in MHZ */
+ u32 latency; /* transition latency in ms */
+ u32 ctrl_val; /* value to write to PERF_CTL */
+} __packed;
+
+struct sfi_wake_table_entry {
+ u64 phys_addr; /* pointer to where the wake vector is located */
+} __packed;
+
+struct sfi_timer_table_entry {
+ u64 phys_addr; /* phy base addr for the timer */
+ u32 freq_hz; /* in HZ */
+ u32 irq;
+} __packed;
+
+struct sfi_rtc_table_entry {
+ u64 phys_addr; /* phy base addr for the RTC */
+ u32 irq;
+} __packed;
+
+struct sfi_spi_table_entry {
+ u16 host_num; /* attached to host 0, 1...*/
+ u16 cs; /* chip select */
+ u16 irq_info;
+ char name[16];
+ u8 dev_info[10];
+} __packed;
+
+struct sfi_i2c_table_entry {
+ u16 host_num;
+ u16 addr; /* slave addr */
+ u16 irq_info;
+ char name[16];
+ u8 dev_info[10];
+} __packed;
+
+struct sfi_gpe_table_entry {
+ u16 logical_id; /* logical id */
+ u16 phys_id; /* physical GPE id */
+} __packed;
+
+
+typedef int (*sfi_table_handler) (struct sfi_table_header *table);
+
+#ifdef CONFIG_SFI
+extern void __init sfi_init(void);
+extern int __init sfi_platform_init(void);
+extern void __init sfi_init_late(void);
+extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
+ sfi_table_handler handler);
+
+extern int sfi_disabled;
+static inline void disable_sfi(void)
+{
+ sfi_disabled = 1;
+}
+
+#else /* !CONFIG_SFI */
+
+static inline void sfi_init(void)
+{
+}
+
+static inline void sfi_init_late(void)
+{
+}
+
+#define sfi_disabled 0
+
+static inline int sfi_table_parse(char *signature, char *oem_id,
+ char *oem_table_id,
+ sfi_table_handler handler)
+{
+ return -1;
+}
+
+#endif /* !CONFIG_SFI */
+
+#endif /*_LINUX_SFI_H*/
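sfi_table_parse() looks up the table whose signature (and, optionally, OEM ids) match and hands it to the supplied handler. A hedged sketch of a handler that walks the CPUS table with SFI_GET_NUM_ENTRIES(); the function names are hypothetical:

#include <linux/kernel.h>
#include <linux/sfi.h>

static int __init sfi_parse_cpus(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
	struct sfi_cpu_table_entry *pentry;
	int i, num;

	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
	pentry = (struct sfi_cpu_table_entry *)sb->pentry;

	for (i = 0; i < num; i++, pentry++)
		pr_info("SFI: CPU with APIC id 0x%x\n", pentry->apic_id);

	return 0;
}

static int __init example_platform_init(void)
{
	return sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus);
}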
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
new file mode 100644
index 00000000000..c4a5a8cd446
--- /dev/null
+++ b/include/linux/sfi_acpi.h
@@ -0,0 +1,93 @@
+/* sfi_acpi.h Simple Firmware Interface */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _LINUX_SFI_ACPI_H
+#define _LINUX_SFI_ACPI_H
+
+#ifdef CONFIG_SFI
+#include <acpi/acpi.h> /* struct acpi_table_header */
+
+extern int sfi_acpi_table_parse(char *signature, char *oem_id,
+ char *oem_table_id,
+ int (*handler)(struct acpi_table_header *));
+
+static inline int acpi_sfi_table_parse(char *signature,
+ int (*handler)(struct acpi_table_header *))
+{
+ if (!acpi_table_parse(signature, handler))
+ return 0;
+
+ return sfi_acpi_table_parse(signature, NULL, NULL, handler);
+}
+#else /* !CONFIG_SFI */
+
+static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
+ char *oem_table_id,
+ int (*handler)(struct acpi_table_header *))
+{
+ return -1;
+}
+
+static inline int acpi_sfi_table_parse(char *signature,
+ int (*handler)(struct acpi_table_header *))
+{
+ return acpi_table_parse(signature, handler);
+}
+#endif /* !CONFIG_SFI */
+
+#endif /*_LINUX_SFI_ACPI_H*/
diff --git a/include/linux/signal.h b/include/linux/signal.h
index c7552836bd9..ab9272cc270 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -233,6 +233,8 @@ static inline int valid_signal(unsigned long sig)
}
extern int next_signal(struct sigpending *pending, sigset_t *mask);
+extern int do_send_sig_info(int sig, struct siginfo *info,
+ struct task_struct *p, bool group);
extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9e3d8af0920..39c64bae776 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -73,15 +73,6 @@ int smp_call_function(void(*func)(void *info), void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
void (*func)(void *info), void *info, bool wait);
-/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
-static inline int
-smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
- int wait)
-{
- smp_call_function_many(&mask, func, info, wait);
- return 0;
-}
-
void __smp_call_function_single(int cpuid, struct call_single_data *data,
int wait);
@@ -144,8 +135,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_mask(mask, func, info, wait) \
- (up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void init_call_single_data(void)
diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h
new file mode 100644
index 00000000000..82ffccd6fbe
--- /dev/null
+++ b/include/linux/spi/mc33880.h
@@ -0,0 +1,10 @@
+#ifndef LINUX_SPI_MC33880_H
+#define LINUX_SPI_MC33880_H
+
+struct mc33880_platform_data {
+ /* number assigned to the first GPIO */
+ unsigned base;
+};
+
+#endif
+
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c47c4b4da97..97b60b37f44 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -20,6 +20,7 @@
#define __LINUX_SPI_H
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -86,7 +87,7 @@ struct spi_device {
int irq;
void *controller_state;
void *controller_data;
- char modalias[32];
+ char modalias[SPI_NAME_SIZE];
/*
* likely need more hooks for more protocol options affecting how
@@ -145,6 +146,7 @@ struct spi_message;
/**
* struct spi_driver - Host side "protocol" driver
+ * @id_table: List of SPI devices supported by this driver
* @probe: Binds this driver to the spi device. Drivers can verify
* that the device is actually present, and may need to configure
* characteristics (such as bits_per_word) which weren't needed for
@@ -170,6 +172,7 @@ struct spi_message;
* MMC, RTC, filesystem character device nodes, and hardware monitoring.
*/
struct spi_driver {
+ const struct spi_device_id *id_table;
int (*probe)(struct spi_device *spi);
int (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
@@ -207,6 +210,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* each slave has a chipselect signal, but it's common that not
* every chipselect is connected to a slave.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
+ * @mode_bits: flags understood by this controller driver
+ * @flags: other constraints relevant to this driver
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
@@ -253,6 +258,8 @@ struct spi_master {
/* other constraints relevant to this driver */
u16 flags;
#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
+#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
+#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
/* Setup mode and clock, etc (spi driver may call many times).
*
@@ -533,42 +540,7 @@ static inline void spi_message_free(struct spi_message *m)
}
extern int spi_setup(struct spi_device *spi);
-
-/**
- * spi_async - asynchronous SPI transfer
- * @spi: device with which data will be exchanged
- * @message: describes the data transfers, including completion callback
- * Context: any (irqs may be blocked, etc)
- *
- * This call may be used in_irq and other contexts which can't sleep,
- * as well as from task contexts which can sleep.
- *
- * The completion callback is invoked in a context which can't sleep.
- * Before that invocation, the value of message->status is undefined.
- * When the callback is issued, message->status holds either zero (to
- * indicate complete success) or a negative error code. After that
- * callback returns, the driver which issued the transfer request may
- * deallocate the associated memory; it's no longer in use by any SPI
- * core or controller driver code.
- *
- * Note that although all messages to a spi_device are handled in
- * FIFO order, messages may go to different devices in other orders.
- * Some device might be higher priority, or have various "hard" access
- * time requirements, for example.
- *
- * On detection of any fault during the transfer, processing of
- * the entire message is aborted, and the device is deselected.
- * Until returning from the associated message completion callback,
- * no other spi_message queued to that device will be processed.
- * (This rule applies equally to all the synchronous transfer calls,
- * which are wrappers around this core asynchronous primitive.)
- */
-static inline int
-spi_async(struct spi_device *spi, struct spi_message *message)
-{
- message->spi = spi;
- return spi->master->transfer(spi, message);
-}
+extern int spi_async(struct spi_device *spi, struct spi_message *message);
/*---------------------------------------------------------------------------*/
@@ -732,7 +704,7 @@ struct spi_board_info {
* controller_data goes to spi_device.controller_data,
* irq is copied too
*/
- char modalias[32];
+ char modalias[SPI_NAME_SIZE];
const void *platform_data;
void *controller_data;
int irq;
@@ -800,4 +772,7 @@ spi_unregister_device(struct spi_device *spi)
device_unregister(&spi->dev);
}
+extern const struct spi_device_id *
+spi_get_device_id(const struct spi_device *sdev);
+
#endif /* __LINUX_SPI_H */
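The new id_table field lets a single SPI protocol driver bind to several device names and tell the variants apart via driver_data, much like I2C drivers do. A rough sketch under stated assumptions: the names and callbacks are hypothetical, and MODULE_DEVICE_TABLE(spi, ...) autoloading support is assumed to arrive with this series.

static int example_probe(struct spi_device *spi)
{
	/* spi_get_device_id(spi)->driver_data identifies the variant */
	return 0;
}

static int example_remove(struct spi_device *spi)
{
	return 0;
}

static const struct spi_device_id example_spi_ids[] = {
	{ "examplechip",   0 },
	{ "examplechip-b", 1 },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static struct spi_driver example_spi_driver = {
	.driver = {
		.name	= "examplechip",
		.owner	= THIS_MODULE,
	},
	.id_table	= example_spi_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};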
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 3f632182d8e..996df4dac7d 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -111,7 +111,7 @@ struct rpc_credops {
void (*crdestroy)(struct rpc_cred *);
int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
- void (*crbind)(struct rpc_task *, struct rpc_cred *);
+ void (*crbind)(struct rpc_task *, struct rpc_cred *, int);
__be32 * (*crmarshal)(struct rpc_task *, __be32 *);
int (*crrefresh)(struct rpc_task *);
__be32 * (*crvalidate)(struct rpc_task *, __be32 *);
@@ -140,7 +140,7 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int);
-void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *);
+void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
void put_rpccred(struct rpc_cred *);
void rpcauth_unbindcred(struct rpc_task *);
__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index ab3f6e90caa..8ed9642a5a7 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -22,6 +22,7 @@
#include <linux/sunrpc/timer.h>
#include <asm/signal.h>
#include <linux/path.h>
+#include <net/ipv6.h>
struct rpc_inode;
@@ -113,6 +114,7 @@ struct rpc_create_args {
rpc_authflavor_t authflavor;
unsigned long flags;
char *client_name;
+ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
};
/* Values for "flags" field */
@@ -188,5 +190,117 @@ static inline void rpc_set_port(struct sockaddr *sap,
#define IPV6_SCOPE_DELIMITER '%'
#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
+static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
+ const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
+
+ return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
+}
+
+static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
+ struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+ dsin->sin_family = ssin->sin_family;
+ dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
+ return true;
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
+ const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
+ return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
+ struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
+
+ dsin6->sin6_family = ssin6->sin6_family;
+ ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr);
+ return true;
+}
+#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ return false;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ return false;
+}
+#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+
+/**
+ * rpc_cmp_addr - compare the address portion of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ *
+ * Just compares the family and address portion. Ignores port, scope, etc.
+ * Returns true if the addrs are equal, false if they aren't.
+ */
+static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ if (sap1->sa_family == sap2->sa_family) {
+ switch (sap1->sa_family) {
+ case AF_INET:
+ return __rpc_cmp_addr4(sap1, sap2);
+ case AF_INET6:
+ return __rpc_cmp_addr6(sap1, sap2);
+ }
+ }
+ return false;
+}
+
+/**
+ * rpc_copy_addr - copy the address portion of one sockaddr to another
+ * @dst: destination sockaddr
+ * @src: source sockaddr
+ *
+ * Just copies the address portion and family. Ignores port, scope, etc.
+ * Caller is responsible for making certain that dst is large enough to hold
+ * the address in src. Returns true if address family is supported. Returns
+ * false otherwise.
+ */
+static inline bool rpc_copy_addr(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ switch (src->sa_family) {
+ case AF_INET:
+ return __rpc_copy_addr4(dst, src);
+ case AF_INET6:
+ return __rpc_copy_addr6(dst, src);
+ }
+ return false;
+}
+
+/**
+ * rpc_get_scope_id - return scopeid for a given sockaddr
+ * @sa: sockaddr to get scopeid from
+ *
+ * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
+ * not an AF_INET6 address.
+ */
+static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+{
+ if (sa->sa_family != AF_INET6)
+ return 0;
+
+ return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
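
Editor's note: the helpers added above compare or copy only the family and address bytes, ignoring port and scope. A minimal userspace sketch of the same semantics follows (it uses <netinet/in.h> rather than the kernel types, and the function names here are illustrative, not part of the kernel API):

#include <stdio.h>
#include <stdbool.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Same idea as rpc_cmp_addr(): families must match, only the address matters. */
static bool cmp_addr4(const struct sockaddr *a, const struct sockaddr *b)
{
	const struct sockaddr_in *a4 = (const struct sockaddr_in *)a;
	const struct sockaddr_in *b4 = (const struct sockaddr_in *)b;

	return a->sa_family == b->sa_family &&
	       a4->sin_addr.s_addr == b4->sin_addr.s_addr;
}

int main(void)
{
	struct sockaddr_in x = { .sin_family = AF_INET, .sin_port = htons(2049) };
	struct sockaddr_in y = { .sin_family = AF_INET, .sin_port = htons(631) };

	inet_pton(AF_INET, "192.0.2.1", &x.sin_addr);
	inet_pton(AF_INET, "192.0.2.1", &y.sin_addr);

	/* Ports differ, addresses match: prints "equal". */
	printf("%s\n", cmp_addr4((struct sockaddr *)&x,
				 (struct sockaddr *)&y) ? "equal" : "different");
	return 0;
}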
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index ea8009695c6..52e8cb0a756 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -94,8 +94,6 @@ struct svc_serv {
struct module * sv_module; /* optional module to count when
* adding threads */
svc_thread_fn sv_function; /* main function for threads */
- unsigned int sv_drc_max_pages; /* Total pages for DRC */
- unsigned int sv_drc_pages_used;/* DRC pages used */
#if defined(CONFIG_NFS_V4_1)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 2223ae0b5ed..5f4e18b3ce7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -65,6 +65,7 @@ struct svc_xprt {
size_t xpt_locallen; /* length of address */
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
+ struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
};
int svc_reg_xprt_class(struct svc_xprt_class *);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 04dba23c59f..1b353a76c30 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,6 +28,7 @@ struct svc_sock {
/* private TCP part */
u32 sk_reclen; /* length of record */
u32 sk_tcplen; /* current read length */
+ struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */
};
/*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 7da466ba4b0..f5cc0898bc5 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -11,6 +11,7 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/scatterlist.h>
/*
@@ -117,14 +118,14 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le
static inline __be32 *
xdr_encode_hyper(__be32 *p, __u64 val)
{
- *(__be64 *)p = cpu_to_be64(val);
+ put_unaligned_be64(val, p);
return p + 2;
}
static inline __be32 *
xdr_decode_hyper(__be32 *p, __u64 *valp)
{
- *valp = be64_to_cpup((__be64 *)p);
+ *valp = get_unaligned_be64(p);
return p + 2;
}
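
Editor's note: the switch to put_unaligned_be64()/get_unaligned_be64() keeps the XDR "hyper" wire format byte-identical while tolerating __be32 pointers that are not 8-byte aligned (an XDR stream only guarantees 4-byte alignment). A small userspace sketch of the resulting byte layout, with plain shifts standing in for the kernel unaligned helpers:

#include <stdio.h>
#include <stdint.h>

/* Encode a 64-bit hyper big-endian into 8 bytes at p, whatever its alignment. */
static void encode_hyper(uint8_t *p, uint64_t val)
{
	for (int i = 0; i < 8; i++)
		p[i] = val >> (56 - 8 * i);
}

int main(void)
{
	uint8_t buf[12];
	uint64_t v = 0x0102030405060708ULL;

	/* Deliberately misaligned destination, as a hyper in a stream may be. */
	encode_hyper(buf + 2, v);
	for (int i = 2; i < 10; i++)
		printf("%02x ", buf[i]);	/* prints: 01 02 03 04 05 06 07 08 */
	printf("\n");
	return 0;
}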
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index c090df44257..6f9457a75b8 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -124,6 +124,23 @@ struct rpc_xprt_ops {
void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
};
+/*
+ * RPC transport identifiers
+ *
+ * To preserve compatibility with the historical use of raw IP protocol
+ * IDs for transport selection, UDP and TCP identifiers are specified
+ * with the previous values. No such restriction exists for new transports,
+ * except that they may not collide with these values (17 and 6,
+ * respectively).
+ */
+#define XPRT_TRANSPORT_BC (1 << 31)
+enum xprt_transports {
+ XPRT_TRANSPORT_UDP = IPPROTO_UDP,
+ XPRT_TRANSPORT_TCP = IPPROTO_TCP,
+ XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC,
+ XPRT_TRANSPORT_RDMA = 256
+};
+
struct rpc_xprt {
struct kref kref; /* Reference count */
struct rpc_xprt_ops * ops; /* transport methods */
@@ -179,6 +196,7 @@ struct rpc_xprt {
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
#if defined(CONFIG_NFS_V4_1)
struct svc_serv *bc_serv; /* The RPC service which will */
/* process the callback */
@@ -231,6 +249,7 @@ struct xprt_create {
struct sockaddr * srcaddr; /* optional local address */
struct sockaddr * dstaddr; /* remote peer address */
size_t addrlen;
+ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
};
struct xprt_class {
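
Editor's note: with the identifiers consolidated here, the values work out to XPRT_TRANSPORT_UDP = 17, XPRT_TRANSPORT_TCP = 6, XPRT_TRANSPORT_BC_TCP = 6 with bit 31 set, and XPRT_TRANSPORT_RDMA = 256, so a new transport only has to avoid 17 and 6. A standalone sketch that just prints those values (the bit-31 flag is written with an unsigned literal here purely to keep the userspace demo well-defined; the kernel header uses (1 << 31)):

#include <stdio.h>
#include <netinet/in.h>		/* IPPROTO_UDP, IPPROTO_TCP */

#define XPRT_TRANSPORT_BC	(1U << 31)
#define XPRT_TRANSPORT_UDP	IPPROTO_UDP
#define XPRT_TRANSPORT_TCP	IPPROTO_TCP
#define XPRT_TRANSPORT_BC_TCP	(IPPROTO_TCP | XPRT_TRANSPORT_BC)
#define XPRT_TRANSPORT_RDMA	256

int main(void)
{
	/* prints: 17 6 0x80000006 256 */
	printf("%d %d %#x %d\n", XPRT_TRANSPORT_UDP, XPRT_TRANSPORT_TCP,
	       XPRT_TRANSPORT_BC_TCP, XPRT_TRANSPORT_RDMA);
	return 0;
}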
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 54a379c9e8e..c2f04e1ae15 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -41,11 +41,6 @@
#define _LINUX_SUNRPC_XPRTRDMA_H
/*
- * RPC transport identifier for RDMA
- */
-#define XPRT_TRANSPORT_RDMA 256
-
-/*
* rpcbind (v3+) RDMA netid.
*/
#define RPCBIND_NETID_RDMA "rdma"
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index c2a46c45c8f..3f14a02e9cc 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -13,17 +13,6 @@ int init_socket_xprt(void);
void cleanup_socket_xprt(void);
/*
- * RPC transport identifiers for UDP, TCP
- *
- * To preserve compatibility with the historical use of raw IP protocol
- * id's for transport selection, these are specified with the previous
- * values. No such restriction exists for new transports, except that
- * they may not collide with these values (17 and 6, respectively).
- */
-#define XPRT_TRANSPORT_UDP IPPROTO_UDP
-#define XPRT_TRANSPORT_TCP IPPROTO_TCP
-
-/*
* RPC slot table sizes for UDP, TCP transports
*/
extern unsigned int xprt_udp_slot_table_entries;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7c15334f3ff..4ec90019c1a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -34,15 +34,37 @@ static inline int current_is_kswapd(void)
* the type/offset into the pte as 5/27 as well.
*/
#define MAX_SWAPFILES_SHIFT 5
-#ifndef CONFIG_MIGRATION
-#define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT)
+
+/*
+ * Use some of the swap file numbers for other purposes. This
+ * is a convenient way to hook into the VM to trigger special
+ * actions on faults.
+ */
+
+/*
+ * NUMA node memory migration support
+ */
+#ifdef CONFIG_MIGRATION
+#define SWP_MIGRATION_NUM 2
+#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#else
+#define SWP_MIGRATION_NUM 0
+#endif
+
+/*
+ * Handling of hardware poisoned pages with memory corruption.
+ */
+#ifdef CONFIG_MEMORY_FAILURE
+#define SWP_HWPOISON_NUM 1
+#define SWP_HWPOISON MAX_SWAPFILES
#else
-/* Use last two entries for page migration swap entries */
-#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2)
-#define SWP_MIGRATION_READ MAX_SWAPFILES
-#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1)
+#define SWP_HWPOISON_NUM 0
#endif
+#define MAX_SWAPFILES \
+ ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+
/*
* Magic header for a swap area. The first part of the union is
* what the swap magic looks like for the old (limited to 128MB)
@@ -217,6 +239,11 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
unsigned int swappiness);
+extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+ gfp_t gfp_mask, bool noswap,
+ unsigned int swappiness,
+ struct zone *zone,
+ int nid);
extern int __isolate_lru_page(struct page *page, int mode, int file);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
@@ -240,7 +267,7 @@ extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void scan_mapping_unevictable_pages(struct address_space *);
extern unsigned long scan_unevictable_pages;
-extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
+extern int scan_unevictable_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
@@ -419,10 +446,22 @@ static inline swp_entry_t get_swap_page(void)
}
/* linux/mm/thrash.c */
-#define put_swap_token(mm) do { } while (0)
-#define grab_swap_token(mm) do { } while (0)
-#define has_swap_token(mm) 0
-#define disable_swap_token() do { } while (0)
+static inline void put_swap_token(struct mm_struct *mm)
+{
+}
+
+static inline void grab_swap_token(struct mm_struct *mm)
+{
+}
+
+static inline int has_swap_token(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void disable_swap_token(void)
+{
+}
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
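
Editor's note: worked through with MAX_SWAPFILES_SHIFT = 5 and both CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE enabled, the definitions above give MAX_SWAPFILES = 32 - 2 - 1 = 29, SWP_HWPOISON = 29, SWP_MIGRATION_READ = 30 and SWP_MIGRATION_WRITE = 31; with both options disabled, all 32 type values stay available for ordinary swap files. A small standalone sketch of that arithmetic (both options assumed enabled):

#include <stdio.h>

#define MAX_SWAPFILES_SHIFT	5
/* Assume CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE are both set. */
#define SWP_MIGRATION_NUM	2
#define SWP_HWPOISON_NUM	1
#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
#define SWP_HWPOISON		MAX_SWAPFILES
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)

int main(void)
{
	/* 29 ordinary swap types, then 29 = hwpoison, 30/31 = migration */
	printf("MAX_SWAPFILES=%d hwpoison=%d mig_read=%d mig_write=%d\n",
	       MAX_SWAPFILES, SWP_HWPOISON,
	       SWP_MIGRATION_READ, SWP_MIGRATION_WRITE);
	return 0;
}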
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6ec39ab27b4..cd42e30b7c6 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -131,3 +131,41 @@ static inline int is_write_migration_entry(swp_entry_t entry)
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_HWPOISON;
+}
+#else
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
+static inline int non_swap_entry(swp_entry_t entry)
+{
+ return swp_type(entry) >= MAX_SWAPFILES;
+}
+#else
+static inline int non_swap_entry(swp_entry_t entry)
+{
+ return 0;
+}
+#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7d9803cbb20..a990ace1a83 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,7 +55,7 @@ struct compat_timeval;
struct robust_list_head;
struct getcpu_cache;
struct old_linux_dirent;
-struct perf_counter_attr;
+struct perf_event_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -460,8 +460,7 @@ asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
void __user *data);
asmlinkage long sys_umount(char __user *name, int flags);
asmlinkage long sys_oldumount(char __user *name);
-asmlinkage long sys_truncate(const char __user *path,
- unsigned long length);
+asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
asmlinkage long sys_stat(char __user *filename,
struct __old_kernel_stat __user *statbuf);
@@ -877,7 +876,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
-asmlinkage long sys_perf_counter_open(
- struct perf_counter_attr __user *attr_uptr,
+asmlinkage long sys_perf_event_open(
+ struct perf_event_attr __user *attr_uptr,
pid_t pid, int cpu, int group_fd, unsigned long flags);
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index e76d3b22a46..1e4743ee683 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
#include <linux/compiler.h>
-struct file;
struct completion;
#define CTL_MAXNAME 10 /* how many path components do we allow in a
@@ -977,25 +976,25 @@ typedef int ctl_handler (struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
-typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp,
+typedef int proc_handler (struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-extern int proc_dostring(struct ctl_table *, int, struct file *,
+extern int proc_dostring(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec(struct ctl_table *, int, struct file *,
+extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *,
+extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *,
+extern int proc_dointvec_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *,
+extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *,
+extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_minmax(struct ctl_table *, int, struct file *,
+extern int proc_doulongvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
- struct file *, void __user *, size_t *, loff_t *);
+ void __user *, size_t *, loff_t *);
extern int do_sysctl (int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
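
Editor's note: every proc handler in the tree follows this signature change; the struct file * argument disappears both from the proc_handler typedef and from the proc_dointvec() family, and the same pattern repeats in the mm, writeback and networking hunks below. A hedged sketch of a custom handler under the new signature (the variable and handler names are made up for illustration):

static int example_value;

/* New-style handler: no struct file * parameter anymore. */
static int example_sysctl_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!ret && write)
		pr_info("example_value is now %d\n", example_value);
	return ret;
}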
diff --git a/include/linux/time.h b/include/linux/time.h
index 56787c09334..fe04e5ef6a5 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -155,6 +155,34 @@ extern void timekeeping_leap_insert(int leapsecond);
struct tms;
extern void do_sys_times(struct tms *);
+/*
+ * Similar to the struct tm in userspace <time.h>, but it needs to be here so
+ * that the kernel source is self contained.
+ */
+struct tm {
+ /*
+ * the number of seconds after the minute, normally in the range
+ * 0 to 59, but can be up to 60 to allow for leap seconds
+ */
+ int tm_sec;
+ /* the number of minutes after the hour, in the range 0 to 59*/
+ int tm_min;
+ /* the number of hours past midnight, in the range 0 to 23 */
+ int tm_hour;
+ /* the day of the month, in the range 1 to 31 */
+ int tm_mday;
+ /* the number of months since January, in the range 0 to 11 */
+ int tm_mon;
+ /* the number of years since 1900 */
+ long tm_year;
+ /* the number of days since Sunday, in the range 0 to 6 */
+ int tm_wday;
+ /* the number of days since January 1, in the range 0 to 365 */
+ int tm_yday;
+};
+
+void time_to_tm(time_t totalsecs, int offset, struct tm *result);
+
/**
* timespec_to_ns - Convert timespec to nanoseconds
* @ts: pointer to the timespec variable to be converted
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 809b26c0709..fc0bf3edeb6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -211,12 +211,6 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
-#ifndef topology_thread_siblings
-#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu)
-#endif
-#ifndef topology_core_siblings
-#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
-#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
#endif
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 17ba82efa48..1eb44a924e5 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -1,7 +1,7 @@
/*
* Tracing hooks
*
- * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -463,22 +463,38 @@ static inline int tracehook_get_signal(struct task_struct *task,
/**
* tracehook_notify_jctl - report about job control stop/continue
- * @notify: nonzero if this is the last thread in the group to stop
+ * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED
* @why: %CLD_STOPPED or %CLD_CONTINUED
*
* This is called when we might call do_notify_parent_cldstop().
- * It's called when about to stop for job control; we are already in
- * %TASK_STOPPED state, about to call schedule(). It's also called when
- * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made.
*
- * Return nonzero to generate a %SIGCHLD with @why, which is
- * normal if @notify is nonzero.
+ * @notify is zero if we would not ordinarily send a %SIGCHLD,
+ * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
*
- * Called with no locks held.
+ * @why is %CLD_STOPPED when about to stop for job control;
+ * we are already in %TASK_STOPPED state, about to call schedule().
+ * It might also be that we have just exited (check %PF_EXITING),
+ * but need to report that a group-wide stop is complete.
+ *
+ * @why is %CLD_CONTINUED when waking up after job control stop and
+ * ready to make a delayed @notify report.
+ *
+ * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
+ *
+ * Called with the siglock held.
*/
static inline int tracehook_notify_jctl(int notify, int why)
{
- return notify || (current->ptrace & PT_PTRACED);
+ return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
+}
+
+/**
+ * tracehook_finish_jctl - report about return from job control stop
+ *
+ * This is called by do_signal_stop() after wakeup.
+ */
+static inline void tracehook_finish_jctl(void)
+{
}
#define DEATH_REAP -1
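
Editor's note: the new return convention reduces to this: if @notify is already %CLD_STOPPED or %CLD_CONTINUED it is returned unchanged; if @notify is zero, a ptraced task gets @why back (so the tracer still sees a %SIGCHLD) and an untraced task gets zero (no signal). A tiny standalone sketch of that ?: logic, with the CLD_* constants stubbed to their usual Linux values for illustration:

#include <stdio.h>

#define CLD_STOPPED	5
#define CLD_CONTINUED	6

/* Mirrors tracehook_notify_jctl(): notify ?: (ptraced ? why : 0) */
static int notify_jctl(int notify, int why, int ptraced)
{
	return notify ? notify : (ptraced ? why : 0);
}

int main(void)
{
	printf("%d\n", notify_jctl(0, CLD_STOPPED, 1));		/* 5: traced, report the stop */
	printf("%d\n", notify_jctl(0, CLD_STOPPED, 0));		/* 0: nothing to report */
	printf("%d\n", notify_jctl(CLD_CONTINUED, CLD_CONTINUED, 0)); /* 6: explicit notify */
	return 0;
}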
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index ae779bb8cc0..adb44066680 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -26,6 +26,7 @@
#include <sound/ac97_codec.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
/*
* UCB1400 AC-link registers
@@ -82,6 +83,17 @@
#define UCB_ID 0x7e
#define UCB_ID_1400 0x4304
+struct ucb1400_gpio_data {
+ int gpio_offset;
+ int (*gpio_setup)(struct device *dev, int ngpio);
+ int (*gpio_teardown)(struct device *dev, int ngpio);
+};
+
+struct ucb1400_gpio {
+ struct gpio_chip gc;
+ struct snd_ac97 *ac97;
+};
+
struct ucb1400_ts {
struct input_dev *ts_idev;
struct task_struct *ts_task;
@@ -95,6 +107,7 @@ struct ucb1400_ts {
struct ucb1400 {
struct platform_device *ucb1400_ts;
+ struct platform_device *ucb1400_gpio;
};
static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg)
@@ -147,4 +160,10 @@ static inline void ucb1400_adc_disable(struct snd_ac97 *ac97)
unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
int adcsync);
+#ifdef CONFIG_GPIO_UCB1400
+void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data);
+#else
+static inline void ucb1400_gpio_set_data(struct ucb1400_gpio_data *data) {}
+#endif
+
#endif
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
index 46dd12c5709..9356b24223a 100644
--- a/include/linux/unaligned/be_byteshift.h
+++ b/include/linux/unaligned/be_byteshift.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
static inline u16 __get_unaligned_be16(const u8 *p)
{
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
index 59777e951ba..be376fb79b6 100644
--- a/include/linux/unaligned/le_byteshift.h
+++ b/include/linux/unaligned/le_byteshift.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
static inline u16 __get_unaligned_le16(const u8 *p)
{
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a8fe05f224e..a34fa89f147 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -195,7 +195,7 @@ struct usb_interface {
struct device dev; /* interface specific device info */
struct device *usb_dev;
- int pm_usage_cnt; /* usage counter for autosuspend */
+ atomic_t pm_usage_cnt; /* usage counter for autosuspend */
struct work_struct reset_ws; /* for resets in atomic context */
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
@@ -551,13 +551,13 @@ extern void usb_autopm_put_interface_async(struct usb_interface *intf);
static inline void usb_autopm_enable(struct usb_interface *intf)
{
- intf->pm_usage_cnt = 0;
+ atomic_set(&intf->pm_usage_cnt, 0);
usb_autopm_set_interface(intf);
}
static inline void usb_autopm_disable(struct usb_interface *intf)
{
- intf->pm_usage_cnt = 1;
+ atomic_set(&intf->pm_usage_cnt, 1);
usb_autopm_set_interface(intf);
}
@@ -1036,9 +1036,10 @@ typedef void (*usb_complete_t)(struct urb *);
* @transfer_flags: A variety of flags may be used to affect how URB
* submission, unlinking, or operation are handled. Different
* kinds of URB can use different flags.
- * @transfer_buffer: This identifies the buffer to (or from) which
- * the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP
- * is set). This buffer must be suitable for DMA; allocate it with
+ * @transfer_buffer: This identifies the buffer to (or from) which the I/O
+ * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set
+ * (however, do not leave garbage in transfer_buffer even then).
+ * This buffer must be suitable for DMA; allocate it with
* kmalloc() or equivalent. For transfers to "in" endpoints, contents
* of this buffer will be modified. This buffer is used for the data
* stage of control transfers.
@@ -1071,7 +1072,7 @@ typedef void (*usb_complete_t)(struct urb *);
* @start_frame: Returns the initial frame for isochronous transfers.
* @number_of_packets: Lists the number of ISO transfer buffers.
* @interval: Specifies the polling interval for interrupt or isochronous
- * transfers. The units are frames (milliseconds) for for full and low
+ * transfers. The units are frames (milliseconds) for full and low
* speed devices, and microframes (1/8 millisecond) for highspeed ones.
* @error_count: Returns the number of ISO transfers that reported errors.
* @context: For use in completion functions. This normally points to
@@ -1104,9 +1105,15 @@ typedef void (*usb_complete_t)(struct urb *);
* allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map().
* When these transfer flags are provided, host controller drivers will
* attempt to use the dma addresses found in the transfer_dma and/or
- * setup_dma fields rather than determining a dma address themselves. (Note
- * that transfer_buffer and setup_packet must still be set because not all
- * host controllers use DMA, nor do virtual root hubs).
+ * setup_dma fields rather than determining a dma address themselves.
+ *
+ * Note that transfer_buffer must still be set if the controller
+ * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * to the root hub. If you have to transfer between a highmem zone and the
+ * device on such a controller, create a bounce buffer or bail out with an
+ * error. If transfer_buffer cannot be set (is in highmem) and the controller
+ * is DMA capable, assign NULL to it so that usbmon knows not to use the value.
+ * The setup_packet must always be set, so it cannot be located in highmem.
*
* Initialization:
*
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index b5744bc218a..eaf9dffe0a0 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -24,88 +24,78 @@
#define USB_SUBCLASS_AUDIOCONTROL 0x01
#define USB_SUBCLASS_AUDIOSTREAMING 0x02
#define USB_SUBCLASS_MIDISTREAMING 0x03
-#define USB_SUBCLASS_VENDOR_SPEC 0xff
-
-/* A.5 Audio Class-Specific AC interface Descriptor Subtypes*/
-#define HEADER 0x01
-#define INPUT_TERMINAL 0x02
-#define OUTPUT_TERMINAL 0x03
-#define MIXER_UNIT 0x04
-#define SELECTOR_UNIT 0x05
-#define FEATURE_UNIT 0x06
-#define PROCESSING_UNIT 0x07
-#define EXTENSION_UNIT 0x08
-
-#define AS_GENERAL 0x01
-#define FORMAT_TYPE 0x02
-#define FORMAT_SPECIFIC 0x03
-
-#define EP_GENERAL 0x01
-
-#define MS_GENERAL 0x01
-#define MIDI_IN_JACK 0x02
-#define MIDI_OUT_JACK 0x03
-
-/* endpoint attributes */
-#define EP_ATTR_MASK 0x0c
-#define EP_ATTR_ASYNC 0x04
-#define EP_ATTR_ADAPTIVE 0x08
-#define EP_ATTR_SYNC 0x0c
-
-/* cs endpoint attributes */
-#define EP_CS_ATTR_SAMPLE_RATE 0x01
-#define EP_CS_ATTR_PITCH_CONTROL 0x02
-#define EP_CS_ATTR_FILL_MAX 0x80
-
-/* Audio Class specific Request Codes */
-#define USB_AUDIO_SET_INTF 0x21
-#define USB_AUDIO_SET_ENDPOINT 0x22
-#define USB_AUDIO_GET_INTF 0xa1
-#define USB_AUDIO_GET_ENDPOINT 0xa2
-
-#define SET_ 0x00
-#define GET_ 0x80
-
-#define _CUR 0x1
-#define _MIN 0x2
-#define _MAX 0x3
-#define _RES 0x4
-#define _MEM 0x5
-
-#define SET_CUR (SET_ | _CUR)
-#define GET_CUR (GET_ | _CUR)
-#define SET_MIN (SET_ | _MIN)
-#define GET_MIN (GET_ | _MIN)
-#define SET_MAX (SET_ | _MAX)
-#define GET_MAX (GET_ | _MAX)
-#define SET_RES (SET_ | _RES)
-#define GET_RES (GET_ | _RES)
-#define SET_MEM (SET_ | _MEM)
-#define GET_MEM (GET_ | _MEM)
-
-#define GET_STAT 0xff
-
-#define USB_AC_TERMINAL_UNDEFINED 0x100
-#define USB_AC_TERMINAL_STREAMING 0x101
-#define USB_AC_TERMINAL_VENDOR_SPEC 0x1FF
+
+/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */
+#define UAC_HEADER 0x01
+#define UAC_INPUT_TERMINAL 0x02
+#define UAC_OUTPUT_TERMINAL 0x03
+#define UAC_MIXER_UNIT 0x04
+#define UAC_SELECTOR_UNIT 0x05
+#define UAC_FEATURE_UNIT 0x06
+#define UAC_PROCESSING_UNIT 0x07
+#define UAC_EXTENSION_UNIT 0x08
+
+/* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */
+#define UAC_AS_GENERAL 0x01
+#define UAC_FORMAT_TYPE 0x02
+#define UAC_FORMAT_SPECIFIC 0x03
+
+/* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */
+#define UAC_EP_GENERAL 0x01
+
+/* A.9 Audio Class-Specific Request Codes */
+#define UAC_SET_ 0x00
+#define UAC_GET_ 0x80
+
+#define UAC__CUR 0x1
+#define UAC__MIN 0x2
+#define UAC__MAX 0x3
+#define UAC__RES 0x4
+#define UAC__MEM 0x5
+
+#define UAC_SET_CUR (UAC_SET_ | UAC__CUR)
+#define UAC_GET_CUR (UAC_GET_ | UAC__CUR)
+#define UAC_SET_MIN (UAC_SET_ | UAC__MIN)
+#define UAC_GET_MIN (UAC_GET_ | UAC__MIN)
+#define UAC_SET_MAX (UAC_SET_ | UAC__MAX)
+#define UAC_GET_MAX (UAC_GET_ | UAC__MAX)
+#define UAC_SET_RES (UAC_SET_ | UAC__RES)
+#define UAC_GET_RES (UAC_GET_ | UAC__RES)
+#define UAC_SET_MEM (UAC_SET_ | UAC__MEM)
+#define UAC_GET_MEM (UAC_GET_ | UAC__MEM)
+
+#define UAC_GET_STAT 0xff
+
+/* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */
+#define UAC_MS_HEADER 0x01
+#define UAC_MIDI_IN_JACK 0x02
+#define UAC_MIDI_OUT_JACK 0x03
+
+/* MIDI - A.1 MS Class-Specific Endpoint Descriptor Subtypes */
+#define UAC_MS_GENERAL 0x01
+
+/* Terminals - 2.1 USB Terminal Types */
+#define UAC_TERMINAL_UNDEFINED 0x100
+#define UAC_TERMINAL_STREAMING 0x101
+#define UAC_TERMINAL_VENDOR_SPEC 0x1FF
/* Terminal Control Selectors */
/* 4.3.2 Class-Specific AC Interface Descriptor */
-struct usb_ac_header_descriptor {
+struct uac_ac_header_descriptor {
__u8 bLength; /* 8 + n */
__u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
- __u8 bDescriptorSubtype; /* USB_MS_HEADER */
+ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */
__le16 bcdADC; /* 0x0100 */
__le16 wTotalLength; /* includes Unit and Terminal desc. */
__u8 bInCollection; /* n */
__u8 baInterfaceNr[]; /* [n] */
} __attribute__ ((packed));
-#define USB_DT_AC_HEADER_SIZE(n) (8 + (n))
+#define UAC_DT_AC_HEADER_SIZE(n) (8 + (n))
/* As above, but more useful for defining your own descriptors: */
-#define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) \
-struct usb_ac_header_descriptor_##n { \
+#define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \
+struct uac_ac_header_descriptor_##n { \
__u8 bLength; \
__u8 bDescriptorType; \
__u8 bDescriptorSubtype; \
@@ -116,7 +106,7 @@ struct usb_ac_header_descriptor_##n { \
} __attribute__ ((packed))
/* 4.3.2.1 Input Terminal Descriptor */
-struct usb_input_terminal_descriptor {
+struct uac_input_terminal_descriptor {
__u8 bLength; /* in bytes: 12 */
__u8 bDescriptorType; /* CS_INTERFACE descriptor type */
__u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */
@@ -129,18 +119,19 @@ struct usb_input_terminal_descriptor {
__u8 iTerminal;
} __attribute__ ((packed));
-#define USB_DT_AC_INPUT_TERMINAL_SIZE 12
+#define UAC_DT_INPUT_TERMINAL_SIZE 12
-#define USB_AC_INPUT_TERMINAL_UNDEFINED 0x200
-#define USB_AC_INPUT_TERMINAL_MICROPHONE 0x201
-#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202
-#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203
-#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204
-#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205
-#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206
+/* Terminals - 2.2 Input Terminal Types */
+#define UAC_INPUT_TERMINAL_UNDEFINED 0x200
+#define UAC_INPUT_TERMINAL_MICROPHONE 0x201
+#define UAC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202
+#define UAC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203
+#define UAC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204
+#define UAC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205
+#define UAC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206
/* 4.3.2.2 Output Terminal Descriptor */
-struct usb_output_terminal_descriptor {
+struct uac_output_terminal_descriptor {
__u8 bLength; /* in bytes: 9 */
__u8 bDescriptorType; /* CS_INTERFACE descriptor type */
__u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */
@@ -151,23 +142,24 @@ struct usb_output_terminal_descriptor {
__u8 iTerminal;
} __attribute__ ((packed));
-#define USB_DT_AC_OUTPUT_TERMINAL_SIZE 9
+#define UAC_DT_OUTPUT_TERMINAL_SIZE 9
-#define USB_AC_OUTPUT_TERMINAL_UNDEFINED 0x300
-#define USB_AC_OUTPUT_TERMINAL_SPEAKER 0x301
-#define USB_AC_OUTPUT_TERMINAL_HEADPHONES 0x302
-#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303
-#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304
-#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305
-#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306
-#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307
+/* Terminals - 2.3 Output Terminal Types */
+#define UAC_OUTPUT_TERMINAL_UNDEFINED 0x300
+#define UAC_OUTPUT_TERMINAL_SPEAKER 0x301
+#define UAC_OUTPUT_TERMINAL_HEADPHONES 0x302
+#define UAC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303
+#define UAC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304
+#define UAC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305
+#define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306
+#define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307
/* Set bControlSize = 2 as default setting */
-#define USB_DT_AC_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2)
+#define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2)
/* As above, but more useful for defining your own descriptors: */
-#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch) \
-struct usb_ac_feature_unit_descriptor_##ch { \
+#define DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(ch) \
+struct uac_feature_unit_descriptor_##ch { \
__u8 bLength; \
__u8 bDescriptorType; \
__u8 bDescriptorSubtype; \
@@ -179,7 +171,7 @@ struct usb_ac_feature_unit_descriptor_##ch { \
} __attribute__ ((packed))
/* 4.5.2 Class-Specific AS Interface Descriptor */
-struct usb_as_header_descriptor {
+struct uac_as_header_descriptor {
__u8 bLength; /* in bytes: 7 */
__u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
__u8 bDescriptorSubtype; /* AS_GENERAL */
@@ -188,16 +180,17 @@ struct usb_as_header_descriptor {
__le16 wFormatTag; /* The Audio Data Format */
} __attribute__ ((packed));
-#define USB_DT_AS_HEADER_SIZE 7
+#define UAC_DT_AS_HEADER_SIZE 7
-#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED 0x0
-#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM 0x1
-#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8 0x2
-#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT 0x3
-#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW 0x4
-#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW 0x5
+/* Formats - A.1.1 Audio Data Format Type I Codes */
+#define UAC_FORMAT_TYPE_I_UNDEFINED 0x0
+#define UAC_FORMAT_TYPE_I_PCM 0x1
+#define UAC_FORMAT_TYPE_I_PCM8 0x2
+#define UAC_FORMAT_TYPE_I_IEEE_FLOAT 0x3
+#define UAC_FORMAT_TYPE_I_ALAW 0x4
+#define UAC_FORMAT_TYPE_I_MULAW 0x5
-struct usb_as_format_type_i_continuous_descriptor {
+struct uac_format_type_i_continuous_descriptor {
__u8 bLength; /* in bytes: 8 + (ns * 3) */
__u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
__u8 bDescriptorSubtype; /* FORMAT_TYPE */
@@ -210,9 +203,9 @@ struct usb_as_format_type_i_continuous_descriptor {
__u8 tUpperSamFreq[3];
} __attribute__ ((packed));
-#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14
+#define UAC_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14
-struct usb_as_formate_type_i_discrete_descriptor {
+struct uac_format_type_i_discrete_descriptor {
__u8 bLength; /* in bytes: 8 + (ns * 3) */
__u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
__u8 bDescriptorSubtype; /* FORMAT_TYPE */
@@ -224,8 +217,8 @@ struct usb_as_formate_type_i_discrete_descriptor {
__u8 tSamFreq[][3];
} __attribute__ ((packed));
-#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n) \
-struct usb_as_formate_type_i_discrete_descriptor_##n { \
+#define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \
+struct uac_format_type_i_discrete_descriptor_##n { \
__u8 bLength; \
__u8 bDescriptorType; \
__u8 bDescriptorSubtype; \
@@ -237,18 +230,15 @@ struct usb_as_formate_type_i_discrete_descriptor_##n { \
__u8 tSamFreq[n][3]; \
} __attribute__ ((packed))
-#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3))
-
-#define USB_AS_FORMAT_TYPE_UNDEFINED 0x0
-#define USB_AS_FORMAT_TYPE_I 0x1
-#define USB_AS_FORMAT_TYPE_II 0x2
-#define USB_AS_FORMAT_TYPE_III 0x3
+#define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3))
-#define USB_AS_ENDPOINT_ASYNC (1 << 2)
-#define USB_AS_ENDPOINT_ADAPTIVE (2 << 2)
-#define USB_AS_ENDPOINT_SYNC (3 << 2)
+/* Formats - A.2 Format Type Codes */
+#define UAC_FORMAT_TYPE_UNDEFINED 0x0
+#define UAC_FORMAT_TYPE_I 0x1
+#define UAC_FORMAT_TYPE_II 0x2
+#define UAC_FORMAT_TYPE_III 0x3
-struct usb_as_iso_endpoint_descriptor {
+struct uac_iso_endpoint_descriptor {
__u8 bLength; /* in bytes: 7 */
__u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */
__u8 bDescriptorSubtype; /* EP_GENERAL */
@@ -256,30 +246,37 @@ struct usb_as_iso_endpoint_descriptor {
__u8 bLockDelayUnits;
__le16 wLockDelay;
};
-#define USB_AS_ISO_ENDPOINT_DESC_SIZE 7
-
-#define FU_CONTROL_UNDEFINED 0x00
-#define MUTE_CONTROL 0x01
-#define VOLUME_CONTROL 0x02
-#define BASS_CONTROL 0x03
-#define MID_CONTROL 0x04
-#define TREBLE_CONTROL 0x05
-#define GRAPHIC_EQUALIZER_CONTROL 0x06
-#define AUTOMATIC_GAIN_CONTROL 0x07
-#define DELAY_CONTROL 0x08
-#define BASS_BOOST_CONTROL 0x09
-#define LOUDNESS_CONTROL 0x0a
-
-#define FU_MUTE (1 << (MUTE_CONTROL - 1))
-#define FU_VOLUME (1 << (VOLUME_CONTROL - 1))
-#define FU_BASS (1 << (BASS_CONTROL - 1))
-#define FU_MID (1 << (MID_CONTROL - 1))
-#define FU_TREBLE (1 << (TREBLE_CONTROL - 1))
-#define FU_GRAPHIC_EQ (1 << (GRAPHIC_EQUALIZER_CONTROL - 1))
-#define FU_AUTO_GAIN (1 << (AUTOMATIC_GAIN_CONTROL - 1))
-#define FU_DELAY (1 << (DELAY_CONTROL - 1))
-#define FU_BASS_BOOST (1 << (BASS_BOOST_CONTROL - 1))
-#define FU_LOUDNESS (1 << (LOUDNESS_CONTROL - 1))
+#define UAC_ISO_ENDPOINT_DESC_SIZE 7
+
+#define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01
+#define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02
+#define UAC_EP_CS_ATTR_FILL_MAX 0x80
+
+/* A.10.2 Feature Unit Control Selectors */
+#define UAC_FU_CONTROL_UNDEFINED 0x00
+#define UAC_MUTE_CONTROL 0x01
+#define UAC_VOLUME_CONTROL 0x02
+#define UAC_BASS_CONTROL 0x03
+#define UAC_MID_CONTROL 0x04
+#define UAC_TREBLE_CONTROL 0x05
+#define UAC_GRAPHIC_EQUALIZER_CONTROL 0x06
+#define UAC_AUTOMATIC_GAIN_CONTROL 0x07
+#define UAC_DELAY_CONTROL 0x08
+#define UAC_BASS_BOOST_CONTROL 0x09
+#define UAC_LOUDNESS_CONTROL 0x0a
+
+#define UAC_FU_MUTE (1 << (UAC_MUTE_CONTROL - 1))
+#define UAC_FU_VOLUME (1 << (UAC_VOLUME_CONTROL - 1))
+#define UAC_FU_BASS (1 << (UAC_BASS_CONTROL - 1))
+#define UAC_FU_MID (1 << (UAC_MID_CONTROL - 1))
+#define UAC_FU_TREBLE (1 << (UAC_TREBLE_CONTROL - 1))
+#define UAC_FU_GRAPHIC_EQ (1 << (UAC_GRAPHIC_EQUALIZER_CONTROL - 1))
+#define UAC_FU_AUTO_GAIN (1 << (UAC_AUTOMATIC_GAIN_CONTROL - 1))
+#define UAC_FU_DELAY (1 << (UAC_DELAY_CONTROL - 1))
+#define UAC_FU_BASS_BOOST (1 << (UAC_BASS_BOOST_CONTROL - 1))
+#define UAC_FU_LOUDNESS (1 << (UAC_LOUDNESS_CONTROL - 1))
+
+#ifdef __KERNEL__
struct usb_audio_control {
struct list_head list;
@@ -290,18 +287,6 @@ struct usb_audio_control {
int (*get)(struct usb_audio_control *con, u8 cmd);
};
-static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
-{
- con->data[cmd] = value;
-
- return 0;
-}
-
-static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
-{
- return con->data[cmd];
-}
-
struct usb_audio_control_selector {
struct list_head list;
struct list_head control;
@@ -311,4 +296,6 @@ struct usb_audio_control_selector {
struct usb_descriptor_header *desc;
};
+#endif /* __KERNEL__ */
+
#endif /* __LINUX_USB_AUDIO_H */
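
Editor's note: a hedged sketch of how a driver might use the renamed declaration macro for an AudioControl interface that spans two streaming interfaces; the variable name, interface numbers and the wTotalLength value are illustrative placeholders, only the macro, sizes and subtype constants come from the header above.

/* Instantiate a header descriptor type with room for 2 interface numbers. */
DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);

static struct uac_ac_header_descriptor_2 ac_header_desc = {
	.bLength		= UAC_DT_AC_HEADER_SIZE(2),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_HEADER,
	.bcdADC			= __constant_cpu_to_le16(0x0100),
	/* Placeholder: real drivers add the unit/terminal descriptor sizes. */
	.wTotalLength		= __constant_cpu_to_le16(UAC_DT_AC_HEADER_SIZE(2)),
	.bInCollection		= 2,
	.baInterfaceNr		= { 1, 2 },	/* AS interface numbers */
};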
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 93223638f70..94012e649d8 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -258,6 +258,8 @@ struct usb_device_descriptor {
#define USB_CLASS_APP_SPEC 0xfe
#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_SUBCLASS_VENDOR_SPEC 0xff
+
/*-------------------------------------------------------------------------*/
/* USB_DT_CONFIG: Configuration descriptor information.
@@ -348,6 +350,12 @@ struct usb_endpoint_descriptor {
#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */
#define USB_ENDPOINT_DIR_MASK 0x80
+#define USB_ENDPOINT_SYNCTYPE 0x0c
+#define USB_ENDPOINT_SYNC_NONE (0 << 2)
+#define USB_ENDPOINT_SYNC_ASYNC (1 << 2)
+#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2)
+#define USB_ENDPOINT_SYNC_SYNC (3 << 2)
+
#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */
#define USB_ENDPOINT_XFER_CONTROL 0
#define USB_ENDPOINT_XFER_ISOC 1
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index 5b88e36c910..af4b86f3aca 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -105,6 +105,7 @@ struct ehci_regs {
#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
/* 19:16 for port testing */
+#define PORT_TEST_PKT (0x4<<16) /* Port Test Control - packet test */
#define PORT_LED_OFF (0<<14)
#define PORT_LED_AMBER (1<<14)
#define PORT_LED_GREEN (2<<14)
@@ -132,6 +133,19 @@ struct ehci_regs {
#define USBMODE_CM_HC (3<<0) /* host controller mode */
#define USBMODE_CM_IDLE (0<<0) /* idle state */
+/* Moorestown has some non-standard registers, partially due to the fact that
+ * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to
+ * PORTSCx
+ */
+#define HOSTPC0 0x84 /* HOSTPC extension */
+#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
+#define HOSTPC_PSPD (3<<25) /* Port speed detection */
+#define USBMODE_EX 0xc8 /* USB Device mode extension */
+#define USBMODE_EX_VBPS (1<<5) /* VBus Power Select On */
+#define USBMODE_EX_HC (3<<0) /* host controller mode */
+#define TXFILLTUNING 0x24 /* TX FIFO Tuning register */
+#define TXFIFO_DEFAULT (8<<16) /* FIFO burst threshold 8 */
+
/* Appendix C, Debug port ... intended for use with special "debug devices"
* that can help if there's no serial console. (nonstandard enumeration.)
*/
@@ -157,4 +171,25 @@ struct ehci_dbg_port {
#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
} __attribute__ ((packed));
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from ehci host driver to ehci debug driver */
+extern int dbgp_external_startup(void);
+extern int dbgp_reset_prep(void);
+#else
+static inline int dbgp_reset_prep(void)
+{
+ return 1;
+}
+static inline int dbgp_external_startup(void)
+{
+ return -1;
+}
+#endif
+
#endif /* __LINUX_USB_EHCI_DEF_H */
diff --git a/include/linux/usb/isp1362.h b/include/linux/usb/isp1362.h
new file mode 100644
index 00000000000..642684bb929
--- /dev/null
+++ b/include/linux/usb/isp1362.h
@@ -0,0 +1,46 @@
+/*
+ * board initialization code should put one of these into dev->platform_data
+ * and place the isp1362 onto platform_bus.
+ */
+
+#ifndef __LINUX_USB_ISP1362_H__
+#define __LINUX_USB_ISP1362_H__
+
+struct isp1362_platform_data {
+ /* Enable internal pulldown resistors on downstream ports */
+ unsigned sel15Kres:1;
+ /* Clock cannot be stopped */
+ unsigned clknotstop:1;
+ /* On-chip overcurrent protection */
+ unsigned oc_enable:1;
+ /* INT output polarity */
+ unsigned int_act_high:1;
+ /* INT edge or level triggered */
+ unsigned int_edge_triggered:1;
+ /* DREQ output polarity */
+ unsigned dreq_act_high:1;
+ /* DACK input polarity */
+ unsigned dack_act_high:1;
+ /* chip can be resumed via H_WAKEUP pin */
+ unsigned remote_wakeup_connected:1;
+ /* Switch or not to switch (keep always powered) */
+ unsigned no_power_switching:1;
+ /* Ganged port power switching (0) or individual port power switching (1) */
+ unsigned power_switching_mode:1;
+ /* Given port_power, msec/2 after power on till power good */
+ u8 potpg;
+ /* Hardware reset set/clear */
+ void (*reset) (struct device *dev, int set);
+ /* Clock start/stop */
+ void (*clock) (struct device *dev, int start);
+ /* Inter-io delay (ns). The chip is picky about access timings; it
+ * expects at least:
+ * 110ns delay between consecutive accesses to DATA_REG,
+ * 300ns delay between access to ADDR_REG and DATA_REG (registers)
+ * 462ns delay between access to ADDR_REG and DATA_REG (buffer memory)
+ * WE MUST NOT be activated during these intervals (even without CS!)
+ */
+ void (*delay) (struct device *dev, unsigned int delay);
+};
+
+#endif
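
Editor's note: a hedged sketch of the board code the comment above asks for: a platform_device carrying one of these structures in dev.platform_data. The resource addresses, IRQ macro, device name string and reset callback are board-specific placeholders, not taken from this header.

static struct isp1362_platform_data myboard_isp1362_data = {
	.sel15Kres		 = 1,
	.oc_enable		 = 1,
	.remote_wakeup_connected = 1,
	.no_power_switching	 = 1,
	.reset			 = myboard_isp1362_reset,	/* defined elsewhere */
};

static struct resource myboard_isp1362_resources[] = {
	[0] = { .start = 0x08000000, .end = 0x08000001, .flags = IORESOURCE_MEM },
	[1] = { .start = 0x08000002, .end = 0x08000003, .flags = IORESOURCE_MEM },
	[2] = { .start = MYBOARD_ISP1362_IRQ, .flags = IORESOURCE_IRQ },
};

static struct platform_device myboard_isp1362_device = {
	.name		= "isp1362-hcd",
	.id		= 0,
	.dev		= { .platform_data = &myboard_isp1362_data },
	.num_resources	= ARRAY_SIZE(myboard_isp1362_resources),
	.resource	= myboard_isp1362_resources,
};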
diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h
new file mode 100644
index 00000000000..de7de53c553
--- /dev/null
+++ b/include/linux/usb/isp1760.h
@@ -0,0 +1,18 @@
+/*
+ * board initialization should put one of these into dev->platform_data
+ * and place the isp1760 onto platform_bus named "isp1760-hcd".
+ */
+
+#ifndef __LINUX_USB_ISP1760_H
+#define __LINUX_USB_ISP1760_H
+
+struct isp1760_platform_data {
+ unsigned is_isp1761:1; /* Chip is ISP1761 */
+ unsigned bus_width_16:1; /* 16/32-bit data bus width */
+ unsigned port1_otg:1; /* Port 1 supports OTG */
+ unsigned analog_oc:1; /* Analog overcurrent */
+ unsigned dack_polarity_high:1; /* DACK active high */
+ unsigned dreq_polarity_high:1; /* DREQ active high */
+};
+
+#endif /* __LINUX_USB_ISP1760_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 7b85e327af9..c17eb64d721 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -59,6 +59,7 @@ enum port_dev_state {
* @bulk_out_buffer: pointer to the bulk out buffer for this port.
* @bulk_out_size: the size of the bulk_out_buffer, in bytes.
* @write_urb: pointer to the bulk out struct urb for this port.
+ * @write_fifo: kfifo used to buffer outgoing data
* @write_urb_busy: port`s writing status
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
* port.
@@ -96,6 +97,7 @@ struct usb_serial_port {
unsigned char *bulk_out_buffer;
int bulk_out_size;
struct urb *write_urb;
+ struct kfifo *write_fifo;
int write_urb_busy;
__u8 bulk_out_endpointAddress;
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index bb69e256cd1..f8147305205 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -89,6 +89,7 @@ struct driver_info {
#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */
#define FLAG_WLAN 0x0080 /* use "wlan%d" names */
#define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */
+#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */
/* init device ... can sleep, or cause probe() failure */
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 0044d9b4cb8..b2a7d8ba6ee 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -77,6 +77,7 @@ struct usbdevfs_connectinfo {
#define USBDEVFS_URB_SHORT_NOT_OK 0x01
#define USBDEVFS_URB_ISO_ASAP 0x02
+#define USBDEVFS_URB_BULK_CONTINUATION 0x04
#define USBDEVFS_URB_NO_FSBR 0x20
#define USBDEVFS_URB_ZERO_PACKET 0x40
#define USBDEVFS_URB_NO_INTERRUPT 0x80
@@ -175,4 +176,6 @@ struct usbdevfs_ioctl32 {
#define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int)
#define USBDEVFS_DISCONNECT _IO('U', 22)
#define USBDEVFS_CONNECT _IO('U', 23)
+#define USBDEVFS_CLAIM_PORT _IOR('U', 24, unsigned int)
+#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int)
#endif /* _LINUX_USBDEVICE_FS_H */
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 3656b300de3..69f39974c04 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -36,7 +36,6 @@ struct new_utsname {
#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/err.h>
-#include <asm/atomic.h>
struct uts_namespace {
struct kref kref;
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index e81c64af80c..2dfaa293ae8 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -1,5 +1,6 @@
/*
- * vgaarb.c
+ * The VGA arbiter manages VGA space routing and VGA resource decode to
+ * allow multiple VGA devices to be used in a system in a safe way.
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
@@ -41,7 +42,7 @@
* interrupts at any time.
*/
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
- unsigned int decodes);
+ unsigned int decodes);
/**
* vga_get - acquire & locks VGA resources
@@ -193,8 +194,17 @@ static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
* The driver will get a callback when VGA arbitration is first used
* by userspace since some older X servers have issues.
*/
+#if defined(CONFIG_VGA_ARB)
int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
unsigned int (*set_vga_decode)(void *cookie, bool state));
+#else
+static inline int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool state))
+{
+ return 0;
+}
+#endif
#endif /* LINUX_VGA_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 4fca4f5440b..057a2e01075 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -34,7 +34,7 @@ struct virtqueue {
* out_num: the number of sg readable by other side
* in_num: the number of sg which are writable (after readable ones)
* data: the token identifying the buffer.
- * Returns 0 or an error.
+ * Returns remaining capacity of queue (sg segments) or a negative error.
* @kick: update after add_buf
* vq: the struct virtqueue
* After one or more add_buf calls, invoke this to kick the other side.
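
Editor's note: the changed add_buf contract matters to callers: a non-negative return is now the remaining queue capacity, so "success" tests must check for a negative value rather than non-zero. A hedged fragment of a caller under the new contract, using the vq_ops-style indirect calls of this kernel generation; the surrounding variables (sg, out_num, in_num, token) are illustrative.

/* Sketch: queue out_num readable + in_num writable sg segments. */
int capacity = vq->vq_ops->add_buf(vq, sg, out_num, in_num, token);

if (capacity < 0)
	return capacity;	/* queue full or other error */

/* capacity is what is left; 0 means no room for another buffer yet. */
vq->vq_ops->kick(vq);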
diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h
index b3c4a60ceeb..ea7226a45ac 100644
--- a/include/linux/virtio_9p.h
+++ b/include/linux/virtio_9p.h
@@ -4,8 +4,6 @@
* compatible drivers/servers. */
#include <linux/virtio_config.h>
-/* The ID for virtio console */
-#define VIRTIO_ID_9P 9
/* Maximum number of virtio channels per partition (1 for now) */
#define MAX_9P_CHAN 1
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index 8726ff77763..09d73008506 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -4,9 +4,6 @@
* compatible drivers/servers. */
#include <linux/virtio_config.h>
-/* The ID for virtio_balloon */
-#define VIRTIO_ID_BALLOON 5
-
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 8dab9f2b883..15cb666581d 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -5,9 +5,6 @@
#include <linux/types.h>
#include <linux/virtio_config.h>
-/* The ID for virtio_block */
-#define VIRTIO_ID_BLOCK 2
-
/* Feature bits */
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */
@@ -17,6 +14,7 @@
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */
+#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */
#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
@@ -38,6 +36,17 @@ struct virtio_blk_config {
__u8 identify[VIRTIO_BLK_ID_BYTES];
} __attribute__((packed));
+/*
+ * Command types
+ *
+ * Usage is a bit tricky as some bits are used as flags and some are not.
+ *
+ * Rules:
+ * VIRTIO_BLK_T_OUT may be combined with VIRTIO_BLK_T_SCSI_CMD or
+ * VIRTIO_BLK_T_BARRIER. VIRTIO_BLK_T_FLUSH is a command of its own
+ * and may not be combined with any of the other flags.
+ */
+
/* These two define direction. */
#define VIRTIO_BLK_T_IN 0
#define VIRTIO_BLK_T_OUT 1
@@ -45,6 +54,9 @@ struct virtio_blk_config {
/* This bit says it's a scsi command, not an actual read or write. */
#define VIRTIO_BLK_T_SCSI_CMD 2
+/* Cache flush command */
+#define VIRTIO_BLK_T_FLUSH 4
+
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
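
Editor's note: following the rules stated above, the request type field ends up as, for example, VIRTIO_BLK_T_IN (0) for a plain read, VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER for a barriered write, VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_SCSI_CMD for an outbound SCSI passthrough, and VIRTIO_BLK_T_FLUSH on its own for a cache flush. A tiny standalone illustration with the defines copied from the hunk above:

#include <stdio.h>

#define VIRTIO_BLK_T_IN		0
#define VIRTIO_BLK_T_OUT	1
#define VIRTIO_BLK_T_SCSI_CMD	2
#define VIRTIO_BLK_T_FLUSH	4
#define VIRTIO_BLK_T_BARRIER	0x80000000

int main(void)
{
	unsigned int barrier_write = VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER;
	unsigned int scsi_out	   = VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_SCSI_CMD;
	unsigned int flush	   = VIRTIO_BLK_T_FLUSH; /* never ORed with others */

	/* prints: 0x80000001 0x3 0x4 */
	printf("%#x %#x %#x\n", barrier_write, scsi_out, flush);
	return 0;
}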
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e547e3c8ee9..0093dd7c1d6 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -109,8 +109,7 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
/* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 32);
+ MAYBE_BUILD_BUG_ON(fbit >= 32);
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index dc161115ae3..b5f51980601 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -5,9 +5,6 @@
/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
* anyone can use the definitions to implement compatible drivers/servers. */
-/* The ID for virtio console */
-#define VIRTIO_ID_CONSOLE 3
-
/* Feature bits */
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
new file mode 100644
index 00000000000..06660c0a78d
--- /dev/null
+++ b/include/linux/virtio_ids.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_VIRTIO_IDS_H
+#define _LINUX_VIRTIO_IDS_H
+/*
+ * Virtio IDs
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ */
+
+#define VIRTIO_ID_NET 1 /* virtio net */
+#define VIRTIO_ID_BLOCK 2 /* virtio block */
+#define VIRTIO_ID_CONSOLE 3 /* virtio console */
+#define VIRTIO_ID_RNG 4 /* virtio rng */
+#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
+#define VIRTIO_ID_9P 9 /* 9P transport */
+
+#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index d8dd539c9f4..1f41734bbb7 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -6,9 +6,6 @@
#include <linux/virtio_config.h>
#include <linux/if_ether.h>
-/* The ID for virtio_net */
-#define VIRTIO_ID_NET 1
-
/* The feature bitmap for virtio net */
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h
index 1a85dab8a94..48121c3c434 100644
--- a/include/linux/virtio_rng.h
+++ b/include/linux/virtio_rng.h
@@ -4,7 +4,4 @@
* compatible drivers/servers. */
#include <linux/virtio_config.h>
-/* The ID for virtio_rng */
-#define VIRTIO_ID_RNG 4
-
#endif /* _LINUX_VIRTIO_RNG_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 81a97cf8f0a..2d0f222388a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -166,15 +166,8 @@ static inline unsigned long zone_page_state(struct zone *zone,
return x;
}
-extern unsigned long global_lru_pages(void);
-
-static inline unsigned long zone_lru_pages(struct zone *zone)
-{
- return (zone_page_state(zone, NR_ACTIVE_ANON)
- + zone_page_state(zone, NR_ACTIVE_FILE)
- + zone_page_state(zone, NR_INACTIVE_ANON)
- + zone_page_state(zone, NR_INACTIVE_FILE));
-}
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
#ifdef CONFIG_NUMA
/*
@@ -210,11 +203,6 @@ extern void zone_statistics(struct zone *, struct zone *);
#endif /* CONFIG_NUMA */
-#define __add_zone_page_state(__z, __i, __d) \
- __mod_zone_page_state(__z, __i, __d)
-#define __sub_zone_page_state(__z, __i, __d) \
- __mod_zone_page_state(__z, __i,-(__d))
-
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 0c9878123d5..38e8c4d9289 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -293,6 +293,24 @@ struct wm97xx {
u16 suspend_mode; /* PRP in suspend mode */
};
+struct wm97xx_batt_pdata {
+ int batt_aux;
+ int temp_aux;
+ int charge_gpio;
+ int min_voltage;
+ int max_voltage;
+ int batt_div;
+ int batt_mult;
+ int temp_div;
+ int temp_mult;
+ int batt_tech;
+ char *batt_name;
+};
+
+struct wm97xx_pdata {
+ struct wm97xx_batt_pdata *batt_pdata; /* battery data */
+};
+
/*
* Codec GPIO access (not supported on WM9705)
* This can be used to set/get codec GPIO and Virtual GPIO status.
diff --git a/include/linux/wm97xx_batt.h b/include/linux/wm97xx_batt.h
index 9681d1ab0e4..a1d6419c2ff 100644
--- a/include/linux/wm97xx_batt.h
+++ b/include/linux/wm97xx_batt.h
@@ -3,22 +3,12 @@
#include <linux/wm97xx.h>
-struct wm97xx_batt_info {
- int batt_aux;
- int temp_aux;
- int charge_gpio;
- int min_voltage;
- int max_voltage;
- int batt_div;
- int batt_mult;
- int temp_div;
- int temp_mult;
- int batt_tech;
- char *batt_name;
-};
+#warning This file will be removed soon, use wm97xx.h instead!
+
+#define wm97xx_batt_info wm97xx_batt_pdata
#ifdef CONFIG_BATTERY_WM97XX
-void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data);
+void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data);
#else
static inline void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) {}
#endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 6273fa97b52..7ef0c7b94f3 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -94,7 +94,7 @@ struct execute_work {
/*
* initialize all of a work item in one go
*
- * NOTE! No point in using "atomic_long_set()": useing a direct
+ * NOTE! No point in using "atomic_long_set()": using a direct
* assignment of the work data initializer allows the compiler
* to generate better code.
*/
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 75cf58666ff..66ebddcff66 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -110,21 +110,20 @@ extern int laptop_mode;
extern unsigned long determine_dirtyable_memory(void);
extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos);
struct ctl_table;
-struct file;
-int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
+int dirty_writeback_centisecs_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
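
The same signature change (dropping the struct file * argument from proc/sysctl handlers) recurs throughout this patch, in the ipc and networking sysctls below as well. As a hedged sketch, a handler written against the new convention looks like this; the function name is hypothetical:

/* Illustrative only: a proc handler using the new 5-argument convention.
 * proc_dointvec() itself no longer takes a struct file * either. */
#include <linux/sysctl.h>

static int example_dointvec(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* forward to the generic integer handler */
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
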
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index b77c1478c99..a7fb54808a2 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -38,6 +38,8 @@
* @P9_DEBUG_SLABS: memory management tracing
* @P9_DEBUG_FCALL: verbose dump of protocol messages
* @P9_DEBUG_FID: fid allocation/deallocation tracking
+ * @P9_DEBUG_PKT: packet marshalling/unmarshalling
+ * @P9_DEBUG_FSC: FS-cache tracing
*
* These flags are passed at mount time to turn on various levels of
* verbosity and tracing which will be output to the system logs.
@@ -54,6 +56,7 @@ enum p9_debug_flags {
P9_DEBUG_FCALL = (1<<8),
P9_DEBUG_FID = (1<<9),
P9_DEBUG_PKT = (1<<10),
+ P9_DEBUG_FSC = (1<<11),
};
#ifdef CONFIG_NET_9P_DEBUG
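
Since the comment above notes that these flags are passed at mount time, here is a hedged user-space sketch turning on the new FS-cache tracing bit (P9_DEBUG_FSC, 1<<11 == 2048) for a virtio-9p share; the share tag and mount point are placeholders:

/* Illustrative only: pass P9_DEBUG_FSC (1<<11 == 2048) via the 9p
 * "debug=" mount option.  Tag and mount point are placeholders. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("hostshare", "/mnt/9p", "9p", 0,
		  "trans=virtio,debug=2048") < 0) {
		perror("mount 9p");
		return 1;
	}
	return 0;
}
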
diff --git a/include/net/ip.h b/include/net/ip.h
index 72c36926c26..5b26a0bd178 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -399,7 +399,7 @@ extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
* fed into the routing cache should use these handlers.
*/
int ipv4_doint_and_flush(ctl_table *ctl, int write,
- struct file* filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos);
int ipv4_doint_and_flush_strategy(ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 5d3036fa151..76e3ea6e2fe 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -12,7 +12,6 @@ struct ip_tunnel
struct ip_tunnel *next;
struct net_device *dev;
- int recursion; /* Depth of hard_start_xmit recursion */
int err_count; /* Number of arrived ICMP errors */
unsigned long err_time; /* Time when the last ICMP error arrived */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 1459ed3e269..f76f22d0572 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -55,7 +55,6 @@ enum {
#include <net/neighbour.h>
struct ctl_table;
-struct file;
struct inet6_dev;
struct net_device;
struct net_proto_family;
@@ -139,7 +138,6 @@ extern int igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
int write,
- struct file * filp,
void __user *buffer,
size_t *lenp,
loff_t *ppos);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 93885830430..c8f94e8db69 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -482,7 +482,7 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
* message.
* @cm_id: Connection identifier associated with the connection message.
* @service_timeout: The lower 5-bits specify the maximum time required for
- * the sender to reply to to the connection message. The upper 3-bits
+ * the sender to reply to the connection message. The upper 3-bits
* specify additional control flags.
* @private_data: Optional user-defined private data sent with the
* message receipt acknowledgement.
diff --git a/include/scsi/fc/fc_fc2.h b/include/scsi/fc/fc_fc2.h
index cff8a8c22f5..f87777d0d5b 100644
--- a/include/scsi/fc/fc_fc2.h
+++ b/include/scsi/fc/fc_fc2.h
@@ -92,8 +92,7 @@ struct fc_esb {
__u8 _esb_resvd[4];
__u8 esb_service_params[112]; /* TBD */
__u8 esb_seq_status[8]; /* sequence statuses, 8 bytes each */
-} __attribute__((packed));;
-
+} __attribute__((packed));
/*
* Define expected size for ASSERTs.
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 1493c541f9c..eaf46bdd18a 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -225,6 +225,169 @@ TRACE_EVENT(kmem_cache_free,
TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
+
+TRACE_EVENT(mm_page_free_direct,
+
+ TP_PROTO(struct page *page, unsigned int order),
+
+ TP_ARGS(page, order),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( unsigned int, order )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order)
+);
+
+TRACE_EVENT(mm_pagevec_free,
+
+ TP_PROTO(struct page *page, int cold),
+
+ TP_ARGS(page, cold),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( int, cold )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->cold = cold;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=0 cold=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->cold)
+);
+
+TRACE_EVENT(mm_page_alloc,
+
+ TP_PROTO(struct page *page, unsigned int order,
+ gfp_t gfp_flags, int migratetype),
+
+ TP_ARGS(page, order, gfp_flags, migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( unsigned int, order )
+ __field( gfp_t, gfp_flags )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ __entry->gfp_flags = gfp_flags;
+ __entry->migratetype = migratetype;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order,
+ __entry->migratetype,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+TRACE_EVENT(mm_page_alloc_zone_locked,
+
+ TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+ TP_ARGS(page, order, migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( unsigned int, order )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ __entry->migratetype = migratetype;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order,
+ __entry->migratetype,
+ __entry->order == 0)
+);
+
+TRACE_EVENT(mm_page_pcpu_drain,
+
+ TP_PROTO(struct page *page, int order, int migratetype),
+
+ TP_ARGS(page, order, migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( int, order )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ __entry->migratetype = migratetype;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order,
+ __entry->migratetype)
+);
+
+TRACE_EVENT(mm_page_alloc_extfrag,
+
+ TP_PROTO(struct page *page,
+ int alloc_order, int fallback_order,
+ int alloc_migratetype, int fallback_migratetype),
+
+ TP_ARGS(page,
+ alloc_order, fallback_order,
+ alloc_migratetype, fallback_migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( int, alloc_order )
+ __field( int, fallback_order )
+ __field( int, alloc_migratetype )
+ __field( int, fallback_migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->alloc_order = alloc_order;
+ __entry->fallback_order = fallback_order;
+ __entry->alloc_migratetype = alloc_migratetype;
+ __entry->fallback_migratetype = fallback_migratetype;
+ ),
+
+ TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->alloc_order,
+ __entry->fallback_order,
+ pageblock_order,
+ __entry->alloc_migratetype,
+ __entry->fallback_migratetype,
+ __entry->fallback_order < pageblock_order,
+ __entry->alloc_migratetype == __entry->fallback_migratetype)
+);
+
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
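
The new page-allocator tracepoints above are consumed like any other ftrace event. A minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug and event tracing is built in, that enables kmem:mm_page_alloc and streams the formatted output produced by its TP_printk():

/* Illustrative only: enable the kmem:mm_page_alloc event and stream the
 * formatted trace.  The debugfs mount point is an assumption. */
#include <stdio.h>
#include <stdlib.h>

#define TRACEFS "/sys/kernel/debug/tracing"

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char line[512];
	FILE *pipe;

	write_str(TRACEFS "/events/kmem/mm_page_alloc/enable", "1");

	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);	/* one line per mm_page_alloc event */
	fclose(pipe);
	return 0;
}
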
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
new file mode 100644
index 00000000000..1844c48d640
--- /dev/null
+++ b/include/trace/events/timer.h
@@ -0,0 +1,342 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer
+
+#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMER_H
+
+#include <linux/tracepoint.h>
+#include <linux/hrtimer.h>
+#include <linux/timer.h>
+
+/**
+ * timer_init - called when the timer is initialized
+ * @timer: pointer to struct timer_list
+ */
+TRACE_EVENT(timer_init,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ ),
+
+ TP_printk("timer %p", __entry->timer)
+);
+
+/**
+ * timer_start - called when the timer is started
+ * @timer: pointer to struct timer_list
+ * @expires: the timer's expiry time
+ */
+TRACE_EVENT(timer_start,
+
+ TP_PROTO(struct timer_list *timer, unsigned long expires),
+
+ TP_ARGS(timer, expires),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( void *, function )
+ __field( unsigned long, expires )
+ __field( unsigned long, now )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->function = timer->function;
+ __entry->expires = expires;
+ __entry->now = jiffies;
+ ),
+
+ TP_printk("timer %p: func %pf, expires %lu, timeout %ld",
+ __entry->timer, __entry->function, __entry->expires,
+ (long)__entry->expires - __entry->now)
+);
+
+/**
+ * timer_expire_entry - called immediately before the timer callback
+ * @timer: pointer to struct timer_list
+ *
+ * Allows determining the timer latency.
+ */
+TRACE_EVENT(timer_expire_entry,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( unsigned long, now )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->now = jiffies;
+ ),
+
+ TP_printk("timer %p: now %lu", __entry->timer, __entry->now)
+);
+
+/**
+ * timer_expire_exit - called immediately after the timer callback returns
+ * @timer: pointer to struct timer_list
+ *
+ * When used in combination with the timer_expire_entry tracepoint we can
+ * determine the runtime of the timer callback function.
+ *
+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
+ * be invalid. We solely track the pointer.
+ */
+TRACE_EVENT(timer_expire_exit,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field(void *, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ ),
+
+ TP_printk("timer %p", __entry->timer)
+);
+
+/**
+ * timer_cancel - called when the timer is canceled
+ * @timer: pointer to struct timer_list
+ */
+TRACE_EVENT(timer_cancel,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ ),
+
+ TP_printk("timer %p", __entry->timer)
+);
+
+/**
+ * hrtimer_init - called when the hrtimer is initialized
+ * @timer: pointer to struct hrtimer
+ * @clockid: the hrtimer's clock
+ * @mode: the hrtimer's mode
+ */
+TRACE_EVENT(hrtimer_init,
+
+ TP_PROTO(struct hrtimer *timer, clockid_t clockid,
+ enum hrtimer_mode mode),
+
+ TP_ARGS(timer, clockid, mode),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( clockid_t, clockid )
+ __field( enum hrtimer_mode, mode )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->clockid = clockid;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer,
+ __entry->clockid == CLOCK_REALTIME ?
+ "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
+ __entry->mode == HRTIMER_MODE_ABS ?
+ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+);
+
+/**
+ * hrtimer_start - called when the hrtimer is started
+ * @timer: pointer to struct hrtimer
+ */
+TRACE_EVENT(hrtimer_start,
+
+ TP_PROTO(struct hrtimer *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( void *, function )
+ __field( s64, expires )
+ __field( s64, softexpires )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->function = timer->function;
+ __entry->expires = hrtimer_get_expires(timer).tv64;
+ __entry->softexpires = hrtimer_get_softexpires(timer).tv64;
+ ),
+
+ TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu",
+ __entry->timer, __entry->function,
+ (unsigned long long)ktime_to_ns((ktime_t) {
+ .tv64 = __entry->expires }),
+ (unsigned long long)ktime_to_ns((ktime_t) {
+ .tv64 = __entry->softexpires }))
+);
+
+/**
+ * hrtimer_expire_entry - called immediately before the hrtimer callback
+ * @timer: pointer to struct hrtimer
+ * @now: pointer to variable which contains current time of the
+ * timers base.
+ *
+ * Allows determining the timer latency.
+ */
+TRACE_EVENT(hrtimer_expire_entry,
+
+ TP_PROTO(struct hrtimer *timer, ktime_t *now),
+
+ TP_ARGS(timer, now),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( s64, now )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->now = now->tv64;
+ ),
+
+ TP_printk("hrtimer %p, now %llu", __entry->timer,
+ (unsigned long long)ktime_to_ns((ktime_t) {
+ .tv64 = __entry->now }))
+ );
+
+/**
+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
+ * @timer: pointer to struct hrtimer
+ *
+ * When used in combination with the hrtimer_expire_entry tracepoint we can
+ * determine the runtime of the callback function.
+ */
+TRACE_EVENT(hrtimer_expire_exit,
+
+ TP_PROTO(struct hrtimer *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ ),
+
+ TP_printk("hrtimer %p", __entry->timer)
+);
+
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @timer: pointer to struct hrtimer
+ */
+TRACE_EVENT(hrtimer_cancel,
+
+ TP_PROTO(struct hrtimer *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ ),
+
+ TP_printk("hrtimer %p", __entry->timer)
+);
+
+/**
+ * itimer_state - called when itimer is started or canceled
+ * @which: name of the interval timer
+ * @value: the itimer's value; the itimer is canceled if value->it_value is
+ * zero, otherwise it is started
+ * @expires: the itimer's expiry time
+ */
+TRACE_EVENT(itimer_state,
+
+ TP_PROTO(int which, const struct itimerval *const value,
+ cputime_t expires),
+
+ TP_ARGS(which, value, expires),
+
+ TP_STRUCT__entry(
+ __field( int, which )
+ __field( cputime_t, expires )
+ __field( long, value_sec )
+ __field( long, value_usec )
+ __field( long, interval_sec )
+ __field( long, interval_usec )
+ ),
+
+ TP_fast_assign(
+ __entry->which = which;
+ __entry->expires = expires;
+ __entry->value_sec = value->it_value.tv_sec;
+ __entry->value_usec = value->it_value.tv_usec;
+ __entry->interval_sec = value->it_interval.tv_sec;
+ __entry->interval_usec = value->it_interval.tv_usec;
+ ),
+
+ TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu",
+ __entry->which, __entry->expires,
+ __entry->value_sec, __entry->value_usec,
+ __entry->interval_sec, __entry->interval_usec)
+);
+
+/**
+ * itimer_expire - called when itimer expires
+ * @which: type of the interval timer
+ * @pid: pid of the process which owns the timer
+ * @now: current time, used to calculate the latency of itimer
+ */
+TRACE_EVENT(itimer_expire,
+
+ TP_PROTO(int which, struct pid *pid, cputime_t now),
+
+ TP_ARGS(which, pid, now),
+
+ TP_STRUCT__entry(
+ __field( int , which )
+ __field( pid_t, pid )
+ __field( cputime_t, now )
+ ),
+
+ TP_fast_assign(
+ __entry->which = which;
+ __entry->now = now;
+ __entry->pid = pid_nr(pid);
+ ),
+
+ TP_printk("which %d, pid %d, now %lu", __entry->which,
+ (int) __entry->pid, __entry->now)
+);
+
+#endif /* _TRACE_TIMER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
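
These timer events can also be counted through the perf system call once the PERF_EVENTS/EVENT_PROFILE options further down are enabled. A hedged user-space sketch that reads the timer:timer_start tracepoint id from debugfs and counts the event system-wide on CPU 0 for one second; it assumes the exported kernel headers provide linux/perf_event.h and __NR_perf_event_open, and it needs root:

/* Illustrative only: count timer:timer_start via the perf syscall.
 * Paths and header availability are assumptions; run as root. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long id;
	uint64_t count;
	FILE *f;
	int fd;

	f = fopen("/sys/kernel/debug/tracing/events/timer/timer_start/id", "r");
	if (!f) {
		perror("tracepoint id");
		return 1;
	}
	if (fscanf(f, "%llu", &id) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;	/* needs CONFIG_EVENT_PROFILE */
	attr.size = sizeof(attr);
	attr.config = id;			/* tracepoint id selects the event */

	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);	/* all tasks, CPU 0 */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("timer_start fired %llu times in 1s on CPU 0\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}
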
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a0361cb6976..cc0d9667e18 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -378,7 +378,7 @@ static inline int ftrace_get_offsets_##call( \
#ifdef CONFIG_EVENT_PROFILE
/*
- * Generate the functions needed for tracepoint perf_counter support.
+ * Generate the functions needed for tracepoint perf_event support.
*
* NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
*
@@ -644,7 +644,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* {
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ftrace_event_call *event_call = &event_<call>;
- * extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ * extern void perf_tp_event(int, u64, u64, void *, int);
* struct ftrace_raw_##call *entry;
* u64 __addr = 0, __count = 1;
* unsigned long irq_flags;
@@ -690,7 +690,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
*
* <assign> <- affect our values
*
- * perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ * perf_tp_event(event_call->id, __addr, __count, entry,
* __entry_size); <- submit them to perf counter
*
* }
@@ -710,7 +710,7 @@ static void ftrace_profile_##call(proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_event_call *event_call = &event_##call; \
- extern void perf_tpcounter_event(int, u64, u64, void *, int); \
+ extern void perf_tp_event(int, u64, u64, void *, int); \
struct ftrace_raw_##call *entry; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
@@ -755,7 +755,7 @@ static void ftrace_profile_##call(proto) \
\
{ assign; } \
\
- perf_tpcounter_event(event_call->id, __addr, __count, entry, \
+ perf_tp_event(event_call->id, __addr, __count, entry, \
__entry_size); \
\
end: \
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
new file mode 100644
index 00000000000..c051a50ed52
--- /dev/null
+++ b/include/video/da8xx-fb.h
@@ -0,0 +1,103 @@
+/*
+ * Header file for TI DA8XX LCD controller platform data.
+ *
+ * Copyright (C) 2008-2009 MontaVista Software Inc.
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef DA8XX_FB_H
+#define DA8XX_FB_H
+
+enum panel_type {
+ QVGA = 0
+};
+
+enum panel_shade {
+ MONOCHROME = 0,
+ COLOR_ACTIVE,
+ COLOR_PASSIVE,
+};
+
+enum raster_load_mode {
+ LOAD_DATA = 1,
+ LOAD_PALETTE,
+};
+
+struct display_panel {
+ enum panel_type panel_type; /* QVGA */
+ int max_bpp;
+ int min_bpp;
+ enum panel_shade panel_shade;
+};
+
+struct da8xx_lcdc_platform_data {
+ const char manu_name[10];
+ void *controller_data;
+ const char type[25];
+};
+
+struct lcd_ctrl_config {
+ const struct display_panel *p_disp_panel;
+
+ /* AC Bias Pin Frequency */
+ int ac_bias;
+
+ /* AC Bias Pin Transitions per Interrupt */
+ int ac_bias_intrpt;
+
+ /* DMA burst size */
+ int dma_burst_sz;
+
+ /* Bits per pixel */
+ int bpp;
+
+ /* FIFO DMA Request Delay */
+ int fdd;
+
+ /* TFT Alternative Signal Mapping (Only for active) */
+ unsigned char tft_alt_mode;
+
+ /* 12 Bit Per Pixel (5-6-5) Mode (Only for passive) */
+ unsigned char stn_565_mode;
+
+ /* Mono 8-bit Mode: 1=D0-D7 or 0=D0-D3 */
+ unsigned char mono_8bit_mode;
+
+ /* Invert line clock */
+ unsigned char invert_line_clock;
+
+ /* Invert frame clock */
+ unsigned char invert_frm_clock;
+
+ /* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
+ unsigned char sync_edge;
+
+ /* Horizontal and Vertical Sync Control: 0=ignore */
+ unsigned char sync_ctrl;
+
+ /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
+ unsigned char raster_order;
+};
+
+struct lcd_sync_arg {
+ int back_porch;
+ int front_porch;
+ int pulse_width;
+};
+
+/* ioctls */
+#define FBIOGET_CONTRAST _IOR('F', 1, int)
+#define FBIOPUT_CONTRAST _IOW('F', 2, int)
+#define FBIGET_BRIGHTNESS _IOR('F', 3, int)
+#define FBIPUT_BRIGHTNESS _IOW('F', 3, int)
+#define FBIGET_COLOR _IOR('F', 5, int)
+#define FBIPUT_COLOR _IOW('F', 6, int)
+#define FBIPUT_HSYNC _IOW('F', 9, int)
+#define FBIPUT_VSYNC _IOW('F', 10, int)
+
+#endif /* ifndef DA8XX_FB_H */
+
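
User space would drive these custom 'F' ioctls through the framebuffer device node once the da8xx driver is bound. A hedged sketch; /dev/fb0 and the contrast value are assumptions, and the ioctl number is copied from the header above so the example stays self-contained. The _IOW(..., int) encoding implies a pointer-to-int argument here:

/* Illustrative only: set the LCD contrast via the da8xx-fb ioctl. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#ifndef FBIOPUT_CONTRAST
#define FBIOPUT_CONTRAST _IOW('F', 2, int)	/* mirrors include/video/da8xx-fb.h */
#endif

int main(void)
{
	int contrast = 128;	/* placeholder value */
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0) {
		perror("/dev/fb0");
		return 1;
	}
	if (ioctl(fd, FBIOPUT_CONTRAST, &contrast) < 0)
		perror("FBIOPUT_CONTRAST");
	close(fd);
	return 0;
}
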
diff --git a/init/Kconfig b/init/Kconfig
index 0121c0ea3e0..c7bac39d6c6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -916,31 +916,36 @@ config AIO
by some high performance threaded applications. Disabling
this option saves about 7k.
-config HAVE_PERF_COUNTERS
+config HAVE_PERF_EVENTS
bool
help
See tools/perf/design.txt for details.
-menu "Performance Counters"
+menu "Kernel Performance Events And Counters"
-config PERF_COUNTERS
- bool "Kernel Performance Counters"
- default y if PROFILING
- depends on HAVE_PERF_COUNTERS
+config PERF_EVENTS
+ bool "Kernel performance events and counters"
+ default y if (PROFILING || PERF_COUNTERS)
+ depends on HAVE_PERF_EVENTS
select ANON_INODES
help
- Enable kernel support for performance counter hardware.
+ Enable kernel support for various performance events provided
+ by software and hardware.
+
+ Software events are supported either built-in or via the
+ use of generic tracepoints.
- Performance counters are special hardware registers available
- on most modern CPUs. These registers count the number of certain
+ Most modern CPUs support performance events via performance
+ counter registers. These registers count the number of certain
types of hw events such as instructions executed, cache misses
suffered, or branches mis-predicted - without slowing down the
kernel or applications. These registers can also trigger interrupts
when a threshold number of events have passed - and can thus be
used to profile the code that runs on that CPU.
- The Linux Performance Counter subsystem provides an abstraction of
- these hardware capabilities, available via a system call. It
+ The Linux Performance Event subsystem provides an abstraction of
+ these software and hardware event capabilities, available via a
+ system call and used by the "perf" utility in tools/perf/. It
provides per task and per CPU counters, and it provides event
capabilities on top of those.
@@ -948,17 +953,29 @@ config PERF_COUNTERS
config EVENT_PROFILE
bool "Tracepoint profiling sources"
- depends on PERF_COUNTERS && EVENT_TRACING
+ depends on PERF_EVENTS && EVENT_TRACING
default y
help
- Allow the use of tracepoints as software performance counters.
+ Allow the use of tracepoints as software performance events.
- When this is enabled, you can create perf counters based on
+ When this is enabled, you can create perf events based on
tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
found in debugfs://tracing/events/*/*/id. (The -e/--events
option to the perf tool can parse and interpret symbolic
tracepoints, in the subsystem:tracepoint_name format.)
+config PERF_COUNTERS
+ bool "Kernel performance counters (old config option)"
+ depends on HAVE_PERF_EVENTS
+ help
+ This config has been obsoleted by the PERF_EVENTS
+ config option - please see that one for details.
+
+ It has no effect on the kernel whether you enable
+ it or not; it is a compatibility placeholder.
+
+ Say N if unsure.
+
endmenu
config VM_EVENT_COUNTERS
@@ -989,14 +1006,6 @@ config SLUB_DEBUG
SLUB sysfs support. /sys/slab will not exist and there will be
no support for cache validation etc.
-config STRIP_ASM_SYMS
- bool "Strip assembler-generated symbols during link"
- default n
- help
- Strip internal assembler-generated symbols during a link (symbols
- that look like '.Lxxx') so they don't pollute the output of
- get_wchan() and suchlike.
-
config COMPAT_BRK
bool "Disable heap randomization"
default y
diff --git a/init/main.c b/init/main.c
index 34971becbd3..7449819a480 100644
--- a/init/main.c
+++ b/init/main.c
@@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <linux/utsname.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
@@ -68,6 +67,7 @@
#include <linux/async.h>
#include <linux/kmemcheck.h>
#include <linux/kmemtrace.h>
+#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <trace/boot.h>
@@ -359,11 +359,6 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#else
-#if NR_CPUS > BITS_PER_LONG
-cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_mask_all);
-#endif
-
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
@@ -668,12 +663,12 @@ asmlinkage void __init start_kernel(void)
#endif
thread_info_cache_init();
cred_init();
- fork_init(num_physpages);
+ fork_init(totalram_pages);
proc_caches_init();
buffer_init();
key_init();
security_init();
- vfs_caches_init(num_physpages);
+ vfs_caches_init(totalram_pages);
radix_tree_init();
signals_init();
/* rootfs populating might need page-writeback */
@@ -689,6 +684,7 @@ asmlinkage void __init start_kernel(void)
check_bugs();
acpi_early_init(); /* before LAPIC and SMP init */
+ sfi_init_late();
ftrace_init();
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 40eab7314ae..7d3704750ef 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -27,18 +27,18 @@ static void *get_ipc(ctl_table *table)
}
#ifdef CONFIG_PROC_SYSCTL
-static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp,
+static int proc_ipc_dointvec(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
memcpy(&ipc_table, table, sizeof(ipc_table));
ipc_table.data = get_ipc(table);
- return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
+ return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
}
static int proc_ipc_callback_dointvec(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
size_t lenp_bef = *lenp;
@@ -47,7 +47,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
memcpy(&ipc_table, table, sizeof(ipc_table));
ipc_table.data = get_ipc(table);
- rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
if (write && !rc && lenp_bef == *lenp)
/*
@@ -61,13 +61,13 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
}
static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
memcpy(&ipc_table, table, sizeof(ipc_table));
ipc_table.data = get_ipc(table);
- return proc_doulongvec_minmax(&ipc_table, write, filp, buffer,
+ return proc_doulongvec_minmax(&ipc_table, write, buffer,
lenp, ppos);
}
@@ -95,7 +95,7 @@ static void ipc_auto_callback(int val)
}
static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
size_t lenp_bef = *lenp;
@@ -106,7 +106,7 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
ipc_table.data = get_ipc(table);
oldval = *((int *)(ipc_table.data));
- rc = proc_dointvec_minmax(&ipc_table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
if (write && !rc && lenp_bef == *lenp) {
int newval = *((int *)(ipc_table.data));
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 24ae46dfe45..8a058711fc1 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -31,24 +31,24 @@ static void *get_mq(ctl_table *table)
return which;
}
-static int proc_mq_dointvec(ctl_table *table, int write, struct file *filp,
+static int proc_mq_dointvec(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table mq_table;
memcpy(&mq_table, table, sizeof(mq_table));
mq_table.data = get_mq(table);
- return proc_dointvec(&mq_table, write, filp, buffer, lenp, ppos);
+ return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
}
static int proc_mq_dointvec_minmax(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table mq_table;
memcpy(&mq_table, table, sizeof(mq_table));
mq_table.data = get_mq(table);
- return proc_dointvec_minmax(&mq_table, write, filp, buffer,
+ return proc_dointvec_minmax(&mq_table, write, buffer,
lenp, ppos);
}
#else
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c5e68adc673..ee9d69707c0 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -77,7 +77,7 @@ struct mqueue_inode_info {
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
-static struct super_operations mqueue_super_ops;
+static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);
static struct kmem_cache *mqueue_inode_cachep;
@@ -1224,7 +1224,7 @@ static const struct file_operations mqueue_file_operations = {
.read = mqueue_read_file,
};
-static struct super_operations mqueue_super_ops = {
+static const struct super_operations mqueue_super_ops = {
.alloc_inode = mqueue_alloc_inode,
.destroy_inode = mqueue_destroy_inode,
.statfs = simple_statfs,
diff --git a/ipc/shm.c b/ipc/shm.c
index 30162a59621..9eb1488b543 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -370,7 +370,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
file = hugetlb_file_setup(name, size, acctflag,
- &shp->mlock_user);
+ &shp->mlock_user, HUGETLB_SHMFS_INODE);
} else {
/*
* Do not allow no accounting for OVERCOMMIT_NEVER, even
diff --git a/ipc/util.c b/ipc/util.c
index b8e4ba92f6d..79ce84e890f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -942,7 +942,7 @@ static int sysvipc_proc_show(struct seq_file *s, void *it)
return iface->show(s, it);
}
-static struct seq_operations sysvipc_proc_seqops = {
+static const struct seq_operations sysvipc_proc_seqops = {
.start = sysvipc_proc_start,
.stop = sysvipc_proc_stop,
.next = sysvipc_proc_next,
diff --git a/kernel/Makefile b/kernel/Makefile
index 7c9b0a58550..b8d4cd8ac0b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -58,7 +58,6 @@ obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
obj-$(CONFIG_COMPAT) += compat.o
obj-$(CONFIG_CGROUPS) += cgroup.o
-obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
obj-$(CONFIG_CPUSETS) += cpuset.o
obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
@@ -95,7 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/audit.c b/kernel/audit.c
index defc2e6f1e3..5feed232be9 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -855,18 +855,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
break;
}
case AUDIT_SIGNAL_INFO:
- err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
- if (err)
- return err;
+ len = 0;
+ if (audit_sig_sid) {
+ err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);
+ if (err)
+ return err;
+ }
sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL);
if (!sig_data) {
- security_release_secctx(ctx, len);
+ if (audit_sig_sid)
+ security_release_secctx(ctx, len);
return -ENOMEM;
}
sig_data->uid = audit_sig_uid;
sig_data->pid = audit_sig_pid;
- memcpy(sig_data->ctx, ctx, len);
- security_release_secctx(ctx, len);
+ if (audit_sig_sid) {
+ memcpy(sig_data->ctx, ctx, len);
+ security_release_secctx(ctx, len);
+ }
audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
0, 0, sig_data, sizeof(*sig_data) + len);
kfree(sig_data);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 0e96dbc60ea..cc7e87936cb 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -45,8 +45,8 @@
struct audit_watch {
atomic_t count; /* reference count */
- char *path; /* insertion path */
dev_t dev; /* associated superblock device */
+ char *path; /* insertion path */
unsigned long ino; /* associated inode number */
struct audit_parent *parent; /* associated parent */
struct list_head wlist; /* entry in parent->watches list */
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 68d3c6a0ecd..267e484f019 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -168,12 +168,12 @@ struct audit_context {
int in_syscall; /* 1 if task is in a syscall */
enum audit_state state, current_state;
unsigned int serial; /* serial number for record */
- struct timespec ctime; /* time of syscall entry */
int major; /* syscall number */
+ struct timespec ctime; /* time of syscall entry */
unsigned long argv[4]; /* syscall arguments */
- int return_valid; /* return code is valid */
long return_code;/* syscall return code */
u64 prio;
+ int return_valid; /* return code is valid */
int name_count;
struct audit_names names[AUDIT_NAMES];
char * filterkey; /* key for rule that triggered record */
@@ -198,8 +198,8 @@ struct audit_context {
char target_comm[TASK_COMM_LEN];
struct audit_tree_refs *trees, *first_trees;
- int tree_count;
struct list_head killed_trees;
+ int tree_count;
int type;
union {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c7ece8f027f..7ccba4bc5e3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -23,6 +23,7 @@
*/
#include <linux/cgroup.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -48,6 +49,8 @@
#include <linux/namei.h>
#include <linux/smp_lock.h>
#include <linux/pid_namespace.h>
+#include <linux/idr.h>
+#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <asm/atomic.h>
@@ -60,6 +63,8 @@ static struct cgroup_subsys *subsys[] = {
#include <linux/cgroup_subsys.h>
};
+#define MAX_CGROUP_ROOT_NAMELEN 64
+
/*
* A cgroupfs_root represents the root of a cgroup hierarchy,
* and may be associated with a superblock to form an active
@@ -74,6 +79,9 @@ struct cgroupfs_root {
*/
unsigned long subsys_bits;
+ /* Unique id for this hierarchy. */
+ int hierarchy_id;
+
/* The bitmask of subsystems currently attached to this hierarchy */
unsigned long actual_subsys_bits;
@@ -94,6 +102,9 @@ struct cgroupfs_root {
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
+
+ /* The name for this hierarchy - may be empty */
+ char name[MAX_CGROUP_ROOT_NAMELEN];
};
/*
@@ -141,6 +152,10 @@ struct css_id {
static LIST_HEAD(roots);
static int root_count;
+static DEFINE_IDA(hierarchy_ida);
+static int next_hierarchy_id;
+static DEFINE_SPINLOCK(hierarchy_id_lock);
+
/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)
@@ -201,6 +216,7 @@ struct cg_cgroup_link {
* cgroup, anchored on cgroup->css_sets
*/
struct list_head cgrp_link_list;
+ struct cgroup *cgrp;
/*
* List running through cg_cgroup_links pointing at a
* single css_set object, anchored on css_set->cg_links
@@ -227,8 +243,11 @@ static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;
-/* hash table for cgroup groups. This improves the performance to
- * find an existing css_set */
+/*
+ * hash table for cgroup groups. This improves the performance of finding
+ * an existing css_set. This hash doesn't (currently) take into
+ * account cgroups in empty hierarchies.
+ */
#define CSS_SET_HASH_BITS 7
#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
@@ -248,48 +267,22 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
return &css_set_table[index];
}
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+ struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+ kfree(cg);
+}
+
/* We don't maintain the lists running through each css_set to its
* task until after the first call to cgroup_iter_start(). This
* reduces the fork()/exit() overhead for people who have cgroups
* compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;
-/* When we create or destroy a css_set, the operation simply
- * takes/releases a reference count on all the cgroups referenced
- * by subsystems in this css_set. This can end up multiple-counting
- * some cgroups, but that's OK - the ref-count is just a
- * busy/not-busy indicator; ensuring that we only count each cgroup
- * once would require taking a global lock to ensure that no
- * subsystems moved between hierarchies while we were doing so.
- *
- * Possible TODO: decide at boot time based on the number of
- * registered subsystems and the number of CPUs or NUMA nodes whether
- * it's better for performance to ref-count every subsystem, or to
- * take a global lock and only add one ref count to each hierarchy.
- */
-
-/*
- * unlink a css_set from the list and free it
- */
-static void unlink_css_set(struct css_set *cg)
+static void __put_css_set(struct css_set *cg, int taskexit)
{
struct cg_cgroup_link *link;
struct cg_cgroup_link *saved_link;
-
- hlist_del(&cg->hlist);
- css_set_count--;
-
- list_for_each_entry_safe(link, saved_link, &cg->cg_links,
- cg_link_list) {
- list_del(&link->cg_link_list);
- list_del(&link->cgrp_link_list);
- kfree(link);
- }
-}
-
-static void __put_css_set(struct css_set *cg, int taskexit)
-{
- int i;
/*
* Ensure that the refcount doesn't hit zero while any readers
* can see it. Similar to atomic_dec_and_lock(), but for an
@@ -302,21 +295,28 @@ static void __put_css_set(struct css_set *cg, int taskexit)
write_unlock(&css_set_lock);
return;
}
- unlink_css_set(cg);
- write_unlock(&css_set_lock);
- rcu_read_lock();
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);
+ /* This css_set is dead. unlink it and release cgroup refcounts */
+ hlist_del(&cg->hlist);
+ css_set_count--;
+
+ list_for_each_entry_safe(link, saved_link, &cg->cg_links,
+ cg_link_list) {
+ struct cgroup *cgrp = link->cgrp;
+ list_del(&link->cg_link_list);
+ list_del(&link->cgrp_link_list);
if (atomic_dec_and_test(&cgrp->count) &&
notify_on_release(cgrp)) {
if (taskexit)
set_bit(CGRP_RELEASABLE, &cgrp->flags);
check_for_release(cgrp);
}
+
+ kfree(link);
}
- rcu_read_unlock();
- kfree(cg);
+
+ write_unlock(&css_set_lock);
+ call_rcu(&cg->rcu_head, free_css_set_rcu);
}
/*
@@ -338,6 +338,78 @@ static inline void put_css_set_taskexit(struct css_set *cg)
}
/*
+ * compare_css_sets - helper function for find_existing_css_set().
+ * @cg: candidate css_set being tested
+ * @old_cg: existing css_set for a task
+ * @new_cgrp: cgroup that's being entered by the task
+ * @template: desired set of css pointers in css_set (pre-calculated)
+ *
+ * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * which "new_cgrp" belongs to, for which it should match "new_cgrp".
+ */
+static bool compare_css_sets(struct css_set *cg,
+ struct css_set *old_cg,
+ struct cgroup *new_cgrp,
+ struct cgroup_subsys_state *template[])
+{
+ struct list_head *l1, *l2;
+
+ if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
+ /* Not all subsystems matched */
+ return false;
+ }
+
+ /*
+ * Compare cgroup pointers in order to distinguish between
+ * different cgroups in hierarchies with no subsystems. We
+ * could get by with just this check alone (and skip the
+ * memcmp above) but on most setups the memcmp check will
+ * avoid the need for this more expensive check on almost all
+ * candidates.
+ */
+
+ l1 = &cg->cg_links;
+ l2 = &old_cg->cg_links;
+ while (1) {
+ struct cg_cgroup_link *cgl1, *cgl2;
+ struct cgroup *cg1, *cg2;
+
+ l1 = l1->next;
+ l2 = l2->next;
+ /* See if we reached the end - both lists are equal length. */
+ if (l1 == &cg->cg_links) {
+ BUG_ON(l2 != &old_cg->cg_links);
+ break;
+ } else {
+ BUG_ON(l2 == &old_cg->cg_links);
+ }
+ /* Locate the cgroups associated with these links. */
+ cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
+ cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
+ cg1 = cgl1->cgrp;
+ cg2 = cgl2->cgrp;
+ /* Hierarchies should be linked in the same order. */
+ BUG_ON(cg1->root != cg2->root);
+
+ /*
+ * If this hierarchy is the hierarchy of the cgroup
+ * that's changing, then we need to check that this
+ * css_set points to the new cgroup; if it's any other
+ * hierarchy, then this css_set should point to the
+ * same cgroup as the old css_set.
+ */
+ if (cg1->root == new_cgrp->root) {
+ if (cg1 != new_cgrp)
+ return false;
+ } else {
+ if (cg1 != cg2)
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
* find_existing_css_set() is a helper for
* find_css_set(), and checks to see whether an existing
* css_set is suitable.
@@ -378,10 +450,11 @@ static struct css_set *find_existing_css_set(
hhead = css_set_hash(template);
hlist_for_each_entry(cg, node, hhead, hlist) {
- if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
- /* All subsystems matched */
- return cg;
- }
+ if (!compare_css_sets(cg, oldcg, cgrp, template))
+ continue;
+
+ /* This css_set matches what we need */
+ return cg;
}
/* No existing cgroup group matched */
@@ -435,8 +508,14 @@ static void link_css_set(struct list_head *tmp_cg_links,
link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
cgrp_link_list);
link->cg = cg;
+ link->cgrp = cgrp;
+ atomic_inc(&cgrp->count);
list_move(&link->cgrp_link_list, &cgrp->css_sets);
- list_add(&link->cg_link_list, &cg->cg_links);
+ /*
+ * Always add links to the tail of the list so that the list
+ * is sorted by order of hierarchy creation
+ */
+ list_add_tail(&link->cg_link_list, &cg->cg_links);
}
/*
@@ -451,11 +530,11 @@ static struct css_set *find_css_set(
{
struct css_set *res;
struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
- int i;
struct list_head tmp_cg_links;
struct hlist_head *hhead;
+ struct cg_cgroup_link *link;
/* First see if we already have a cgroup group that matches
* the desired set */
@@ -489,20 +568,12 @@ static struct css_set *find_css_set(
write_lock(&css_set_lock);
/* Add reference counts and links from the new css_set. */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup *cgrp = res->subsys[i]->cgroup;
- struct cgroup_subsys *ss = subsys[i];
- atomic_inc(&cgrp->count);
- /*
- * We want to add a link once per cgroup, so we
- * only do it for the first subsystem in each
- * hierarchy
- */
- if (ss->root->subsys_list.next == &ss->sibling)
- link_css_set(&tmp_cg_links, res, cgrp);
+ list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
+ struct cgroup *c = link->cgrp;
+ if (c->root == cgrp->root)
+ c = cgrp;
+ link_css_set(&tmp_cg_links, res, c);
}
- if (list_empty(&rootnode.subsys_list))
- link_css_set(&tmp_cg_links, res, dummytop);
BUG_ON(!list_empty(&tmp_cg_links));
@@ -518,6 +589,41 @@ static struct css_set *find_css_set(
}
/*
+ * Return the cgroup for "task" from the given hierarchy. Must be
+ * called with cgroup_mutex held.
+ */
+static struct cgroup *task_cgroup_from_root(struct task_struct *task,
+ struct cgroupfs_root *root)
+{
+ struct css_set *css;
+ struct cgroup *res = NULL;
+
+ BUG_ON(!mutex_is_locked(&cgroup_mutex));
+ read_lock(&css_set_lock);
+ /*
+ * No need to lock the task - since we hold cgroup_mutex the
+ * task can't change groups, so the only thing that can happen
+ * is that it exits and its css is set back to init_css_set.
+ */
+ css = task->cgroups;
+ if (css == &init_css_set) {
+ res = &root->top_cgroup;
+ } else {
+ struct cg_cgroup_link *link;
+ list_for_each_entry(link, &css->cg_links, cg_link_list) {
+ struct cgroup *c = link->cgrp;
+ if (c->root == root) {
+ res = c;
+ break;
+ }
+ }
+ }
+ read_unlock(&css_set_lock);
+ BUG_ON(!res);
+ return res;
+}
+
+/*
* There is one global cgroup mutex. We also require taking
* task_lock() when dereferencing a task's cgroup subsys pointers.
* See "The task_lock() exception", at the end of this comment.
@@ -596,7 +702,7 @@ void cgroup_unlock(void)
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
-static struct inode_operations cgroup_dir_inode_operations;
+static const struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
@@ -677,6 +783,12 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
*/
deactivate_super(cgrp->root->sb);
+ /*
+ * if we're getting rid of the cgroup, refcount should ensure
+ * that there are no pidlists left.
+ */
+ BUG_ON(!list_empty(&cgrp->pidlists));
+
call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
}
iput(inode);
@@ -841,6 +953,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",noprefix");
if (strlen(root->release_agent_path))
seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+ if (strlen(root->name))
+ seq_printf(seq, ",name=%s", root->name);
mutex_unlock(&cgroup_mutex);
return 0;
}
@@ -849,6 +963,12 @@ struct cgroup_sb_opts {
unsigned long subsys_bits;
unsigned long flags;
char *release_agent;
+ char *name;
+ /* User explicitly requested empty subsystem */
+ bool none;
+
+ struct cgroupfs_root *new_root;
+
};
/* Convert a hierarchy specifier into a bitmask of subsystems and
@@ -863,9 +983,7 @@ static int parse_cgroupfs_options(char *data,
mask = ~(1UL << cpuset_subsys_id);
#endif
- opts->subsys_bits = 0;
- opts->flags = 0;
- opts->release_agent = NULL;
+ memset(opts, 0, sizeof(*opts));
while ((token = strsep(&o, ",")) != NULL) {
if (!*token)
@@ -879,17 +997,42 @@ static int parse_cgroupfs_options(char *data,
if (!ss->disabled)
opts->subsys_bits |= 1ul << i;
}
+ } else if (!strcmp(token, "none")) {
+ /* Explicitly have no subsystems */
+ opts->none = true;
} else if (!strcmp(token, "noprefix")) {
set_bit(ROOT_NOPREFIX, &opts->flags);
} else if (!strncmp(token, "release_agent=", 14)) {
/* Specifying two release agents is forbidden */
if (opts->release_agent)
return -EINVAL;
- opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
+ opts->release_agent =
+ kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
if (!opts->release_agent)
return -ENOMEM;
- strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
- opts->release_agent[PATH_MAX - 1] = 0;
+ } else if (!strncmp(token, "name=", 5)) {
+ int i;
+ const char *name = token + 5;
+ /* Can't specify an empty name */
+ if (!strlen(name))
+ return -EINVAL;
+ /* Must match [\w.-]+ */
+ for (i = 0; i < strlen(name); i++) {
+ char c = name[i];
+ if (isalnum(c))
+ continue;
+ if ((c == '.') || (c == '-') || (c == '_'))
+ continue;
+ return -EINVAL;
+ }
+ /* Specifying two names is forbidden */
+ if (opts->name)
+ return -EINVAL;
+ opts->name = kstrndup(name,
+ MAX_CGROUP_ROOT_NAMELEN,
+ GFP_KERNEL);
+ if (!opts->name)
+ return -ENOMEM;
} else {
struct cgroup_subsys *ss;
int i;
@@ -906,6 +1049,8 @@ static int parse_cgroupfs_options(char *data,
}
}
+ /* Consistency checks */
+
/*
* Option noprefix was introduced just for backward compatibility
* with the old cpuset, so we allow noprefix only if mounting just
@@ -915,8 +1060,16 @@ static int parse_cgroupfs_options(char *data,
(opts->subsys_bits & mask))
return -EINVAL;
- /* We can't have an empty hierarchy */
- if (!opts->subsys_bits)
+
+ /* Can't specify "none" and some subsystems */
+ if (opts->subsys_bits && opts->none)
+ return -EINVAL;
+
+ /*
+ * We either have to specify by name or by subsystems. (So all
+ * empty hierarchies must have a name).
+ */
+ if (!opts->subsys_bits && !opts->name)
return -EINVAL;
return 0;
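
The new "none" and "name=" options make it possible to mount a hierarchy that has a name but no bound subsystems. A hedged user-space sketch of mounting such a hierarchy (the mount point and hierarchy name are arbitrary, and the mount point must already exist):

/* Illustrative only: mount an empty, named cgroup hierarchy using the
 * "none" and "name=" options parsed above. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("none", "/mnt/cgroup-tracker", "cgroup", 0,
		  "none,name=tracker") < 0) {
		perror("mount cgroup");
		return 1;
	}
	return 0;
}
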
@@ -944,6 +1097,12 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
goto out_unlock;
}
+ /* Don't allow name to change at remount */
+ if (opts.name && strcmp(opts.name, root->name)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = rebind_subsystems(root, opts.subsys_bits);
if (ret)
goto out_unlock;
@@ -955,13 +1114,14 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
strcpy(root->release_agent_path, opts.release_agent);
out_unlock:
kfree(opts.release_agent);
+ kfree(opts.name);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
unlock_kernel();
return ret;
}
-static struct super_operations cgroup_ops = {
+static const struct super_operations cgroup_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = cgroup_show_options,
@@ -974,9 +1134,10 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->children);
INIT_LIST_HEAD(&cgrp->css_sets);
INIT_LIST_HEAD(&cgrp->release_list);
- INIT_LIST_HEAD(&cgrp->pids_list);
- init_rwsem(&cgrp->pids_mutex);
+ INIT_LIST_HEAD(&cgrp->pidlists);
+ mutex_init(&cgrp->pidlist_mutex);
}
+
static void init_cgroup_root(struct cgroupfs_root *root)
{
struct cgroup *cgrp = &root->top_cgroup;
@@ -988,33 +1149,106 @@ static void init_cgroup_root(struct cgroupfs_root *root)
init_cgroup_housekeeping(cgrp);
}
+static bool init_root_id(struct cgroupfs_root *root)
+{
+ int ret = 0;
+
+ do {
+ if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
+ return false;
+ spin_lock(&hierarchy_id_lock);
+ /* Try to allocate the next unused ID */
+ ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
+ &root->hierarchy_id);
+ if (ret == -ENOSPC)
+ /* Try again starting from 0 */
+ ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
+ if (!ret) {
+ next_hierarchy_id = root->hierarchy_id + 1;
+ } else if (ret != -EAGAIN) {
+ /* Can only get here if the 31-bit IDR is full ... */
+ BUG_ON(ret);
+ }
+ spin_unlock(&hierarchy_id_lock);
+ } while (ret);
+ return true;
+}
+
static int cgroup_test_super(struct super_block *sb, void *data)
{
- struct cgroupfs_root *new = data;
+ struct cgroup_sb_opts *opts = data;
struct cgroupfs_root *root = sb->s_fs_info;
- /* First check subsystems */
- if (new->subsys_bits != root->subsys_bits)
- return 0;
+ /* If we asked for a name then it must match */
+ if (opts->name && strcmp(opts->name, root->name))
+ return 0;
- /* Next check flags */
- if (new->flags != root->flags)
+ /*
+ * If we asked for subsystems (or explicitly for no
+ * subsystems) then they must match
+ */
+ if ((opts->subsys_bits || opts->none)
+ && (opts->subsys_bits != root->subsys_bits))
return 0;
return 1;
}
+static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
+{
+ struct cgroupfs_root *root;
+
+ if (!opts->subsys_bits && !opts->none)
+ return NULL;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+
+ if (!init_root_id(root)) {
+ kfree(root);
+ return ERR_PTR(-ENOMEM);
+ }
+ init_cgroup_root(root);
+
+ root->subsys_bits = opts->subsys_bits;
+ root->flags = opts->flags;
+ if (opts->release_agent)
+ strcpy(root->release_agent_path, opts->release_agent);
+ if (opts->name)
+ strcpy(root->name, opts->name);
+ return root;
+}
+
+static void cgroup_drop_root(struct cgroupfs_root *root)
+{
+ if (!root)
+ return;
+
+ BUG_ON(!root->hierarchy_id);
+ spin_lock(&hierarchy_id_lock);
+ ida_remove(&hierarchy_ida, root->hierarchy_id);
+ spin_unlock(&hierarchy_id_lock);
+ kfree(root);
+}
+
static int cgroup_set_super(struct super_block *sb, void *data)
{
int ret;
- struct cgroupfs_root *root = data;
+ struct cgroup_sb_opts *opts = data;
+
+ /* If we don't have a new root, we can't set up a new sb */
+ if (!opts->new_root)
+ return -EINVAL;
+
+ BUG_ON(!opts->subsys_bits && !opts->none);
ret = set_anon_super(sb, NULL);
if (ret)
return ret;
- sb->s_fs_info = root;
- root->sb = sb;
+ sb->s_fs_info = opts->new_root;
+ opts->new_root->sb = sb;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1051,48 +1285,43 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
void *data, struct vfsmount *mnt)
{
struct cgroup_sb_opts opts;
+ struct cgroupfs_root *root;
int ret = 0;
struct super_block *sb;
- struct cgroupfs_root *root;
- struct list_head tmp_cg_links;
+ struct cgroupfs_root *new_root;
/* First find the desired set of subsystems */
ret = parse_cgroupfs_options(data, &opts);
- if (ret) {
- kfree(opts.release_agent);
- return ret;
- }
-
- root = kzalloc(sizeof(*root), GFP_KERNEL);
- if (!root) {
- kfree(opts.release_agent);
- return -ENOMEM;
- }
+ if (ret)
+ goto out_err;
- init_cgroup_root(root);
- root->subsys_bits = opts.subsys_bits;
- root->flags = opts.flags;
- if (opts.release_agent) {
- strcpy(root->release_agent_path, opts.release_agent);
- kfree(opts.release_agent);
+ /*
+ * Allocate a new cgroup root. We may not need it if we're
+ * reusing an existing hierarchy.
+ */
+ new_root = cgroup_root_from_opts(&opts);
+ if (IS_ERR(new_root)) {
+ ret = PTR_ERR(new_root);
+ goto out_err;
}
+ opts.new_root = new_root;
- sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);
-
+ /* Locate an existing or new sb for this hierarchy */
+ sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
if (IS_ERR(sb)) {
- kfree(root);
- return PTR_ERR(sb);
+ ret = PTR_ERR(sb);
+ cgroup_drop_root(opts.new_root);
+ goto out_err;
}
- if (sb->s_fs_info != root) {
- /* Reusing an existing superblock */
- BUG_ON(sb->s_root == NULL);
- kfree(root);
- root = NULL;
- } else {
- /* New superblock */
+ root = sb->s_fs_info;
+ BUG_ON(!root);
+ if (root == opts.new_root) {
+ /* We used the new root structure, so this is a new hierarchy */
+ struct list_head tmp_cg_links;
struct cgroup *root_cgrp = &root->top_cgroup;
struct inode *inode;
+ struct cgroupfs_root *existing_root;
int i;
BUG_ON(sb->s_root != NULL);
@@ -1105,6 +1334,18 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
+ if (strlen(root->name)) {
+ /* Check for name clashes with existing mounts */
+ for_each_active_root(existing_root) {
+ if (!strcmp(existing_root->name, root->name)) {
+ ret = -EBUSY;
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
+ goto drop_new_super;
+ }
+ }
+ }
+
/*
* We're accessing css_set_count without locking
* css_set_lock here, but that's OK - it can only be
@@ -1123,7 +1364,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
if (ret == -EBUSY) {
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
- goto free_cg_links;
+ free_cg_links(&tmp_cg_links);
+ goto drop_new_super;
}
/* EBUSY should be the only error here */
@@ -1155,17 +1397,27 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
BUG_ON(root->number_of_cgroups != 1);
cgroup_populate_dir(root_cgrp);
- mutex_unlock(&inode->i_mutex);
mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
+ } else {
+ /*
+ * We re-used an existing hierarchy - the new root (if
+ * any) is not needed
+ */
+ cgroup_drop_root(opts.new_root);
}
simple_set_mnt(mnt, sb);
+ kfree(opts.release_agent);
+ kfree(opts.name);
return 0;
- free_cg_links:
- free_cg_links(&tmp_cg_links);
drop_new_super:
deactivate_locked_super(sb);
+ out_err:
+ kfree(opts.release_agent);
+ kfree(opts.name);
+
return ret;
}
@@ -1211,7 +1463,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
mutex_unlock(&cgroup_mutex);
kill_litter_super(sb);
- kfree(root);
+ cgroup_drop_root(root);
}
static struct file_system_type cgroup_fs_type = {
@@ -1276,27 +1528,6 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
return 0;
}
-/*
- * Return the first subsystem attached to a cgroup's hierarchy, and
- * its subsystem id.
- */
-
-static void get_first_subsys(const struct cgroup *cgrp,
- struct cgroup_subsys_state **css, int *subsys_id)
-{
- const struct cgroupfs_root *root = cgrp->root;
- const struct cgroup_subsys *test_ss;
- BUG_ON(list_empty(&root->subsys_list));
- test_ss = list_entry(root->subsys_list.next,
- struct cgroup_subsys, sibling);
- if (css) {
- *css = cgrp->subsys[test_ss->subsys_id];
- BUG_ON(!*css);
- }
- if (subsys_id)
- *subsys_id = test_ss->subsys_id;
-}
-
/**
* cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
* @cgrp: the cgroup the task is attaching to
@@ -1313,18 +1544,15 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
struct css_set *cg;
struct css_set *newcg;
struct cgroupfs_root *root = cgrp->root;
- int subsys_id;
-
- get_first_subsys(cgrp, NULL, &subsys_id);
/* Nothing to do if the task is already in that cgroup */
- oldcgrp = task_cgroup(tsk, subsys_id);
+ oldcgrp = task_cgroup_from_root(tsk, root);
if (cgrp == oldcgrp)
return 0;
for_each_subsys(root, ss) {
if (ss->can_attach) {
- retval = ss->can_attach(ss, cgrp, tsk);
+ retval = ss->can_attach(ss, cgrp, tsk, false);
if (retval)
return retval;
}
@@ -1362,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
for_each_subsys(root, ss) {
if (ss->attach)
- ss->attach(ss, cgrp, oldcgrp, tsk);
+ ss->attach(ss, cgrp, oldcgrp, tsk, false);
}
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
synchronize_rcu();
@@ -1423,15 +1651,6 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
return ret;
}
-/* The various types of files and directories in a cgroup file system */
-enum cgroup_filetype {
- FILE_ROOT,
- FILE_DIR,
- FILE_TASKLIST,
- FILE_NOTIFY_ON_RELEASE,
- FILE_RELEASE_AGENT,
-};
-
/**
* cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
* @cgrp: the cgroup to be checked for liveness
@@ -1711,7 +1930,7 @@ static struct file_operations cgroup_file_operations = {
.release = cgroup_file_release,
};
-static struct inode_operations cgroup_dir_inode_operations = {
+static const struct inode_operations cgroup_dir_inode_operations = {
.lookup = simple_lookup,
.mkdir = cgroup_mkdir,
.rmdir = cgroup_rmdir,
@@ -1876,7 +2095,7 @@ int cgroup_task_count(const struct cgroup *cgrp)
* the start of a css_set
*/
static void cgroup_advance_iter(struct cgroup *cgrp,
- struct cgroup_iter *it)
+ struct cgroup_iter *it)
{
struct list_head *l = it->cg_link;
struct cg_cgroup_link *link;
@@ -2129,7 +2348,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
}
/*
- * Stuff for reading the 'tasks' file.
+ * Stuff for reading the 'tasks'/'procs' files.
*
* Reading this file can return large amounts of data if a cgroup has
* *lots* of attached tasks. So it may need several calls to read(),
@@ -2139,27 +2358,196 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
*/
/*
- * Load into 'pidarray' up to 'npids' of the tasks using cgroup
- * 'cgrp'. Return actual number of pids loaded. No need to
- * task_lock(p) when reading out p->cgroup, since we're in an RCU
- * read section, so the css_set can't go away, and is
- * immutable after creation.
+ * The following helpers "fix" the issue where there are more pids
+ * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
+ * TODO: replace with a kernel-wide solution to this problem
+ */
+#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
+static void *pidlist_allocate(int count)
+{
+ if (PIDLIST_TOO_LARGE(count))
+ return vmalloc(count * sizeof(pid_t));
+ else
+ return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+}
+static void pidlist_free(void *p)
+{
+ if (is_vmalloc_addr(p))
+ vfree(p);
+ else
+ kfree(p);
+}
+static void *pidlist_resize(void *p, int newcount)
+{
+ void *newlist;
+ /* note: if new alloc fails, old p will still be valid either way */
+ if (is_vmalloc_addr(p)) {
+ newlist = vmalloc(newcount * sizeof(pid_t));
+ if (!newlist)
+ return NULL;
+ memcpy(newlist, p, newcount * sizeof(pid_t));
+ vfree(p);
+ } else {
+ newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
+ }
+ return newlist;
+}
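The "old p will still be valid either way" note mirrors the libc realloc() contract; a minimal userspace sketch of the same rule, with illustrative names only (not part of the patch):

	#include <stdlib.h>

	/* Try to shrink 'old' to 'newcount' ints; on failure the caller keeps 'old'. */
	static int *shrink_or_keep(int *old, size_t newcount)
	{
		int *tmp = realloc(old, newcount * sizeof(*old));

		/* realloc() leaves the original block untouched when it fails,
		 * so handing 'old' back is safe - the same rule pidlist_resize()
		 * above relies on when its new allocation fails. */
		return tmp ? tmp : old;
	}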
+
+/*
+ * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
+ * If the new stripped list is sufficiently smaller and there's enough memory
+ * to allocate a new buffer, will let go of the unneeded memory. Returns the
+ * number of unique elements.
+ */
+/* is the size difference enough that we should re-allocate the array? */
+#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
+static int pidlist_uniq(pid_t **p, int length)
+{
+ int src, dest = 1;
+ pid_t *list = *p;
+ pid_t *newlist;
+
+ /*
+ * we presume the 0th element is unique, so src starts at 1. Handle the
+ * trivial edge cases first; no work needs to be done for either.
+ */
+ if (length == 0 || length == 1)
+ return length;
+ /* src and dest walk down the list; dest counts unique elements */
+ for (src = 1; src < length; src++) {
+ /* find next unique element */
+ while (list[src] == list[src-1]) {
+ src++;
+ if (src == length)
+ goto after;
+ }
+ /* dest always points to where the next unique element goes */
+ list[dest] = list[src];
+ dest++;
+ }
+after:
+ /*
+ * if the length difference is large enough, we want to allocate a
+ * smaller buffer to save memory. if this fails due to out of memory,
+ * we'll just stay with what we've got.
+ */
+ if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
+ newlist = pidlist_resize(list, dest);
+ if (newlist)
+ *p = newlist;
+ }
+ return dest;
+}
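The walk above assumes a pre-sorted array and squeezes duplicates out in place. Reduced to a standalone userspace sketch (names are illustrative, not from the patch), the core of it is:

	#include <stdio.h>

	/* In-place dedup of a sorted array; returns the number of unique elements. */
	static int dedup_sorted(int *list, int length)
	{
		int src, dest = 1;

		if (length < 2)
			return length;
		for (src = 1; src < length; src++) {
			if (list[src] != list[src - 1])
				list[dest++] = list[src];
		}
		return dest;	/* unique elements are now packed at the front */
	}

	int main(void)
	{
		int pids[] = { 3, 3, 7, 7, 7, 9 };
		int i, n = dedup_sorted(pids, 6);

		for (i = 0; i < n; i++)
			printf("%d\n", pids[i]);	/* prints 3, 7, 9 */
		return 0;
	}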
+
+static int cmppid(const void *a, const void *b)
+{
+ return *(pid_t *)a - *(pid_t *)b;
+}
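cmppid() is the comparator handed to the kernel's sort(); the same shape works with libc qsort(), as in this small sketch (illustrative only - pids are positive and bounded, so the subtraction cannot overflow):

	#include <stdio.h>
	#include <stdlib.h>

	static int cmpint(const void *a, const void *b)
	{
		return *(const int *)a - *(const int *)b;
	}

	int main(void)
	{
		int pids[] = { 42, 7, 19, 7 };
		int i;

		qsort(pids, 4, sizeof(int), cmpint);
		for (i = 0; i < 4; i++)
			printf("%d\n", pids[i]);	/* 7, 7, 19, 42 */
		return 0;
	}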
+
+/*
+ * find the appropriate pidlist for our purpose (given procs vs tasks)
+ * returns with the lock on that pidlist already held, and takes care
+ * of the use count, or returns NULL with no locks held if we're out of
+ * memory.
*/
-static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
+static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
+ enum cgroup_filetype type)
{
- int n = 0, pid;
+ struct cgroup_pidlist *l;
+ /* don't need task_nsproxy() if we're looking at ourself */
+ struct pid_namespace *ns = get_pid_ns(current->nsproxy->pid_ns);
+ /*
+ * We can't drop the pidlist_mutex before taking the l->mutex in case
+ * the last ref-holder is trying to remove l from the list at the same
+ * time. Holding the pidlist_mutex precludes somebody taking whichever
+ * list we find out from under us - compare cgroup_release_pid_array().
+ */
+ mutex_lock(&cgrp->pidlist_mutex);
+ list_for_each_entry(l, &cgrp->pidlists, links) {
+ if (l->key.type == type && l->key.ns == ns) {
+ /* found a matching list - drop the extra refcount */
+ put_pid_ns(ns);
+ /* make sure l doesn't vanish out from under us */
+ down_write(&l->mutex);
+ mutex_unlock(&cgrp->pidlist_mutex);
+ l->use_count++;
+ return l;
+ }
+ }
+ /* entry not found; create a new one */
+ l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
+ if (!l) {
+ mutex_unlock(&cgrp->pidlist_mutex);
+ put_pid_ns(ns);
+ return l;
+ }
+ init_rwsem(&l->mutex);
+ down_write(&l->mutex);
+ l->key.type = type;
+ l->key.ns = ns;
+ l->use_count = 0; /* don't increment here */
+ l->list = NULL;
+ l->owner = cgrp;
+ list_add(&l->links, &cgrp->pidlists);
+ mutex_unlock(&cgrp->pidlist_mutex);
+ return l;
+}
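Stripped of the namespace key and the rwsem handoff, the function above is the usual find-or-create-under-a-lock pattern. A pthread-based sketch of just that pattern (every name here is invented for illustration, none of it is kernel API):

	#include <pthread.h>
	#include <stdlib.h>

	struct entry {
		int key;
		int refcount;
		struct entry *next;
	};

	static struct entry *head;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct entry *find_or_create(int key)
	{
		struct entry *e;

		pthread_mutex_lock(&list_lock);
		for (e = head; e; e = e->next) {
			if (e->key == key) {
				e->refcount++;		/* reuse the existing entry */
				pthread_mutex_unlock(&list_lock);
				return e;
			}
		}
		e = malloc(sizeof(*e));			/* not found: create one */
		if (e) {
			e->key = key;
			e->refcount = 1;
			e->next = head;
			head = e;
		}
		pthread_mutex_unlock(&list_lock);
		return e;
	}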
+
+/*
+ * Load a cgroup's pidarray with either procs' tgids or tasks' pids
+ */
+static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
+ struct cgroup_pidlist **lp)
+{
+ pid_t *array;
+ int length;
+ int pid, n = 0; /* used for populating the array */
struct cgroup_iter it;
struct task_struct *tsk;
+ struct cgroup_pidlist *l;
+
+ /*
+ * If cgroup gets more users after we read count, we won't have
+ * enough space - tough. This race is indistinguishable to the
+ * caller from the case that the additional cgroup users didn't
+ * show up until sometime later on.
+ */
+ length = cgroup_task_count(cgrp);
+ array = pidlist_allocate(length);
+ if (!array)
+ return -ENOMEM;
+ /* now, populate the array */
cgroup_iter_start(cgrp, &it);
while ((tsk = cgroup_iter_next(cgrp, &it))) {
- if (unlikely(n == npids))
+ if (unlikely(n == length))
break;
- pid = task_pid_vnr(tsk);
- if (pid > 0)
- pidarray[n++] = pid;
+ /* get tgid or pid for procs or tasks file respectively */
+ if (type == CGROUP_FILE_PROCS)
+ pid = task_tgid_vnr(tsk);
+ else
+ pid = task_pid_vnr(tsk);
+ if (pid > 0) /* make sure to only use valid results */
+ array[n++] = pid;
}
cgroup_iter_end(cgrp, &it);
- return n;
+ length = n;
+ /* now sort & (if procs) strip out duplicates */
+ sort(array, length, sizeof(pid_t), cmppid, NULL);
+ if (type == CGROUP_FILE_PROCS)
+ length = pidlist_uniq(&array, length);
+ l = cgroup_pidlist_find(cgrp, type);
+ if (!l) {
+ pidlist_free(array);
+ return -ENOMEM;
+ }
+ /* store array, freeing old if necessary - lock already held */
+ pidlist_free(l->list);
+ l->list = array;
+ l->length = length;
+ l->use_count++;
+ up_write(&l->mutex);
+ *lp = l;
+ return 0;
}
/**
@@ -2216,37 +2604,14 @@ err:
return ret;
}
-/*
- * Cache pids for all threads in the same pid namespace that are
- * opening the same "tasks" file.
- */
-struct cgroup_pids {
- /* The node in cgrp->pids_list */
- struct list_head list;
- /* The cgroup those pids belong to */
- struct cgroup *cgrp;
- /* The namepsace those pids belong to */
- struct pid_namespace *ns;
- /* Array of process ids in the cgroup */
- pid_t *tasks_pids;
- /* How many files are using the this tasks_pids array */
- int use_count;
- /* Length of the current tasks_pids array */
- int length;
-};
-
-static int cmppid(const void *a, const void *b)
-{
- return *(pid_t *)a - *(pid_t *)b;
-}
/*
- * seq_file methods for the "tasks" file. The seq_file position is the
+ * seq_file methods for the tasks/procs files. The seq_file position is the
* next pid to display; the seq_file iterator is a pointer to the pid
- * in the cgroup->tasks_pids array.
+ * in the cgroup->l->list array.
*/
-static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
+static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
/*
* Initially we receive a position value that corresponds to
@@ -2254,48 +2619,45 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
* after a seek to the start). Use a binary-search to find the
* next pid to display, if any
*/
- struct cgroup_pids *cp = s->private;
- struct cgroup *cgrp = cp->cgrp;
+ struct cgroup_pidlist *l = s->private;
int index = 0, pid = *pos;
int *iter;
- down_read(&cgrp->pids_mutex);
+ down_read(&l->mutex);
if (pid) {
- int end = cp->length;
+ int end = l->length;
while (index < end) {
int mid = (index + end) / 2;
- if (cp->tasks_pids[mid] == pid) {
+ if (l->list[mid] == pid) {
index = mid;
break;
- } else if (cp->tasks_pids[mid] <= pid)
+ } else if (l->list[mid] <= pid)
index = mid + 1;
else
end = mid;
}
}
/* If we're off the end of the array, we're done */
- if (index >= cp->length)
+ if (index >= l->length)
return NULL;
/* Update the abstract position to be the actual pid that we found */
- iter = cp->tasks_pids + index;
+ iter = l->list + index;
*pos = *iter;
return iter;
}
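The loop above is a plain binary search for the saved seq_file position. Pulled out of its kernel context, it is roughly the following (userspace sketch, names assumed):

	/* Find the index of 'pid', or of the first entry greater than it.
	 * e.g. resume_index((int[]){2, 4, 6, 8}, 4, 5) returns 2 (the slot of 6). */
	static int resume_index(const int *list, int length, int pid)
	{
		int index = 0, end = length;

		while (index < end) {
			int mid = (index + end) / 2;

			if (list[mid] == pid) {
				index = mid;
				break;
			} else if (list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
		return index;	/* index == length means nothing left to show */
	}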
-static void cgroup_tasks_stop(struct seq_file *s, void *v)
+static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
- struct cgroup_pids *cp = s->private;
- struct cgroup *cgrp = cp->cgrp;
- up_read(&cgrp->pids_mutex);
+ struct cgroup_pidlist *l = s->private;
+ up_read(&l->mutex);
}
-static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
+static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct cgroup_pids *cp = s->private;
- int *p = v;
- int *end = cp->tasks_pids + cp->length;
-
+ struct cgroup_pidlist *l = s->private;
+ pid_t *p = v;
+ pid_t *end = l->list + l->length;
/*
* Advance to the next pid in the array. If this goes off the
* end, we're done
@@ -2309,124 +2671,107 @@ static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
}
}
-static int cgroup_tasks_show(struct seq_file *s, void *v)
+static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
return seq_printf(s, "%d\n", *(int *)v);
}
-static struct seq_operations cgroup_tasks_seq_operations = {
- .start = cgroup_tasks_start,
- .stop = cgroup_tasks_stop,
- .next = cgroup_tasks_next,
- .show = cgroup_tasks_show,
+/*
+ * seq_operations functions for iterating on pidlists through seq_file -
+ * independent of whether it's tasks or procs
+ */
+static const struct seq_operations cgroup_pidlist_seq_operations = {
+ .start = cgroup_pidlist_start,
+ .stop = cgroup_pidlist_stop,
+ .next = cgroup_pidlist_next,
+ .show = cgroup_pidlist_show,
};
-static void release_cgroup_pid_array(struct cgroup_pids *cp)
+static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
- struct cgroup *cgrp = cp->cgrp;
-
- down_write(&cgrp->pids_mutex);
- BUG_ON(!cp->use_count);
- if (!--cp->use_count) {
- list_del(&cp->list);
- put_pid_ns(cp->ns);
- kfree(cp->tasks_pids);
- kfree(cp);
+ /*
+ * the case where we're the last user of this particular pidlist will
+ * have us remove it from the cgroup's list, which entails taking the
+ * mutex. Since cgroup_pidlist_find() takes the pidlist's rwsem while
+ * already holding cgroup->pidlist_mutex, we must take pidlist_mutex first.
+ */
+ mutex_lock(&l->owner->pidlist_mutex);
+ down_write(&l->mutex);
+ BUG_ON(!l->use_count);
+ if (!--l->use_count) {
+ /* we're the last user if refcount is 0; remove and free */
+ list_del(&l->links);
+ mutex_unlock(&l->owner->pidlist_mutex);
+ pidlist_free(l->list);
+ put_pid_ns(l->key.ns);
+ up_write(&l->mutex);
+ kfree(l);
+ return;
}
- up_write(&cgrp->pids_mutex);
+ mutex_unlock(&l->owner->pidlist_mutex);
+ up_write(&l->mutex);
}
-static int cgroup_tasks_release(struct inode *inode, struct file *file)
+static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
- struct cgroup_pids *cp;
-
+ struct cgroup_pidlist *l;
if (!(file->f_mode & FMODE_READ))
return 0;
-
- seq = file->private_data;
- cp = seq->private;
-
- release_cgroup_pid_array(cp);
+ /*
+ * the seq_file was only set up if the file was opened for reading, so
+ * only dereference file->private_data in that case.
+ */
+ l = ((struct seq_file *)file->private_data)->private;
+ cgroup_release_pid_array(l);
return seq_release(inode, file);
}
-static struct file_operations cgroup_tasks_operations = {
+static const struct file_operations cgroup_pidlist_operations = {
.read = seq_read,
.llseek = seq_lseek,
.write = cgroup_file_write,
- .release = cgroup_tasks_release,
+ .release = cgroup_pidlist_release,
};
/*
- * Handle an open on 'tasks' file. Prepare an array containing the
- * process id's of tasks currently attached to the cgroup being opened.
+ * The following functions handle opens on a file that displays a pidlist
+ * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
+ * in the cgroup.
*/
-
-static int cgroup_tasks_open(struct inode *unused, struct file *file)
+/* helper function for the two below it */
+static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
- struct pid_namespace *ns = current->nsproxy->pid_ns;
- struct cgroup_pids *cp;
- pid_t *pidarray;
- int npids;
+ struct cgroup_pidlist *l;
int retval;
/* Nothing to do for write-only files */
if (!(file->f_mode & FMODE_READ))
return 0;
- /*
- * If cgroup gets more users after we read count, we won't have
- * enough space - tough. This race is indistinguishable to the
- * caller from the case that the additional cgroup users didn't
- * show up until sometime later on.
- */
- npids = cgroup_task_count(cgrp);
- pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
- if (!pidarray)
- return -ENOMEM;
- npids = pid_array_load(pidarray, npids, cgrp);
- sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
-
- /*
- * Store the array in the cgroup, freeing the old
- * array if necessary
- */
- down_write(&cgrp->pids_mutex);
-
- list_for_each_entry(cp, &cgrp->pids_list, list) {
- if (ns == cp->ns)
- goto found;
- }
-
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp) {
- up_write(&cgrp->pids_mutex);
- kfree(pidarray);
- return -ENOMEM;
- }
- cp->cgrp = cgrp;
- cp->ns = ns;
- get_pid_ns(ns);
- list_add(&cp->list, &cgrp->pids_list);
-found:
- kfree(cp->tasks_pids);
- cp->tasks_pids = pidarray;
- cp->length = npids;
- cp->use_count++;
- up_write(&cgrp->pids_mutex);
-
- file->f_op = &cgroup_tasks_operations;
+ /* have the array populated */
+ retval = pidlist_array_load(cgrp, type, &l);
+ if (retval)
+ return retval;
+ /* configure file information */
+ file->f_op = &cgroup_pidlist_operations;
- retval = seq_open(file, &cgroup_tasks_seq_operations);
+ retval = seq_open(file, &cgroup_pidlist_seq_operations);
if (retval) {
- release_cgroup_pid_array(cp);
+ cgroup_release_pid_array(l);
return retval;
}
- ((struct seq_file *)file->private_data)->private = cp;
+ ((struct seq_file *)file->private_data)->private = l;
return 0;
}
+static int cgroup_tasks_open(struct inode *unused, struct file *file)
+{
+ return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
+}
+static int cgroup_procs_open(struct inode *unused, struct file *file)
+{
+ return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
+}
static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
struct cftype *cft)
@@ -2449,21 +2794,27 @@ static int cgroup_write_notify_on_release(struct cgroup *cgrp,
/*
* for the common functions, 'private' gives the type of file
*/
+/* for hysterical raisins, we can't put this on the older files */
+#define CGROUP_FILE_GENERIC_PREFIX "cgroup."
static struct cftype files[] = {
{
.name = "tasks",
.open = cgroup_tasks_open,
.write_u64 = cgroup_tasks_write,
- .release = cgroup_tasks_release,
- .private = FILE_TASKLIST,
+ .release = cgroup_pidlist_release,
.mode = S_IRUGO | S_IWUSR,
},
-
+ {
+ .name = CGROUP_FILE_GENERIC_PREFIX "procs",
+ .open = cgroup_procs_open,
+ /* .write_u64 = cgroup_procs_write, TODO */
+ .release = cgroup_pidlist_release,
+ .mode = S_IRUGO,
+ },
{
.name = "notify_on_release",
.read_u64 = cgroup_read_notify_on_release,
.write_u64 = cgroup_write_notify_on_release,
- .private = FILE_NOTIFY_ON_RELEASE,
},
};
@@ -2472,7 +2823,6 @@ static struct cftype cft_release_agent = {
.read_seq_string = cgroup_release_agent_show,
.write_string = cgroup_release_agent_write,
.max_write_len = PATH_MAX,
- .private = FILE_RELEASE_AGENT,
};
static int cgroup_populate_dir(struct cgroup *cgrp)
@@ -2879,6 +3229,7 @@ int __init cgroup_init_early(void)
init_task.cgroups = &init_css_set;
init_css_set_link.cg = &init_css_set;
+ init_css_set_link.cgrp = dummytop;
list_add(&init_css_set_link.cgrp_link_list,
&rootnode.top_cgroup.css_sets);
list_add(&init_css_set_link.cg_link_list,
@@ -2933,7 +3284,7 @@ int __init cgroup_init(void)
/* Add init_css_set to the hash table */
hhead = css_set_hash(init_css_set.subsys);
hlist_add_head(&init_css_set.hlist, hhead);
-
+ BUG_ON(!init_root_id(&rootnode));
err = register_filesystem(&cgroup_fs_type);
if (err < 0)
goto out;
@@ -2986,15 +3337,16 @@ static int proc_cgroup_show(struct seq_file *m, void *v)
for_each_active_root(root) {
struct cgroup_subsys *ss;
struct cgroup *cgrp;
- int subsys_id;
int count = 0;
- seq_printf(m, "%lu:", root->subsys_bits);
+ seq_printf(m, "%d:", root->hierarchy_id);
for_each_subsys(root, ss)
seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+ if (strlen(root->name))
+ seq_printf(m, "%sname=%s", count ? "," : "",
+ root->name);
seq_putc(m, ':');
- get_first_subsys(&root->top_cgroup, NULL, &subsys_id);
- cgrp = task_cgroup(tsk, subsys_id);
+ cgrp = task_cgroup_from_root(tsk, root);
retval = cgroup_path(cgrp, buf, PAGE_SIZE);
if (retval < 0)
goto out_unlock;
@@ -3033,8 +3385,8 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
mutex_lock(&cgroup_mutex);
for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
- seq_printf(m, "%s\t%lu\t%d\t%d\n",
- ss->name, ss->root->subsys_bits,
+ seq_printf(m, "%s\t%d\t%d\t%d\n",
+ ss->name, ss->root->hierarchy_id,
ss->root->number_of_cgroups, !ss->disabled);
}
mutex_unlock(&cgroup_mutex);
@@ -3320,13 +3672,11 @@ int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
int ret;
struct cgroup *target;
- int subsys_id;
if (cgrp == dummytop)
return 1;
- get_first_subsys(cgrp, NULL, &subsys_id);
- target = task_cgroup(task, subsys_id);
+ target = task_cgroup_from_root(task, cgrp->root);
while (cgrp != target && cgrp != cgrp->top_cgroup)
cgrp = cgrp->parent;
ret = (cgrp == target);
@@ -3693,3 +4043,154 @@ css_get_next(struct cgroup_subsys *ss, int id,
return ret;
}
+#ifdef CONFIG_CGROUP_DEBUG
+static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
+
+ if (!css)
+ return ERR_PTR(-ENOMEM);
+
+ return css;
+}
+
+static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ kfree(cont->subsys[debug_subsys_id]);
+}
+
+static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
+{
+ return atomic_read(&cont->count);
+}
+
+static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
+{
+ return cgroup_task_count(cont);
+}
+
+static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
+{
+ return (u64)(unsigned long)current->cgroups;
+}
+
+static u64 current_css_set_refcount_read(struct cgroup *cont,
+ struct cftype *cft)
+{
+ u64 count;
+
+ rcu_read_lock();
+ count = atomic_read(&current->cgroups->refcount);
+ rcu_read_unlock();
+ return count;
+}
+
+static int current_css_set_cg_links_read(struct cgroup *cont,
+ struct cftype *cft,
+ struct seq_file *seq)
+{
+ struct cg_cgroup_link *link;
+ struct css_set *cg;
+
+ read_lock(&css_set_lock);
+ rcu_read_lock();
+ cg = rcu_dereference(current->cgroups);
+ list_for_each_entry(link, &cg->cg_links, cg_link_list) {
+ struct cgroup *c = link->cgrp;
+ const char *name;
+
+ if (c->dentry)
+ name = c->dentry->d_name.name;
+ else
+ name = "?";
+ seq_printf(seq, "Root %d group %s\n",
+ c->root->hierarchy_id, name);
+ }
+ rcu_read_unlock();
+ read_unlock(&css_set_lock);
+ return 0;
+}
+
+#define MAX_TASKS_SHOWN_PER_CSS 25
+static int cgroup_css_links_read(struct cgroup *cont,
+ struct cftype *cft,
+ struct seq_file *seq)
+{
+ struct cg_cgroup_link *link;
+
+ read_lock(&css_set_lock);
+ list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
+ struct css_set *cg = link->cg;
+ struct task_struct *task;
+ int count = 0;
+ seq_printf(seq, "css_set %p\n", cg);
+ list_for_each_entry(task, &cg->tasks, cg_list) {
+ if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
+ seq_puts(seq, " ...\n");
+ break;
+ } else {
+ seq_printf(seq, " task %d\n",
+ task_pid_vnr(task));
+ }
+ }
+ }
+ read_unlock(&css_set_lock);
+ return 0;
+}
+
+static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+}
+
+static struct cftype debug_files[] = {
+ {
+ .name = "cgroup_refcount",
+ .read_u64 = cgroup_refcount_read,
+ },
+ {
+ .name = "taskcount",
+ .read_u64 = debug_taskcount_read,
+ },
+
+ {
+ .name = "current_css_set",
+ .read_u64 = current_css_set_read,
+ },
+
+ {
+ .name = "current_css_set_refcount",
+ .read_u64 = current_css_set_refcount_read,
+ },
+
+ {
+ .name = "current_css_set_cg_links",
+ .read_seq_string = current_css_set_cg_links_read,
+ },
+
+ {
+ .name = "cgroup_css_links",
+ .read_seq_string = cgroup_css_links_read,
+ },
+
+ {
+ .name = "releasable",
+ .read_u64 = releasable_read,
+ },
+};
+
+static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ return cgroup_add_files(cont, ss, debug_files,
+ ARRAY_SIZE(debug_files));
+}
+
+struct cgroup_subsys debug_subsys = {
+ .name = "debug",
+ .create = debug_create,
+ .destroy = debug_destroy,
+ .populate = debug_populate,
+ .subsys_id = debug_subsys_id,
+};
+#endif /* CONFIG_CGROUP_DEBUG */
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
deleted file mode 100644
index 0c92d797baa..00000000000
--- a/kernel/cgroup_debug.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * kernel/cgroup_debug.c - Example cgroup subsystem that
- * exposes debug info
- *
- * Copyright (C) Google Inc, 2007
- *
- * Developed by Paul Menage (menage@google.com)
- *
- */
-
-#include <linux/cgroup.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/rcupdate.h>
-
-#include <asm/atomic.h>
-
-static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
- struct cgroup *cont)
-{
- struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
-
- if (!css)
- return ERR_PTR(-ENOMEM);
-
- return css;
-}
-
-static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
-{
- kfree(cont->subsys[debug_subsys_id]);
-}
-
-static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
-{
- return atomic_read(&cont->count);
-}
-
-static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
-{
- u64 count;
-
- count = cgroup_task_count(cont);
- return count;
-}
-
-static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
-{
- return (u64)(long)current->cgroups;
-}
-
-static u64 current_css_set_refcount_read(struct cgroup *cont,
- struct cftype *cft)
-{
- u64 count;
-
- rcu_read_lock();
- count = atomic_read(&current->cgroups->refcount);
- rcu_read_unlock();
- return count;
-}
-
-static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
-{
- return test_bit(CGRP_RELEASABLE, &cgrp->flags);
-}
-
-static struct cftype files[] = {
- {
- .name = "cgroup_refcount",
- .read_u64 = cgroup_refcount_read,
- },
- {
- .name = "taskcount",
- .read_u64 = taskcount_read,
- },
-
- {
- .name = "current_css_set",
- .read_u64 = current_css_set_read,
- },
-
- {
- .name = "current_css_set_refcount",
- .read_u64 = current_css_set_refcount_read,
- },
-
- {
- .name = "releasable",
- .read_u64 = releasable_read,
- },
-};
-
-static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
-{
- return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
-}
-
-struct cgroup_subsys debug_subsys = {
- .name = "debug",
- .create = debug_create,
- .destroy = debug_destroy,
- .populate = debug_populate,
- .subsys_id = debug_subsys_id,
-};
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index fb249e2bcad..59e9ef6aab4 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -159,7 +159,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
*/
static int freezer_can_attach(struct cgroup_subsys *ss,
struct cgroup *new_cgroup,
- struct task_struct *task)
+ struct task_struct *task, bool threadgroup)
{
struct freezer *freezer;
@@ -177,6 +177,19 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
if (freezer->state == CGROUP_FROZEN)
return -EBUSY;
+ if (threadgroup) {
+ struct task_struct *c;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+ if (is_task_frozen_enough(c)) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+ }
+ rcu_read_unlock();
+ }
+
return 0;
}
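With threadgroup set, the new loop simply refuses the attach if any thread in the group is already frozen. Reduced to a plain predicate over an array (illustrative only - the kernel walks the thread list under rcu_read_lock()):

	/* Return -1 ("busy") if any member of the group fails the per-task check. */
	static int group_can_attach(const int *frozen, int nthreads)
	{
		int i;

		for (i = 0; i < nthreads; i++) {
			if (frozen[i])
				return -1;	/* stands in for -EBUSY */
		}
		return 0;
	}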
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7e75a41bd50..b5cb469d254 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1324,9 +1324,10 @@ static int fmeter_getrate(struct fmeter *fmp)
static cpumask_var_t cpus_attach;
/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cont, struct task_struct *tsk)
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+ struct task_struct *tsk, bool threadgroup)
{
+ int ret;
struct cpuset *cs = cgroup_cs(cont);
if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1343,18 +1344,51 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
if (tsk->flags & PF_THREAD_BOUND)
return -EINVAL;
- return security_task_setscheduler(tsk, 0, NULL);
+ ret = security_task_setscheduler(tsk, 0, NULL);
+ if (ret)
+ return ret;
+ if (threadgroup) {
+ struct task_struct *c;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+ ret = security_task_setscheduler(c, 0, NULL);
+ if (ret) {
+ rcu_read_unlock();
+ return ret;
+ }
+ }
+ rcu_read_unlock();
+ }
+ return 0;
+}
+
+static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
+ struct cpuset *cs)
+{
+ int err;
+ /*
+ * can_attach beforehand should guarantee that this doesn't fail.
+ * TODO: have a better way to handle failure here
+ */
+ err = set_cpus_allowed_ptr(tsk, cpus_attach);
+ WARN_ON_ONCE(err);
+
+ task_lock(tsk);
+ cpuset_change_task_nodemask(tsk, to);
+ task_unlock(tsk);
+ cpuset_update_task_spread_flag(cs, tsk);
+
}
-static void cpuset_attach(struct cgroup_subsys *ss,
- struct cgroup *cont, struct cgroup *oldcont,
- struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+ struct cgroup *oldcont, struct task_struct *tsk,
+ bool threadgroup)
{
nodemask_t from, to;
struct mm_struct *mm;
struct cpuset *cs = cgroup_cs(cont);
struct cpuset *oldcs = cgroup_cs(oldcont);
- int err;
if (cs == &top_cpuset) {
cpumask_copy(cpus_attach, cpu_possible_mask);
@@ -1363,15 +1397,19 @@ static void cpuset_attach(struct cgroup_subsys *ss,
guarantee_online_cpus(cs, cpus_attach);
guarantee_online_mems(cs, &to);
}
- err = set_cpus_allowed_ptr(tsk, cpus_attach);
- if (err)
- return;
- task_lock(tsk);
- cpuset_change_task_nodemask(tsk, &to);
- task_unlock(tsk);
- cpuset_update_task_spread_flag(cs, tsk);
+ /* do per-task migration stuff possibly for each in the threadgroup */
+ cpuset_attach_task(tsk, &to, cs);
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+ cpuset_attach_task(c, &to, cs);
+ }
+ rcu_read_unlock();
+ }
+ /* change mm; only needs to be done once even if threadgroup */
from = oldcs->mems_allowed;
to = cs->mems_allowed;
mm = get_task_mm(tsk);
diff --git a/kernel/cred.c b/kernel/cred.c
index d7f7a01082e..dd76cfe5f5b 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -782,6 +782,25 @@ EXPORT_SYMBOL(set_create_files_as);
#ifdef CONFIG_DEBUG_CREDENTIALS
+bool creds_are_invalid(const struct cred *cred)
+{
+ if (cred->magic != CRED_MAGIC)
+ return true;
+ if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
+ return true;
+#ifdef CONFIG_SECURITY_SELINUX
+ if (selinux_is_enabled()) {
+ if ((unsigned long) cred->security < PAGE_SIZE)
+ return true;
+ if ((*(u32 *)cred->security & 0xffffff00) ==
+ (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
+ return true;
+ }
+#endif
+ return false;
+}
+EXPORT_SYMBOL(creds_are_invalid);
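The masked compare above tests whether the first word of the freed SELinux blob, apart from its low byte, matches the slab free poison pattern. For an assumed POISON_FREE of 0x6b (the usual kernel value, but an assumption here), the constant works out as follows:

	#include <stdio.h>

	int main(void)
	{
		unsigned int poison_free = 0x6b;	/* assumed value of POISON_FREE */
		unsigned int pattern = poison_free << 24 | poison_free << 16 | poison_free << 8;

		/* The check masks the word with 0xffffff00 before comparing,
		 * so the low byte of the word is ignored. */
		printf("0x%08x\n", pattern);		/* prints 0x6b6b6b00 */
		return 0;
	}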
+
/*
* dump invalid credentials
*/
diff --git a/kernel/exit.c b/kernel/exit.c
index ae5d8660ddf..5859f598c95 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,7 +47,7 @@
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <asm/uaccess.h>
@@ -154,8 +154,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
{
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
-#ifdef CONFIG_PERF_COUNTERS
- WARN_ON_ONCE(tsk->perf_counter_ctxp);
+#ifdef CONFIG_PERF_EVENTS
+ WARN_ON_ONCE(tsk->perf_event_ctxp);
#endif
trace_sched_process_free(tsk);
put_task_struct(tsk);
@@ -359,8 +359,10 @@ void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
- if (task_session(curr) != pid)
+ if (task_session(curr) != pid) {
change_pid(curr, PIDTYPE_SID, pid);
+ proc_sid_connector(curr);
+ }
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
@@ -945,6 +947,8 @@ NORET_TYPE void do_exit(long code)
if (group_dead) {
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
+ if (tsk->mm)
+ setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
}
acct_collect(code, group_dead);
if (group_dead)
@@ -972,8 +976,6 @@ NORET_TYPE void do_exit(long code)
disassociate_ctty(1);
module_put(task_thread_info(tsk)->exec_domain->module);
- if (tsk->binfmt)
- module_put(tsk->binfmt->module);
proc_exit_connector(tsk);
@@ -981,7 +983,7 @@ NORET_TYPE void do_exit(long code)
* Flush inherited counters to the parent - before the parent
* gets woken up by child-exit notifications.
*/
- perf_counter_exit_task(tsk);
+ perf_event_exit_task(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
@@ -1093,28 +1095,28 @@ struct wait_opts {
int __user *wo_stat;
struct rusage __user *wo_rusage;
+ wait_queue_t child_wait;
int notask_error;
};
-static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
- struct pid *pid = NULL;
- if (type == PIDTYPE_PID)
- pid = task->pids[type].pid;
- else if (type < PIDTYPE_MAX)
- pid = task->group_leader->pids[type].pid;
- return pid;
+ if (type != PIDTYPE_PID)
+ task = task->group_leader;
+ return task->pids[type].pid;
}
-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
- int err;
-
- if (wo->wo_type < PIDTYPE_MAX) {
- if (task_pid_type(p, wo->wo_type) != wo->wo_pid)
- return 0;
- }
+ return wo->wo_type == PIDTYPE_MAX ||
+ task_pid_type(p, wo->wo_type) == wo->wo_pid;
+}
+static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+{
+ if (!eligible_pid(wo, p))
+ return 0;
/* Wait for all children (clone and not) if __WALL is set;
* otherwise, wait for clone children *only* if __WCLONE is
* set; otherwise, wait for non-clone children *only*. (Note:
@@ -1124,10 +1126,6 @@ static int eligible_child(struct wait_opts *wo, struct task_struct *p)
&& !(wo->wo_flags & __WALL))
return 0;
- err = security_task_wait(p);
- if (err)
- return err;
-
return 1;
}
@@ -1140,18 +1138,20 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
put_task_struct(p);
infop = wo->wo_info;
- if (!retval)
- retval = put_user(SIGCHLD, &infop->si_signo);
- if (!retval)
- retval = put_user(0, &infop->si_errno);
- if (!retval)
- retval = put_user((short)why, &infop->si_code);
- if (!retval)
- retval = put_user(pid, &infop->si_pid);
- if (!retval)
- retval = put_user(uid, &infop->si_uid);
- if (!retval)
- retval = put_user(status, &infop->si_status);
+ if (infop) {
+ if (!retval)
+ retval = put_user(SIGCHLD, &infop->si_signo);
+ if (!retval)
+ retval = put_user(0, &infop->si_errno);
+ if (!retval)
+ retval = put_user((short)why, &infop->si_code);
+ if (!retval)
+ retval = put_user(pid, &infop->si_pid);
+ if (!retval)
+ retval = put_user(uid, &infop->si_uid);
+ if (!retval)
+ retval = put_user(status, &infop->si_status);
+ }
if (!retval)
retval = pid;
return retval;
@@ -1208,6 +1208,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
if (likely(!traced) && likely(!task_detached(p))) {
struct signal_struct *psig;
struct signal_struct *sig;
+ unsigned long maxrss;
/*
* The resource counters for the group leader are in its
@@ -1256,6 +1257,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
psig->coublock +=
task_io_get_oublock(p) +
sig->oublock + sig->coublock;
+ maxrss = max(sig->maxrss, sig->cmaxrss);
+ if (psig->cmaxrss < maxrss)
+ psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
spin_unlock_irq(&p->real_parent->sighand->siglock);
@@ -1477,13 +1481,14 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
* then ->notask_error is 0 if @p is an eligible child,
* or another error from security_task_wait(), or still -ECHILD.
*/
-static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent,
- int ptrace, struct task_struct *p)
+static int wait_consider_task(struct wait_opts *wo, int ptrace,
+ struct task_struct *p)
{
int ret = eligible_child(wo, p);
if (!ret)
return ret;
+ ret = security_task_wait(p);
if (unlikely(ret < 0)) {
/*
* If we have not yet seen any eligible child,
@@ -1545,7 +1550,7 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
* Do not consider detached threads.
*/
if (!task_detached(p)) {
- int ret = wait_consider_task(wo, tsk, 0, p);
+ int ret = wait_consider_task(wo, 0, p);
if (ret)
return ret;
}
@@ -1559,7 +1564,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
struct task_struct *p;
list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
- int ret = wait_consider_task(wo, tsk, 1, p);
+ int ret = wait_consider_task(wo, 1, p);
if (ret)
return ret;
}
@@ -1567,15 +1572,38 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
return 0;
}
+static int child_wait_callback(wait_queue_t *wait, unsigned mode,
+ int sync, void *key)
+{
+ struct wait_opts *wo = container_of(wait, struct wait_opts,
+ child_wait);
+ struct task_struct *p = key;
+
+ if (!eligible_pid(wo, p))
+ return 0;
+
+ if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
+ return 0;
+
+ return default_wake_function(wait, mode, sync, key);
+}
+
+void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
+{
+ __wake_up_sync_key(&parent->signal->wait_chldexit,
+ TASK_INTERRUPTIBLE, 1, p);
+}
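child_wait_callback() lets the exiting child wake only the waiters whose wait() arguments it can satisfy, instead of waking every sleeper on wait_chldexit. The filtering idea, with every name below invented for illustration:

	struct waiter {
		int wanted_pid;		/* -1 means "any child", like wait(2) */
	};

	/* Called by the waker with the exiting child's pid as the key. */
	static int should_wake(const struct waiter *w, int exiting_pid)
	{
		if (w->wanted_pid != -1 && w->wanted_pid != exiting_pid)
			return 0;	/* not a child this waiter asked about: stay asleep */
		return 1;		/* the real code would now call default_wake_function() */
	}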
+
static long do_wait(struct wait_opts *wo)
{
- DECLARE_WAITQUEUE(wait, current);
struct task_struct *tsk;
int retval;
trace_sched_process_wait(wo->wo_pid);
- add_wait_queue(&current->signal->wait_chldexit,&wait);
+ init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
+ wo->child_wait.private = current;
+ add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
/*
* If there is nothing that can match our criteria, just get out.
@@ -1616,32 +1644,7 @@ notask:
}
end:
__set_current_state(TASK_RUNNING);
- remove_wait_queue(&current->signal->wait_chldexit,&wait);
- if (wo->wo_info) {
- struct siginfo __user *infop = wo->wo_info;
-
- if (retval > 0)
- retval = 0;
- else {
- /*
- * For a WNOHANG return, clear out all the fields
- * we would set so the user can easily tell the
- * difference.
- */
- if (!retval)
- retval = put_user(0, &infop->si_signo);
- if (!retval)
- retval = put_user(0, &infop->si_errno);
- if (!retval)
- retval = put_user(0, &infop->si_code);
- if (!retval)
- retval = put_user(0, &infop->si_pid);
- if (!retval)
- retval = put_user(0, &infop->si_uid);
- if (!retval)
- retval = put_user(0, &infop->si_status);
- }
- }
+ remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
return retval;
}
@@ -1686,6 +1689,29 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
wo.wo_stat = NULL;
wo.wo_rusage = ru;
ret = do_wait(&wo);
+
+ if (ret > 0) {
+ ret = 0;
+ } else if (infop) {
+ /*
+ * For a WNOHANG return, clear out all the fields
+ * we would set so the user can easily tell the
+ * difference.
+ */
+ if (!ret)
+ ret = put_user(0, &infop->si_signo);
+ if (!ret)
+ ret = put_user(0, &infop->si_errno);
+ if (!ret)
+ ret = put_user(0, &infop->si_code);
+ if (!ret)
+ ret = put_user(0, &infop->si_pid);
+ if (!ret)
+ ret = put_user(0, &infop->si_uid);
+ if (!ret)
+ ret = put_user(0, &infop->si_status);
+ }
+
put_pid(pid);
/* avoid REGPARM breakage on x86: */
diff --git a/kernel/fork.c b/kernel/fork.c
index bfee931ee3f..266c6af6ef1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -49,6 +49,7 @@
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
+#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
@@ -61,7 +62,8 @@
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
+#include <linux/posix-timers.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -136,9 +138,17 @@ struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
+static void account_kernel_stack(struct thread_info *ti, int account)
+{
+ struct zone *zone = page_zone(virt_to_page(ti));
+
+ mod_zone_page_state(zone, NR_KERNEL_STACK, account);
+}
+
void free_task(struct task_struct *tsk)
{
prop_local_destroy_single(&tsk->dirties);
+ account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
@@ -253,6 +263,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
tsk->btrace_seq = 0;
#endif
tsk->splice_pipe = NULL;
+
+ account_kernel_stack(ti, 1);
+
return tsk;
out:
@@ -288,6 +301,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
pprev = &mm->mmap;
+ retval = ksm_fork(mm, oldmm);
+ if (retval)
+ goto out;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
@@ -418,22 +434,30 @@ __setup("coredump_filter=", coredump_filter_setup);
#include <linux/init_task.h>
+static void mm_init_aio(struct mm_struct *mm)
+{
+#ifdef CONFIG_AIO
+ spin_lock_init(&mm->ioctx_lock);
+ INIT_HLIST_HEAD(&mm->ioctx_list);
+#endif
+}
+
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
- mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
+ mm->flags = (current->mm) ?
+ (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
mm->core_state = NULL;
mm->nr_ptes = 0;
set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0);
spin_lock_init(&mm->page_table_lock);
- spin_lock_init(&mm->ioctx_lock);
- INIT_HLIST_HEAD(&mm->ioctx_list);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
+ mm_init_aio(mm);
mm_init_owner(mm, p);
if (likely(!mm_alloc_pgd(mm))) {
@@ -485,6 +509,7 @@ void mmput(struct mm_struct *mm)
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
+ ksm_exit(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
@@ -493,6 +518,8 @@ void mmput(struct mm_struct *mm)
spin_unlock(&mmlist_lock);
}
put_swap_token(mm);
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
mmdrop(mm);
}
}
@@ -618,9 +645,14 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm;
+ if (mm->binfmt && !try_module_get(mm->binfmt->module))
+ goto free_pt;
+
return mm;
free_pt:
+ /* don't put binfmt in mmput, we haven't got module yet */
+ mm->binfmt = NULL;
mmput(mm);
fail_nomem:
@@ -788,10 +820,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
thread_group_cputime_init(sig);
/* Expiration times and increments. */
- sig->it_virt_expires = cputime_zero;
- sig->it_virt_incr = cputime_zero;
- sig->it_prof_expires = cputime_zero;
- sig->it_prof_incr = cputime_zero;
+ sig->it[CPUCLOCK_PROF].expires = cputime_zero;
+ sig->it[CPUCLOCK_PROF].incr = cputime_zero;
+ sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
+ sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
/* Cached expiration times. */
sig->cputime_expires.prof_exp = cputime_zero;
@@ -849,6 +881,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
+ sig->maxrss = sig->cmaxrss = 0;
task_io_accounting_init(&sig->ioac);
sig->sum_sched_runtime = 0;
taskstats_tgid_init(sig);
@@ -863,6 +896,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
tty_audit_fork(sig);
+ sig->oom_adj = current->signal->oom_adj;
+
return 0;
}
@@ -958,6 +993,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
+ /*
+ * Siblings of global init remain as zombies on exit since they are
+ * not reaped by their parent (swapper). To solve this and to avoid
+ * multi-rooted process trees, prevent global and container-inits
+ * from creating siblings.
+ */
+ if ((clone_flags & CLONE_PARENT) &&
+ current->signal->flags & SIGNAL_UNKILLABLE)
+ return ERR_PTR(-EINVAL);
+
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
@@ -999,9 +1044,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
- if (p->binfmt && !try_module_get(p->binfmt->module))
- goto bad_fork_cleanup_put_domain;
-
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
@@ -1075,10 +1117,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->bts = NULL;
+ p->stack_start = stack_start;
+
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
- retval = perf_counter_init_task(p);
+ retval = perf_event_init_task(p);
if (retval)
goto bad_fork_cleanup_policy;
@@ -1253,7 +1297,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
- perf_counter_fork(p);
+ perf_event_fork(p);
return p;
bad_fork_free_pid:
@@ -1280,16 +1324,13 @@ bad_fork_cleanup_semundo:
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
- perf_counter_free_task(p);
+ perf_event_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
- if (p->binfmt)
- module_put(p->binfmt->module);
-bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 654efd09f6a..70a298d6da7 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -34,7 +34,7 @@ config GCOV_KERNEL
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
depends on GCOV_KERNEL
- depends on S390 || X86 || (PPC && EXPERIMENTAL)
+ depends on S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
default n
---help---
This options activates profiling for the entire kernel.
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c03f221fee4..e5d98ce50f8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <trace/events/timer.h>
+
/*
* The timer bases:
*
@@ -442,6 +444,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
+static inline void
+debug_init(struct hrtimer *timer, clockid_t clockid,
+ enum hrtimer_mode mode)
+{
+ debug_hrtimer_init(timer);
+ trace_hrtimer_init(timer, clockid, mode);
+}
+
+static inline void debug_activate(struct hrtimer *timer)
+{
+ debug_hrtimer_activate(timer);
+ trace_hrtimer_start(timer);
+}
+
+static inline void debug_deactivate(struct hrtimer *timer)
+{
+ debug_hrtimer_deactivate(timer);
+ trace_hrtimer_cancel(timer);
+}
+
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -798,7 +820,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
struct hrtimer *entry;
int leftmost = 1;
- debug_hrtimer_activate(timer);
+ debug_activate(timer);
/*
* Find the right place in the rbtree:
@@ -884,7 +906,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
@@ -1117,7 +1139,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
- debug_hrtimer_init(timer);
+ debug_init(timer, clock_id, mode);
__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
@@ -1141,7 +1163,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void __run_hrtimer(struct hrtimer *timer)
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
struct hrtimer_clock_base *base = timer->base;
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
@@ -1150,7 +1172,7 @@ static void __run_hrtimer(struct hrtimer *timer)
WARN_ON(!irqs_disabled());
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
fn = timer->function;
@@ -1161,7 +1183,9 @@ static void __run_hrtimer(struct hrtimer *timer)
* the timer base.
*/
spin_unlock(&cpu_base->lock);
+ trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
+ trace_hrtimer_expire_exit(timer);
spin_lock(&cpu_base->lock);
/*
@@ -1272,7 +1296,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
break;
}
- __run_hrtimer(timer);
+ __run_hrtimer(timer, &basenow);
}
base++;
}
@@ -1394,7 +1418,7 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- __run_hrtimer(timer);
+ __run_hrtimer(timer, &base->softirq_time);
}
spin_unlock(&cpu_base->lock);
}
@@ -1571,7 +1595,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
/*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 022a4927b78..d4e84174740 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -171,12 +171,12 @@ static unsigned long timeout_jiffies(unsigned long timeout)
* Process updating of timeout sysctl
*/
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
goto out;
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 58762f7077e..b03451ede52 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -12,6 +12,7 @@
#include <linux/time.h>
#include <linux/posix-timers.h>
#include <linux/hrtimer.h>
+#include <trace/events/timer.h>
#include <asm/uaccess.h>
@@ -41,10 +42,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
return ktime_to_timeval(rem);
}
+static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+ struct itimerval *const value)
+{
+ cputime_t cval, cinterval;
+ struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+ spin_lock_irq(&tsk->sighand->siglock);
+
+ cval = it->expires;
+ cinterval = it->incr;
+ if (!cputime_eq(cval, cputime_zero)) {
+ struct task_cputime cputime;
+ cputime_t t;
+
+ thread_group_cputimer(tsk, &cputime);
+ if (clock_id == CPUCLOCK_PROF)
+ t = cputime_add(cputime.utime, cputime.stime);
+ else
+ /* CPUCLOCK_VIRT */
+ t = cputime.utime;
+
+ if (cputime_le(cval, t))
+ /* about to fire */
+ cval = cputime_one_jiffy;
+ else
+ cval = cputime_sub(cval, t);
+ }
+
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ cputime_to_timeval(cval, &value->it_value);
+ cputime_to_timeval(cinterval, &value->it_interval);
+}
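The "about to fire" branch keeps getitimer() from reporting zero for a timer that is armed but already due. The remaining-time computation reduces to this (userspace sketch, tick size assumed):

	/* Remaining time of a CPU interval timer, in ticks. */
	static unsigned long remaining_ticks(unsigned long expires,
					     unsigned long elapsed,
					     unsigned long one_tick)
	{
		if (expires == 0)
			return 0;		/* timer is not armed */
		if (expires <= elapsed)
			return one_tick;	/* armed and already due: report one tick */
		return expires - elapsed;
	}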
+
int do_getitimer(int which, struct itimerval *value)
{
struct task_struct *tsk = current;
- cputime_t cinterval, cval;
switch (which) {
case ITIMER_REAL:
@@ -55,44 +89,10 @@ int do_getitimer(int which, struct itimerval *value)
spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_virt_expires;
- cinterval = tsk->signal->it_virt_incr;
- if (!cputime_eq(cval, cputime_zero)) {
- struct task_cputime cputime;
- cputime_t utime;
-
- thread_group_cputimer(tsk, &cputime);
- utime = cputime.utime;
- if (cputime_le(cval, utime)) { /* about to fire */
- cval = jiffies_to_cputime(1);
- } else {
- cval = cputime_sub(cval, utime);
- }
- }
- spin_unlock_irq(&tsk->sighand->siglock);
- cputime_to_timeval(cval, &value->it_value);
- cputime_to_timeval(cinterval, &value->it_interval);
+ get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
break;
case ITIMER_PROF:
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_prof_expires;
- cinterval = tsk->signal->it_prof_incr;
- if (!cputime_eq(cval, cputime_zero)) {
- struct task_cputime times;
- cputime_t ptime;
-
- thread_group_cputimer(tsk, &times);
- ptime = cputime_add(times.utime, times.stime);
- if (cputime_le(cval, ptime)) { /* about to fire */
- cval = jiffies_to_cputime(1);
- } else {
- cval = cputime_sub(cval, ptime);
- }
- }
- spin_unlock_irq(&tsk->sighand->siglock);
- cputime_to_timeval(cval, &value->it_value);
- cputime_to_timeval(cinterval, &value->it_interval);
+ get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
break;
default:
return(-EINVAL);
@@ -123,11 +123,62 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
struct signal_struct *sig =
container_of(timer, struct signal_struct, real_timer);
+ trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
return HRTIMER_NORESTART;
}
+static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
+{
+ struct timespec ts;
+ s64 cpu_ns;
+
+ cputime_to_timespec(ct, &ts);
+ cpu_ns = timespec_to_ns(&ts);
+
+ return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
+}
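cputime_sub_ns() records how much the coarse cputime granularity over-states the requested interval, so set_cpu_itimer() can carry that error forward. Assuming the conversion rounds the request up to whole ticks and a 10 ms tick (both assumptions are mine, not from the patch), the size of that error looks like this:

	#include <stdio.h>

	int main(void)
	{
		long long tick_ns = 10000000LL;		/* assumed cputime granularity: 10 ms */
		long long requested_ns = 1234567LL;	/* interval the caller asked for */
		long long rounded_ns = ((requested_ns + tick_ns - 1) / tick_ns) * tick_ns;

		printf("error carried forward: %lld ns\n", rounded_ns - requested_ns);
		return 0;
	}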
+
+static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+ const struct itimerval *const value,
+ struct itimerval *const ovalue)
+{
+ cputime_t cval, nval, cinterval, ninterval;
+ s64 ns_ninterval, ns_nval;
+ struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+ nval = timeval_to_cputime(&value->it_value);
+ ns_nval = timeval_to_ns(&value->it_value);
+ ninterval = timeval_to_cputime(&value->it_interval);
+ ns_ninterval = timeval_to_ns(&value->it_interval);
+
+ it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+ it->error = cputime_sub_ns(nval, ns_nval);
+
+ spin_lock_irq(&tsk->sighand->siglock);
+
+ cval = it->expires;
+ cinterval = it->incr;
+ if (!cputime_eq(cval, cputime_zero) ||
+ !cputime_eq(nval, cputime_zero)) {
+ if (cputime_gt(nval, cputime_zero))
+ nval = cputime_add(nval, cputime_one_jiffy);
+ set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+ }
+ it->expires = nval;
+ it->incr = ninterval;
+ trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
+ ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
+
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ if (ovalue) {
+ cputime_to_timeval(cval, &ovalue->it_value);
+ cputime_to_timeval(cinterval, &ovalue->it_interval);
+ }
+}
+
/*
* Returns true if the timeval is in canonical form
*/
@@ -139,7 +190,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
struct task_struct *tsk = current;
struct hrtimer *timer;
ktime_t expires;
- cputime_t cval, cinterval, nval, ninterval;
/*
* Validate the timevals in value.
@@ -171,51 +221,14 @@ again:
} else
tsk->signal->it_real_incr.tv64 = 0;
+ trace_itimer_state(ITIMER_REAL, value, 0);
spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
- nval = timeval_to_cputime(&value->it_value);
- ninterval = timeval_to_cputime(&value->it_interval);
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_virt_expires;
- cinterval = tsk->signal->it_virt_incr;
- if (!cputime_eq(cval, cputime_zero) ||
- !cputime_eq(nval, cputime_zero)) {
- if (cputime_gt(nval, cputime_zero))
- nval = cputime_add(nval,
- jiffies_to_cputime(1));
- set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
- &nval, &cval);
- }
- tsk->signal->it_virt_expires = nval;
- tsk->signal->it_virt_incr = ninterval;
- spin_unlock_irq(&tsk->sighand->siglock);
- if (ovalue) {
- cputime_to_timeval(cval, &ovalue->it_value);
- cputime_to_timeval(cinterval, &ovalue->it_interval);
- }
+ set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
break;
case ITIMER_PROF:
- nval = timeval_to_cputime(&value->it_value);
- ninterval = timeval_to_cputime(&value->it_interval);
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_prof_expires;
- cinterval = tsk->signal->it_prof_incr;
- if (!cputime_eq(cval, cputime_zero) ||
- !cputime_eq(nval, cputime_zero)) {
- if (cputime_gt(nval, cputime_zero))
- nval = cputime_add(nval,
- jiffies_to_cputime(1));
- set_process_cpu_timer(tsk, CPUCLOCK_PROF,
- &nval, &cval);
- }
- tsk->signal->it_prof_expires = nval;
- tsk->signal->it_prof_incr = ninterval;
- spin_unlock_irq(&tsk->sighand->siglock);
- if (ovalue) {
- cputime_to_timeval(cval, &ovalue->it_value);
- cputime_to_timeval(cinterval, &ovalue->it_interval);
- }
+ set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
break;
default:
return -EINVAL;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 3a29dbe7898..8b6b8b697c6 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -59,7 +59,8 @@ static inline int is_kernel_inittext(unsigned long addr)
static inline int is_kernel_text(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
+ if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+ arch_is_kernel_text(addr))
return 1;
return in_gate_area_no_task(addr);
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ef177d653b2..cfadc1291d0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1321,7 +1321,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
return 0;
}
-static struct seq_operations kprobes_seq_ops = {
+static const struct seq_operations kprobes_seq_ops = {
.start = kprobe_seq_start,
.next = kprobe_seq_next,
.stop = kprobe_seq_stop,
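Constifying kprobes_seq_ops (and lockstat_ops below) only changes where the
table lives, not how it behaves: a const ops table can be placed in read-only
data, so its function pointers cannot be overwritten at run time. A tiny
standalone illustration with a simplified ops structure:

#include <stdio.h>

struct demo_seq_operations {
        void (*start)(void);
        void (*stop)(void);
};

static void demo_start(void) { printf("start\n"); }
static void demo_stop(void)  { printf("stop\n");  }

/* const: the table is read-only for its whole lifetime. */
static const struct demo_seq_operations demo_ops = {
        .start = demo_start,
        .stop  = demo_stop,
};

int main(void)
{
        demo_ops.start();
        demo_ops.stop();
        return 0;
}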
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f74d2d7aa60..3815ac1d58b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -578,6 +578,9 @@ static int static_obj(void *obj)
if ((addr >= start) && (addr < end))
return 1;
+ if (arch_is_kernel_data(addr))
+ return 1;
+
#ifdef CONFIG_SMP
/*
* percpu var?
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d4b3dbc79fd..d4aba4f3584 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -594,7 +594,7 @@ static int ls_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations lockstat_ops = {
+static const struct seq_operations lockstat_ops = {
.start = ls_start,
.next = ls_next,
.stop = ls_stop,
diff --git a/kernel/module.c b/kernel/module.c
index 392eb3defbc..fe748a86d45 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -47,6 +47,7 @@
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
@@ -1535,6 +1536,10 @@ static void free_module(struct module *mod)
/* Finally, free the core (containing the module structure) */
module_free(mod, mod->module_core);
+
+#ifdef CONFIG_MPU
+ update_protections(current->mm);
+#endif
}
void *__symbol_get(const char *symbol)
@@ -1792,6 +1797,17 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
}
}
+static void free_modinfo(struct module *mod)
+{
+ struct module_attribute *attr;
+ int i;
+
+ for (i = 0; (attr = modinfo_attrs[i]); i++) {
+ if (attr->free)
+ attr->free(mod);
+ }
+}
+
#ifdef CONFIG_KALLSYMS
/* lookup symbol in given range of kernel_symbols */
@@ -1857,13 +1873,93 @@ static char elf_type(const Elf_Sym *sym,
return '?';
}
+static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
+ unsigned int shnum)
+{
+ const Elf_Shdr *sec;
+
+ if (src->st_shndx == SHN_UNDEF
+ || src->st_shndx >= shnum
+ || !src->st_name)
+ return false;
+
+ sec = sechdrs + src->st_shndx;
+ if (!(sec->sh_flags & SHF_ALLOC)
+#ifndef CONFIG_KALLSYMS_ALL
+ || !(sec->sh_flags & SHF_EXECINSTR)
+#endif
+ || (sec->sh_entsize & INIT_OFFSET_MASK))
+ return false;
+
+ return true;
+}
+
+static unsigned long layout_symtab(struct module *mod,
+ Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ unsigned int strindex,
+ const Elf_Ehdr *hdr,
+ const char *secstrings,
+ unsigned long *pstroffs,
+ unsigned long *strmap)
+{
+ unsigned long symoffs;
+ Elf_Shdr *symsect = sechdrs + symindex;
+ Elf_Shdr *strsect = sechdrs + strindex;
+ const Elf_Sym *src;
+ const char *strtab;
+ unsigned int i, nsrc, ndst;
+
+ /* Put symbol section at end of init part of module. */
+ symsect->sh_flags |= SHF_ALLOC;
+ symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
+ symindex) | INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", secstrings + symsect->sh_name);
+
+ src = (void *)hdr + symsect->sh_offset;
+ nsrc = symsect->sh_size / sizeof(*src);
+ strtab = (void *)hdr + strsect->sh_offset;
+ for (ndst = i = 1; i < nsrc; ++i, ++src)
+ if (is_core_symbol(src, sechdrs, hdr->e_shnum)) {
+ unsigned int j = src->st_name;
+
+ while(!__test_and_set_bit(j, strmap) && strtab[j])
+ ++j;
+ ++ndst;
+ }
+
+ /* Append room for core symbols at end of core part. */
+ symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+ mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
+
+ /* Put string table section at end of init part of module. */
+ strsect->sh_flags |= SHF_ALLOC;
+ strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
+ strindex) | INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", secstrings + strsect->sh_name);
+
+ /* Append room for core symbols' strings at end of core part. */
+ *pstroffs = mod->core_size;
+ __set_bit(0, strmap);
+ mod->core_size += bitmap_weight(strmap, strsect->sh_size);
+
+ return symoffs;
+}
+
static void add_kallsyms(struct module *mod,
Elf_Shdr *sechdrs,
+ unsigned int shnum,
unsigned int symindex,
unsigned int strindex,
- const char *secstrings)
+ unsigned long symoffs,
+ unsigned long stroffs,
+ const char *secstrings,
+ unsigned long *strmap)
{
- unsigned int i;
+ unsigned int i, ndst;
+ const Elf_Sym *src;
+ Elf_Sym *dst;
+ char *s;
mod->symtab = (void *)sechdrs[symindex].sh_addr;
mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
@@ -1873,13 +1969,44 @@ static void add_kallsyms(struct module *mod,
for (i = 0; i < mod->num_symtab; i++)
mod->symtab[i].st_info
= elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
+
+ mod->core_symtab = dst = mod->module_core + symoffs;
+ src = mod->symtab;
+ *dst = *src;
+ for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
+ if (!is_core_symbol(src, sechdrs, shnum))
+ continue;
+ dst[ndst] = *src;
+ dst[ndst].st_name = bitmap_weight(strmap, dst[ndst].st_name);
+ ++ndst;
+ }
+ mod->core_num_syms = ndst;
+
+ mod->core_strtab = s = mod->module_core + stroffs;
+ for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
+ if (test_bit(i, strmap))
+ *++s = mod->strtab[i];
}
#else
+static inline unsigned long layout_symtab(struct module *mod,
+ Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ unsigned int strindex,
+ const Elf_Ehdr *hdr,
+ const char *secstrings,
+ unsigned long *pstroffs,
+ unsigned long *strmap)
+{
+ return 0;
+}
static inline void add_kallsyms(struct module *mod,
Elf_Shdr *sechdrs,
+ unsigned int shnum,
unsigned int symindex,
unsigned int strindex,
- const char *secstrings)
+ unsigned long symoffs,
+ unsigned long stroffs,
+ const char *secstrings,
+ const unsigned long *strmap)
{
}
#endif /* CONFIG_KALLSYMS */
@@ -1954,6 +2081,9 @@ static noinline struct module *load_module(void __user *umod,
struct module *mod;
long err = 0;
void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
+#ifdef CONFIG_KALLSYMS
+ unsigned long symoffs, stroffs, *strmap;
+#endif
mm_segment_t old_fs;
DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -2035,11 +2165,6 @@ static noinline struct module *load_module(void __user *umod,
/* Don't keep modinfo and version sections. */
sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
-#ifdef CONFIG_KALLSYMS
- /* Keep symbol and string tables for decoding later. */
- sechdrs[symindex].sh_flags |= SHF_ALLOC;
- sechdrs[strindex].sh_flags |= SHF_ALLOC;
-#endif
/* Check module struct version now, before we try to use module. */
if (!check_modstruct_version(sechdrs, versindex, mod)) {
@@ -2075,6 +2200,13 @@ static noinline struct module *load_module(void __user *umod,
goto free_hdr;
}
+ strmap = kzalloc(BITS_TO_LONGS(sechdrs[strindex].sh_size)
+ * sizeof(long), GFP_KERNEL);
+ if (!strmap) {
+ err = -ENOMEM;
+ goto free_mod;
+ }
+
if (find_module(mod->name)) {
err = -EEXIST;
goto free_mod;
@@ -2104,6 +2236,8 @@ static noinline struct module *load_module(void __user *umod,
this is done generically; there doesn't appear to be any
special cases for the architectures. */
layout_sections(mod, hdr, sechdrs, secstrings);
+ symoffs = layout_symtab(mod, sechdrs, symindex, strindex, hdr,
+ secstrings, &stroffs, strmap);
/* Do the allocs. */
ptr = module_alloc_update_bounds(mod->core_size);
@@ -2308,7 +2442,10 @@ static noinline struct module *load_module(void __user *umod,
percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
sechdrs[pcpuindex].sh_size);
- add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
+ add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex,
+ symoffs, stroffs, secstrings, strmap);
+ kfree(strmap);
+ strmap = NULL;
if (!mod->taints) {
struct _ddebug *debug;
@@ -2380,13 +2517,14 @@ static noinline struct module *load_module(void __user *umod,
synchronize_sched();
module_arch_cleanup(mod);
cleanup:
+ free_modinfo(mod);
kobject_del(&mod->mkobj.kobj);
kobject_put(&mod->mkobj.kobj);
free_unload:
module_unload_free(mod);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- free_init:
percpu_modfree(mod->refptr);
+ free_init:
#endif
module_free(mod, mod->module_init);
free_core:
@@ -2397,6 +2535,7 @@ static noinline struct module *load_module(void __user *umod,
percpu_modfree(percpu);
free_mod:
kfree(args);
+ kfree(strmap);
free_hdr:
vfree(hdr);
return ERR_PTR(err);
@@ -2486,6 +2625,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
/* Drop initial reference. */
module_put(mod);
trim_init_extable(mod);
+#ifdef CONFIG_KALLSYMS
+ mod->num_symtab = mod->core_num_syms;
+ mod->symtab = mod->core_symtab;
+ mod->strtab = mod->core_strtab;
+#endif
module_free(mod, mod->module_init);
mod->module_init = NULL;
mod->init_size = 0;
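The module.c changes stop keeping the full symbol and string tables resident
after init: layout_symtab() reserves core space only for "core" symbols and
for the string bytes they actually reference (tracked in the strmap bitmap),
add_kallsyms() copies them across with each kept symbol's new st_name equal
to the number of marked string-table bytes before its old offset, and
init_module() finally switches mod->symtab/strtab to the compacted copies
before the init sections are freed. A small userspace sketch of that
bitmap-based compaction, with a trivial byte map standing in for
__set_bit()/bitmap_weight():

#include <stdio.h>

#define STRTAB_SIZE 64

/* One byte per string-table byte, standing in for the strmap bitmap. */
static unsigned char strmap[STRTAB_SIZE];

static unsigned int weight_before(unsigned int idx)
{
        unsigned int i, n = 0;

        for (i = 0; i < idx; i++)
                n += strmap[i];
        return n;
}

int main(void)
{
        /* A toy string table: "foo" at 1, "barbar" at 5, "baz" at 12. */
        const char strtab[STRTAB_SIZE] = "\0foo\0barbar\0baz";
        unsigned int kept[] = { 1, 12 };  /* only "foo" and "baz" are core */
        char core_strtab[STRTAB_SIZE];
        unsigned int i, j, out;

        /* Pass 1: mark byte 0 and every byte of each kept name, NUL included. */
        strmap[0] = 1;
        for (i = 0; i < 2; i++)
                for (j = kept[i]; ; j++) {
                        strmap[j] = 1;
                        if (!strtab[j])
                                break;
                }

        /* A kept symbol's new st_name = marked bytes before its old offset. */
        for (i = 0; i < 2; i++)
                printf("st_name %u -> %u\n", kept[i], weight_before(kept[i]));

        /* Pass 2: copy only the marked bytes into the compacted table. */
        for (out = 0, i = 0; i < STRTAB_SIZE; i++)
                if (strmap[i])
                        core_strtab[out++] = strtab[i];

        printf("core strtab: %u of %d bytes (\"%s\", \"%s\")\n",
               out, STRTAB_SIZE, core_strtab + 1, core_strtab + 5);
        return 0;
}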
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 5aa854f9e5a..2a5dfec8efe 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -42,8 +42,8 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
* (hence either you are in the same cgroup as task, or in an
* ancestor cgroup thereof)
*/
-static int ns_can_attach(struct cgroup_subsys *ss,
- struct cgroup *new_cgroup, struct task_struct *task)
+static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
+ struct task_struct *task, bool threadgroup)
{
if (current != task) {
if (!capable(CAP_SYS_ADMIN))
@@ -56,6 +56,18 @@ static int ns_can_attach(struct cgroup_subsys *ss,
if (!cgroup_is_descendant(new_cgroup, task))
return -EPERM;
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+ if (!cgroup_is_descendant(new_cgroup, c)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+ }
+ rcu_read_unlock();
+ }
+
return 0;
}
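With the new threadgroup flag, ns_can_attach() has to validate every thread
in the group, and the RCU read lock must be dropped on the early -EPERM
return as well as on the normal path. A minimal userspace sketch of that
"check every member, release on every exit path" shape, using a plain array
and no-op lock helpers where the kernel walks task->thread_group under
rcu_read_lock():

#include <stdio.h>

#define DEMO_EPERM 1

/* No-op stand-ins; the kernel uses rcu_read_lock()/rcu_read_unlock(). */
static void demo_lock(void)   { }
static void demo_unlock(void) { }

/* Toy descendance rule, invented for the example. */
static int demo_is_descendant(int new_cgroup, int task_cgroup)
{
        return task_cgroup >= new_cgroup;
}

/* Reject the whole move unless every thread in the group qualifies. */
static int demo_can_attach_threadgroup(int new_cgroup, const int *threads, int n)
{
        int i;

        demo_lock();
        for (i = 0; i < n; i++) {
                if (!demo_is_descendant(new_cgroup, threads[i])) {
                        demo_unlock();  /* drop the lock on the error path too */
                        return -DEMO_EPERM;
                }
        }
        demo_unlock();
        return 0;
}

int main(void)
{
        int group[] = { 5, 6, 2 };  /* the third "thread" is outside the subtree */

        printf("%d\n", demo_can_attach_threadgroup(4, group, 3));  /* -1 */
        printf("%d\n", demo_can_attach_threadgroup(2, group, 3));  /*  0 */
        return 0;
}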
diff --git a/kernel/panic.c b/kernel/panic.c
index 512ab73b0ca..bcdef26e333 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -177,7 +177,7 @@ static const struct tnt tnts[] = {
* 'W' - Taint on warning.
* 'C' - modules from drivers/staging are loaded.
*
- * The string is overwritten by the next call to print_taint().
+ * The string is overwritten by the next call to print_tainted().
*/
const char *print_tainted(void)
{
diff --git a/kernel/params.c b/kernel/params.c
index 7f6912ced2b..9da58eabdcb 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
#if 0
#define DEBUGP printk
@@ -87,7 +88,7 @@ static char *next_arg(char *args, char **param, char **val)
}
for (i = 0; args[i]; i++) {
- if (args[i] == ' ' && !in_quote)
+ if (isspace(args[i]) && !in_quote)
break;
if (equals == 0) {
if (args[i] == '=')
@@ -121,7 +122,7 @@ static char *next_arg(char *args, char **param, char **val)
next = args + i;
/* Chew up trailing spaces. */
- while (*next == ' ')
+ while (isspace(*next))
next++;
return next;
}
@@ -138,7 +139,7 @@ int parse_args(const char *name,
DEBUGP("Parsing ARGS: %s\n", args);
/* Chew leading spaces */
- while (*args == ' ')
+ while (isspace(*args))
args++;
while (*args) {
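Switching next_arg()/parse_args() from the literal ' ' to isspace() means
tabs and newlines in the argument string are treated as separators too, not
just spaces. A small standalone tokenizer sketch showing the effect
(userspace ctype; the real code's quote handling and '=' splitting are
omitted):

#include <ctype.h>
#include <stdio.h>

/* Split a writable argument string on any whitespace, the way parse_args()
 * walks the kernel command line after this change. */
static char *demo_next_arg(char *args, char **param)
{
        int i;

        *param = args;
        for (i = 0; args[i]; i++)
                if (isspace((unsigned char)args[i]))  /* was: args[i] == ' ' */
                        break;
        if (args[i]) {
                args[i] = '\0';
                i++;
        }
        /* Chew up trailing whitespace before the next parameter. */
        while (isspace((unsigned char)args[i]))
                i++;
        return args + i;
}

int main(void)
{
        char cmdline[] = "console=ttyS0\tquiet\nroot=/dev/sda1";
        char *next = cmdline, *param;

        while (*next) {
                next = demo_next_arg(next, &param);
                printf("param: '%s'\n", param);
        }
        return 0;
}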
diff --git a/kernel/perf_counter.c b/kernel/perf_event.c
index cc768ab81ac..76ac4db405e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_event.c
@@ -1,12 +1,12 @@
/*
- * Performance counter core code
+ * Performance events core code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
- * For licensing details see kernel-base/COPYING
+ * For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
@@ -26,66 +26,66 @@
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/irq_regs.h>
/*
- * Each CPU has a list of per CPU counters:
+ * Each CPU has a list of per CPU events:
*/
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
-int perf_max_counters __read_mostly = 1;
+int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;
-static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_counters __read_mostly;
-static atomic_t nr_comm_counters __read_mostly;
-static atomic_t nr_task_counters __read_mostly;
+static atomic_t nr_events __read_mostly;
+static atomic_t nr_mmap_events __read_mostly;
+static atomic_t nr_comm_events __read_mostly;
+static atomic_t nr_task_events __read_mostly;
/*
- * perf counter paranoia level:
+ * perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
- * 1 - disallow cpu counters for unpriv
+ * 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
-int sysctl_perf_counter_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 1;
static inline bool perf_paranoid_tracepoint_raw(void)
{
- return sysctl_perf_counter_paranoid > -1;
+ return sysctl_perf_event_paranoid > -1;
}
static inline bool perf_paranoid_cpu(void)
{
- return sysctl_perf_counter_paranoid > 0;
+ return sysctl_perf_event_paranoid > 0;
}
static inline bool perf_paranoid_kernel(void)
{
- return sysctl_perf_counter_paranoid > 1;
+ return sysctl_perf_event_paranoid > 1;
}
-int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
/*
- * max perf counter sample rate
+ * max perf event sample rate
*/
-int sysctl_perf_counter_sample_rate __read_mostly = 100000;
+int sysctl_perf_event_sample_rate __read_mostly = 100000;
-static atomic64_t perf_counter_id;
+static atomic64_t perf_event_id;
/*
- * Lock for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) event reservations:
*/
static DEFINE_SPINLOCK(perf_resource_lock);
/*
* Architecture provided APIs - weak aliases:
*/
-extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
{
return NULL;
}
@@ -93,18 +93,18 @@ extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counte
void __weak hw_perf_disable(void) { barrier(); }
void __weak hw_perf_enable(void) { barrier(); }
-void __weak hw_perf_counter_setup(int cpu) { barrier(); }
-void __weak hw_perf_counter_setup_online(int cpu) { barrier(); }
+void __weak hw_perf_event_setup(int cpu) { barrier(); }
+void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
int __weak
-hw_perf_group_sched_in(struct perf_counter *group_leader,
+hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx, int cpu)
+ struct perf_event_context *ctx, int cpu)
{
return 0;
}
-void __weak perf_counter_print_debug(void) { }
+void __weak perf_event_print_debug(void) { }
static DEFINE_PER_CPU(int, perf_disable_count);
@@ -130,20 +130,20 @@ void perf_enable(void)
hw_perf_enable();
}
-static void get_ctx(struct perf_counter_context *ctx)
+static void get_ctx(struct perf_event_context *ctx)
{
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
static void free_ctx(struct rcu_head *head)
{
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
- ctx = container_of(head, struct perf_counter_context, rcu_head);
+ ctx = container_of(head, struct perf_event_context, rcu_head);
kfree(ctx);
}
-static void put_ctx(struct perf_counter_context *ctx)
+static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
@@ -154,7 +154,7 @@ static void put_ctx(struct perf_counter_context *ctx)
}
}
-static void unclone_ctx(struct perf_counter_context *ctx)
+static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
put_ctx(ctx->parent_ctx);
@@ -163,37 +163,37 @@ static void unclone_ctx(struct perf_counter_context *ctx)
}
/*
- * If we inherit counters we want to return the parent counter id
+ * If we inherit events we want to return the parent event id
* to userspace.
*/
-static u64 primary_counter_id(struct perf_counter *counter)
+static u64 primary_event_id(struct perf_event *event)
{
- u64 id = counter->id;
+ u64 id = event->id;
- if (counter->parent)
- id = counter->parent->id;
+ if (event->parent)
+ id = event->parent->id;
return id;
}
/*
- * Get the perf_counter_context for a task and lock it.
+ * Get the perf_event_context for a task and lock it.
* This has to cope with the fact that until it is locked,
* the context could get moved to another task.
*/
-static struct perf_counter_context *
+static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
rcu_read_lock();
retry:
- ctx = rcu_dereference(task->perf_counter_ctxp);
+ ctx = rcu_dereference(task->perf_event_ctxp);
if (ctx) {
/*
* If this context is a clone of another, it might
* get swapped for another underneath us by
- * perf_counter_task_sched_out, though the
+ * perf_event_task_sched_out, though the
* rcu_read_lock() protects us from any context
* getting freed. Lock the context and check if it
* got swapped before we could get the lock, and retry
@@ -201,7 +201,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
* can't get swapped on us any more.
*/
spin_lock_irqsave(&ctx->lock, *flags);
- if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
+ if (ctx != rcu_dereference(task->perf_event_ctxp)) {
spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
@@ -220,9 +220,9 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
-static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
+static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, &flags);
@@ -233,7 +233,7 @@ static struct perf_counter_context *perf_pin_task_context(struct task_struct *ta
return ctx;
}
-static void perf_unpin_context(struct perf_counter_context *ctx)
+static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
@@ -244,123 +244,122 @@ static void perf_unpin_context(struct perf_counter_context *ctx)
}
/*
- * Add a counter from the lists for its context.
+ * Add an event to the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
-list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
- struct perf_counter *group_leader = counter->group_leader;
+ struct perf_event *group_leader = event->group_leader;
/*
- * Depending on whether it is a standalone or sibling counter,
- * add it straight to the context's counter list, or to the group
+ * Depending on whether it is a standalone or sibling event,
+ * add it straight to the context's event list, or to the group
* leader's sibling list:
*/
- if (group_leader == counter)
- list_add_tail(&counter->list_entry, &ctx->counter_list);
+ if (group_leader == event)
+ list_add_tail(&event->group_entry, &ctx->group_list);
else {
- list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+ list_add_tail(&event->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
}
- list_add_rcu(&counter->event_entry, &ctx->event_list);
- ctx->nr_counters++;
- if (counter->attr.inherit_stat)
+ list_add_rcu(&event->event_entry, &ctx->event_list);
+ ctx->nr_events++;
+ if (event->attr.inherit_stat)
ctx->nr_stat++;
}
/*
- * Remove a counter from the lists for its context.
+ * Remove an event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
-list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
- struct perf_counter *sibling, *tmp;
+ struct perf_event *sibling, *tmp;
- if (list_empty(&counter->list_entry))
+ if (list_empty(&event->group_entry))
return;
- ctx->nr_counters--;
- if (counter->attr.inherit_stat)
+ ctx->nr_events--;
+ if (event->attr.inherit_stat)
ctx->nr_stat--;
- list_del_init(&counter->list_entry);
- list_del_rcu(&counter->event_entry);
+ list_del_init(&event->group_entry);
+ list_del_rcu(&event->event_entry);
- if (counter->group_leader != counter)
- counter->group_leader->nr_siblings--;
+ if (event->group_leader != event)
+ event->group_leader->nr_siblings--;
/*
- * If this was a group counter with sibling counters then
- * upgrade the siblings to singleton counters by adding them
+ * If this was a group event with sibling events then
+ * upgrade the siblings to singleton events by adding them
* to the context list directly:
*/
- list_for_each_entry_safe(sibling, tmp,
- &counter->sibling_list, list_entry) {
+ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
- list_move_tail(&sibling->list_entry, &ctx->counter_list);
+ list_move_tail(&sibling->group_entry, &ctx->group_list);
sibling->group_leader = sibling;
}
}
static void
-counter_sched_out(struct perf_counter *counter,
+event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx)
+ struct perf_event_context *ctx)
{
- if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- if (counter->pending_disable) {
- counter->pending_disable = 0;
- counter->state = PERF_COUNTER_STATE_OFF;
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ if (event->pending_disable) {
+ event->pending_disable = 0;
+ event->state = PERF_EVENT_STATE_OFF;
}
- counter->tstamp_stopped = ctx->time;
- counter->pmu->disable(counter);
- counter->oncpu = -1;
+ event->tstamp_stopped = ctx->time;
+ event->pmu->disable(event);
+ event->oncpu = -1;
- if (!is_software_counter(counter))
+ if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
- if (counter->attr.exclusive || !cpuctx->active_oncpu)
+ if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
static void
-group_sched_out(struct perf_counter *group_counter,
+group_sched_out(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx)
+ struct perf_event_context *ctx)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+ if (group_event->state != PERF_EVENT_STATE_ACTIVE)
return;
- counter_sched_out(group_counter, cpuctx, ctx);
+ event_sched_out(group_event, cpuctx, ctx);
/*
* Schedule out siblings (if any):
*/
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
- counter_sched_out(counter, cpuctx, ctx);
+ list_for_each_entry(event, &group_event->sibling_list, group_entry)
+ event_sched_out(event, cpuctx, ctx);
- if (group_counter->attr.exclusive)
+ if (group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
/*
- * Cross CPU call to remove a performance counter
+ * Cross CPU call to remove a performance event
*
- * We disable the counter on the hardware level first. After that we
+ * We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
-static void __perf_counter_remove_from_context(void *info)
+static void __perf_event_remove_from_context(void *info)
{
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter *counter = info;
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event *event = info;
+ struct perf_event_context *ctx = event->ctx;
/*
* If this is a task context, we need to check whether it is
@@ -373,22 +372,22 @@ static void __perf_counter_remove_from_context(void *info)
spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
- * counters on a global level.
+ * events on a global level.
*/
perf_disable();
- counter_sched_out(counter, cpuctx, ctx);
+ event_sched_out(event, cpuctx, ctx);
- list_del_counter(counter, ctx);
+ list_del_event(event, ctx);
if (!ctx->task) {
/*
- * Allow more per task counters with respect to the
+ * Allow more per task events with respect to the
* reservation:
*/
cpuctx->max_pertask =
- min(perf_max_counters - ctx->nr_counters,
- perf_max_counters - perf_reserved_percpu);
+ min(perf_max_events - ctx->nr_events,
+ perf_max_events - perf_reserved_percpu);
}
perf_enable();
@@ -397,56 +396,56 @@ static void __perf_counter_remove_from_context(void *info)
/*
- * Remove the counter from a task's (or a CPU's) list of counters.
+ * Remove the event from a task's (or a CPU's) list of events.
*
* Must be called with ctx->mutex held.
*
- * CPU counters are removed with a smp call. For task counters we only
+ * CPU events are removed with a smp call. For task events we only
* call when the task is on a CPU.
*
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
+ * If event->ctx is a cloned context, callers must make sure that
+ * every task struct that event->ctx->task could possibly point to
* remains valid. This is OK when called from perf_release since
* that only calls us on the top-level context, which can't be a clone.
- * When called from perf_counter_exit_task, it's OK because the
+ * When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_counter_remove_from_context(struct perf_counter *counter)
+static void perf_event_remove_from_context(struct perf_event *event)
{
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
- * Per cpu counters are removed via an smp call and
+ * Per cpu events are removed via an smp call and
* the removal is always successful.
*/
- smp_call_function_single(counter->cpu,
- __perf_counter_remove_from_context,
- counter, 1);
+ smp_call_function_single(event->cpu,
+ __perf_event_remove_from_context,
+ event, 1);
return;
}
retry:
- task_oncpu_function_call(task, __perf_counter_remove_from_context,
- counter);
+ task_oncpu_function_call(task, __perf_event_remove_from_context,
+ event);
spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
- if (ctx->nr_active && !list_empty(&counter->list_entry)) {
+ if (ctx->nr_active && !list_empty(&event->group_entry)) {
spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
* The lock prevents that this context is scheduled in so we
- * can remove the counter safely, if the call above did not
+ * can remove the event safely, if the call above did not
* succeed.
*/
- if (!list_empty(&counter->list_entry)) {
- list_del_counter(counter, ctx);
+ if (!list_empty(&event->group_entry)) {
+ list_del_event(event, ctx);
}
spin_unlock_irq(&ctx->lock);
}
@@ -459,7 +458,7 @@ static inline u64 perf_clock(void)
/*
* Update the record of the current time in a context.
*/
-static void update_context_time(struct perf_counter_context *ctx)
+static void update_context_time(struct perf_event_context *ctx)
{
u64 now = perf_clock();
@@ -468,51 +467,51 @@ static void update_context_time(struct perf_counter_context *ctx)
}
/*
- * Update the total_time_enabled and total_time_running fields for a counter.
+ * Update the total_time_enabled and total_time_running fields for an event.
*/
-static void update_counter_times(struct perf_counter *counter)
+static void update_event_times(struct perf_event *event)
{
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event_context *ctx = event->ctx;
u64 run_end;
- if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
- counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
+ if (event->state < PERF_EVENT_STATE_INACTIVE ||
+ event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
- counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
+ event->total_time_enabled = ctx->time - event->tstamp_enabled;
- if (counter->state == PERF_COUNTER_STATE_INACTIVE)
- run_end = counter->tstamp_stopped;
+ if (event->state == PERF_EVENT_STATE_INACTIVE)
+ run_end = event->tstamp_stopped;
else
run_end = ctx->time;
- counter->total_time_running = run_end - counter->tstamp_running;
+ event->total_time_running = run_end - event->tstamp_running;
}
/*
- * Update total_time_enabled and total_time_running for all counters in a group.
+ * Update total_time_enabled and total_time_running for all events in a group.
*/
-static void update_group_times(struct perf_counter *leader)
+static void update_group_times(struct perf_event *leader)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- update_counter_times(leader);
- list_for_each_entry(counter, &leader->sibling_list, list_entry)
- update_counter_times(counter);
+ update_event_times(leader);
+ list_for_each_entry(event, &leader->sibling_list, group_entry)
+ update_event_times(event);
}
/*
- * Cross CPU call to disable a performance counter
+ * Cross CPU call to disable a performance event
*/
-static void __perf_counter_disable(void *info)
+static void __perf_event_disable(void *info)
{
- struct perf_counter *counter = info;
+ struct perf_event *event = info;
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event_context *ctx = event->ctx;
/*
- * If this is a per-task counter, need to check whether this
- * counter's task is the current task on this cpu.
+ * If this is a per-task event, need to check whether this
+ * event's task is the current task on this cpu.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return;
@@ -520,57 +519,57 @@ static void __perf_counter_disable(void *info)
spin_lock(&ctx->lock);
/*
- * If the counter is on, turn it off.
+ * If the event is on, turn it off.
* If it is in error state, leave it in error state.
*/
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
+ if (event->state >= PERF_EVENT_STATE_INACTIVE) {
update_context_time(ctx);
- update_group_times(counter);
- if (counter == counter->group_leader)
- group_sched_out(counter, cpuctx, ctx);
+ update_group_times(event);
+ if (event == event->group_leader)
+ group_sched_out(event, cpuctx, ctx);
else
- counter_sched_out(counter, cpuctx, ctx);
- counter->state = PERF_COUNTER_STATE_OFF;
+ event_sched_out(event, cpuctx, ctx);
+ event->state = PERF_EVENT_STATE_OFF;
}
spin_unlock(&ctx->lock);
}
/*
- * Disable a counter.
+ * Disable an event.
*
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
+ * If event->ctx is a cloned context, callers must make sure that
+ * every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
- * perf_counter_for_each_child or perf_counter_for_each because they
- * hold the top-level counter's child_mutex, so any descendant that
- * goes to exit will block in sync_child_counter.
- * When called from perf_pending_counter it's OK because counter->ctx
+ * perf_event_for_each_child or perf_event_for_each because they
+ * hold the top-level event's child_mutex, so any descendant that
+ * goes to exit will block in sync_child_event.
+ * When called from perf_pending_event it's OK because event->ctx
* is the current context on this CPU and preemption is disabled,
- * hence we can't get into perf_counter_task_sched_out for this context.
+ * hence we can't get into perf_event_task_sched_out for this context.
*/
-static void perf_counter_disable(struct perf_counter *counter)
+static void perf_event_disable(struct perf_event *event)
{
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
- * Disable the counter on the cpu that it's on
+ * Disable the event on the cpu that it's on
*/
- smp_call_function_single(counter->cpu, __perf_counter_disable,
- counter, 1);
+ smp_call_function_single(event->cpu, __perf_event_disable,
+ event, 1);
return;
}
retry:
- task_oncpu_function_call(task, __perf_counter_disable, counter);
+ task_oncpu_function_call(task, __perf_event_disable, event);
spin_lock_irq(&ctx->lock);
/*
- * If the counter is still active, we need to retry the cross-call.
+ * If the event is still active, we need to retry the cross-call.
*/
- if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
+ if (event->state == PERF_EVENT_STATE_ACTIVE) {
spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -579,73 +578,73 @@ static void perf_counter_disable(struct perf_counter *counter)
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
*/
- if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
- update_group_times(counter);
- counter->state = PERF_COUNTER_STATE_OFF;
+ if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ update_group_times(event);
+ event->state = PERF_EVENT_STATE_OFF;
}
spin_unlock_irq(&ctx->lock);
}
static int
-counter_sched_in(struct perf_counter *counter,
+event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx,
+ struct perf_event_context *ctx,
int cpu)
{
- if (counter->state <= PERF_COUNTER_STATE_OFF)
+ if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
- counter->state = PERF_COUNTER_STATE_ACTIVE;
- counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
+ event->state = PERF_EVENT_STATE_ACTIVE;
+ event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
/*
* The new state must be visible before we turn it on in the hardware:
*/
smp_wmb();
- if (counter->pmu->enable(counter)) {
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->oncpu = -1;
+ if (event->pmu->enable(event)) {
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ event->oncpu = -1;
return -EAGAIN;
}
- counter->tstamp_running += ctx->time - counter->tstamp_stopped;
+ event->tstamp_running += ctx->time - event->tstamp_stopped;
- if (!is_software_counter(counter))
+ if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
- if (counter->attr.exclusive)
+ if (event->attr.exclusive)
cpuctx->exclusive = 1;
return 0;
}
static int
-group_sched_in(struct perf_counter *group_counter,
+group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx,
+ struct perf_event_context *ctx,
int cpu)
{
- struct perf_counter *counter, *partial_group;
+ struct perf_event *event, *partial_group;
int ret;
- if (group_counter->state == PERF_COUNTER_STATE_OFF)
+ if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
- ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+ ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
if (ret)
return ret < 0 ? ret : 0;
- if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+ if (event_sched_in(group_event, cpuctx, ctx, cpu))
return -EAGAIN;
/*
* Schedule in siblings as one group (if any):
*/
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
- if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
- partial_group = counter;
+ list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+ if (event_sched_in(event, cpuctx, ctx, cpu)) {
+ partial_group = event;
goto group_error;
}
}
@@ -657,57 +656,57 @@ group_error:
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
*/
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
- if (counter == partial_group)
+ list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+ if (event == partial_group)
break;
- counter_sched_out(counter, cpuctx, ctx);
+ event_sched_out(event, cpuctx, ctx);
}
- counter_sched_out(group_counter, cpuctx, ctx);
+ event_sched_out(group_event, cpuctx, ctx);
return -EAGAIN;
}
/*
- * Return 1 for a group consisting entirely of software counters,
- * 0 if the group contains any hardware counters.
+ * Return 1 for a group consisting entirely of software events,
+ * 0 if the group contains any hardware events.
*/
-static int is_software_only_group(struct perf_counter *leader)
+static int is_software_only_group(struct perf_event *leader)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- if (!is_software_counter(leader))
+ if (!is_software_event(leader))
return 0;
- list_for_each_entry(counter, &leader->sibling_list, list_entry)
- if (!is_software_counter(counter))
+ list_for_each_entry(event, &leader->sibling_list, group_entry)
+ if (!is_software_event(event))
return 0;
return 1;
}
/*
- * Work out whether we can put this counter group on the CPU now.
+ * Work out whether we can put this event group on the CPU now.
*/
-static int group_can_go_on(struct perf_counter *counter,
+static int group_can_go_on(struct perf_event *event,
struct perf_cpu_context *cpuctx,
int can_add_hw)
{
/*
- * Groups consisting entirely of software counters can always go on.
+ * Groups consisting entirely of software events can always go on.
*/
- if (is_software_only_group(counter))
+ if (is_software_only_group(event))
return 1;
/*
* If an exclusive group is already on, no other hardware
- * counters can go on.
+ * events can go on.
*/
if (cpuctx->exclusive)
return 0;
/*
* If this group is exclusive and there are already
- * counters on the CPU, it can't go on.
+ * events on the CPU, it can't go on.
*/
- if (counter->attr.exclusive && cpuctx->active_oncpu)
+ if (event->attr.exclusive && cpuctx->active_oncpu)
return 0;
/*
* Otherwise, try to add it if all previous groups were able
@@ -716,26 +715,26 @@ static int group_can_go_on(struct perf_counter *counter,
return can_add_hw;
}
-static void add_counter_to_ctx(struct perf_counter *counter,
- struct perf_counter_context *ctx)
+static void add_event_to_ctx(struct perf_event *event,
+ struct perf_event_context *ctx)
{
- list_add_counter(counter, ctx);
- counter->tstamp_enabled = ctx->time;
- counter->tstamp_running = ctx->time;
- counter->tstamp_stopped = ctx->time;
+ list_add_event(event, ctx);
+ event->tstamp_enabled = ctx->time;
+ event->tstamp_running = ctx->time;
+ event->tstamp_stopped = ctx->time;
}
/*
- * Cross CPU call to install and enable a performance counter
+ * Cross CPU call to install and enable a performance event
*
* Must be called with ctx->mutex held
*/
static void __perf_install_in_context(void *info)
{
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter *counter = info;
- struct perf_counter_context *ctx = counter->ctx;
- struct perf_counter *leader = counter->group_leader;
+ struct perf_event *event = info;
+ struct perf_event_context *ctx = event->ctx;
+ struct perf_event *leader = event->group_leader;
int cpu = smp_processor_id();
int err;
@@ -744,7 +743,7 @@ static void __perf_install_in_context(void *info)
* the current task context of this cpu. If not it has been
* scheduled out before the smp call arrived.
* Or possibly this is the right context but it isn't
- * on this cpu because it had no counters.
+ * on this cpu because it had no events.
*/
if (ctx->task && cpuctx->task_ctx != ctx) {
if (cpuctx->task_ctx || ctx->task != current)
@@ -758,41 +757,41 @@ static void __perf_install_in_context(void *info)
/*
* Protect the list operation against NMI by disabling the
- * counters on a global level. NOP for non NMI based counters.
+ * events on a global level. NOP for non NMI based events.
*/
perf_disable();
- add_counter_to_ctx(counter, ctx);
+ add_event_to_ctx(event, ctx);
/*
- * Don't put the counter on if it is disabled or if
+ * Don't put the event on if it is disabled or if
* it is in a group and the group isn't on.
*/
- if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
- (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
+ if (event->state != PERF_EVENT_STATE_INACTIVE ||
+ (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
goto unlock;
/*
- * An exclusive counter can't go on if there are already active
- * hardware counters, and no hardware counter can go on if there
- * is already an exclusive counter on.
+ * An exclusive event can't go on if there are already active
+ * hardware events, and no hardware event can go on if there
+ * is already an exclusive event on.
*/
- if (!group_can_go_on(counter, cpuctx, 1))
+ if (!group_can_go_on(event, cpuctx, 1))
err = -EEXIST;
else
- err = counter_sched_in(counter, cpuctx, ctx, cpu);
+ err = event_sched_in(event, cpuctx, ctx, cpu);
if (err) {
/*
- * This counter couldn't go on. If it is in a group
+ * This event couldn't go on. If it is in a group
* then we have to pull the whole group off.
- * If the counter group is pinned then put it in error state.
+ * If the event group is pinned then put it in error state.
*/
- if (leader != counter)
+ if (leader != event)
group_sched_out(leader, cpuctx, ctx);
if (leader->attr.pinned) {
update_group_times(leader);
- leader->state = PERF_COUNTER_STATE_ERROR;
+ leader->state = PERF_EVENT_STATE_ERROR;
}
}
@@ -806,92 +805,92 @@ static void __perf_install_in_context(void *info)
}
/*
- * Attach a performance counter to a context
+ * Attach a performance event to a context
*
- * First we add the counter to the list with the hardware enable bit
- * in counter->hw_config cleared.
+ * First we add the event to the list with the hardware enable bit
+ * in event->hw_config cleared.
*
- * If the counter is attached to a task which is on a CPU we use a smp
+ * If the event is attached to a task which is on a CPU we use a smp
* call to enable it in the task context. The task might have been
* scheduled away, but we check this in the smp call again.
*
* Must be called with ctx->mutex held.
*/
static void
-perf_install_in_context(struct perf_counter_context *ctx,
- struct perf_counter *counter,
+perf_install_in_context(struct perf_event_context *ctx,
+ struct perf_event *event,
int cpu)
{
struct task_struct *task = ctx->task;
if (!task) {
/*
- * Per cpu counters are installed via an smp call and
+ * Per cpu events are installed via an smp call and
* the install is always successful.
*/
smp_call_function_single(cpu, __perf_install_in_context,
- counter, 1);
+ event, 1);
return;
}
retry:
task_oncpu_function_call(task, __perf_install_in_context,
- counter);
+ event);
spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
- if (ctx->is_active && list_empty(&counter->list_entry)) {
+ if (ctx->is_active && list_empty(&event->group_entry)) {
spin_unlock_irq(&ctx->lock);
goto retry;
}
/*
* The lock prevents that this context is scheduled in so we
- * can add the counter safely, if it the call above did not
+ * can add the event safely, if the call above did not
* succeed.
*/
- if (list_empty(&counter->list_entry))
- add_counter_to_ctx(counter, ctx);
+ if (list_empty(&event->group_entry))
+ add_event_to_ctx(event, ctx);
spin_unlock_irq(&ctx->lock);
}
/*
- * Put a counter into inactive state and update time fields.
+ * Put an event into inactive state and update time fields.
* Enabling the leader of a group effectively enables all
* the group members that aren't explicitly disabled, so we
* have to update their ->tstamp_enabled also.
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
-static void __perf_counter_mark_enabled(struct perf_counter *counter,
- struct perf_counter_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event,
+ struct perf_event_context *ctx)
{
- struct perf_counter *sub;
+ struct perf_event *sub;
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
- list_for_each_entry(sub, &counter->sibling_list, list_entry)
- if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ event->tstamp_enabled = ctx->time - event->total_time_enabled;
+ list_for_each_entry(sub, &event->sibling_list, group_entry)
+ if (sub->state >= PERF_EVENT_STATE_INACTIVE)
sub->tstamp_enabled =
ctx->time - sub->total_time_enabled;
}
/*
- * Cross CPU call to enable a performance counter
+ * Cross CPU call to enable a performance event
*/
-static void __perf_counter_enable(void *info)
+static void __perf_event_enable(void *info)
{
- struct perf_counter *counter = info;
+ struct perf_event *event = info;
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter_context *ctx = counter->ctx;
- struct perf_counter *leader = counter->group_leader;
+ struct perf_event_context *ctx = event->ctx;
+ struct perf_event *leader = event->group_leader;
int err;
/*
- * If this is a per-task counter, need to check whether this
- * counter's task is the current task on this cpu.
+ * If this is a per-task event, need to check whether this
+ * event's task is the current task on this cpu.
*/
if (ctx->task && cpuctx->task_ctx != ctx) {
if (cpuctx->task_ctx || ctx->task != current)
@@ -903,40 +902,40 @@ static void __perf_counter_enable(void *info)
ctx->is_active = 1;
update_context_time(ctx);
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto unlock;
- __perf_counter_mark_enabled(counter, ctx);
+ __perf_event_mark_enabled(event, ctx);
/*
- * If the counter is in a group and isn't the group leader,
+ * If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
- if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
+ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
- if (!group_can_go_on(counter, cpuctx, 1)) {
+ if (!group_can_go_on(event, cpuctx, 1)) {
err = -EEXIST;
} else {
perf_disable();
- if (counter == leader)
- err = group_sched_in(counter, cpuctx, ctx,
+ if (event == leader)
+ err = group_sched_in(event, cpuctx, ctx,
smp_processor_id());
else
- err = counter_sched_in(counter, cpuctx, ctx,
+ err = event_sched_in(event, cpuctx, ctx,
smp_processor_id());
perf_enable();
}
if (err) {
/*
- * If this counter can't go on and it's part of a
+ * If this event can't go on and it's part of a
* group, then the whole group has to come off.
*/
- if (leader != counter)
+ if (leader != event)
group_sched_out(leader, cpuctx, ctx);
if (leader->attr.pinned) {
update_group_times(leader);
- leader->state = PERF_COUNTER_STATE_ERROR;
+ leader->state = PERF_EVENT_STATE_ERROR;
}
}
@@ -945,98 +944,98 @@ static void __perf_counter_enable(void *info)
}
/*
- * Enable a counter.
+ * Enable an event.
*
- * If counter->ctx is a cloned context, callers must make sure that
- * every task struct that counter->ctx->task could possibly point to
+ * If event->ctx is a cloned context, callers must make sure that
+ * every task struct that event->ctx->task could possibly point to
* remains valid. This condition is satisfied when called through
- * perf_counter_for_each_child or perf_counter_for_each as described
- * for perf_counter_disable.
+ * perf_event_for_each_child or perf_event_for_each as described
+ * for perf_event_disable.
*/
-static void perf_counter_enable(struct perf_counter *counter)
+static void perf_event_enable(struct perf_event *event)
{
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
- * Enable the counter on the cpu that it's on
+ * Enable the event on the cpu that it's on
*/
- smp_call_function_single(counter->cpu, __perf_counter_enable,
- counter, 1);
+ smp_call_function_single(event->cpu, __perf_event_enable,
+ event, 1);
return;
}
spin_lock_irq(&ctx->lock);
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/*
- * If the counter is in error state, clear that first.
- * That way, if we see the counter in error state below, we
+ * If the event is in error state, clear that first.
+ * That way, if we see the event in error state below, we
* know that it has gone back into error state, as distinct
* from the task having been scheduled away before the
* cross-call arrived.
*/
- if (counter->state == PERF_COUNTER_STATE_ERROR)
- counter->state = PERF_COUNTER_STATE_OFF;
+ if (event->state == PERF_EVENT_STATE_ERROR)
+ event->state = PERF_EVENT_STATE_OFF;
retry:
spin_unlock_irq(&ctx->lock);
- task_oncpu_function_call(task, __perf_counter_enable, counter);
+ task_oncpu_function_call(task, __perf_event_enable, event);
spin_lock_irq(&ctx->lock);
/*
- * If the context is active and the counter is still off,
+ * If the context is active and the event is still off,
* we need to retry the cross-call.
*/
- if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
+ if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
goto retry;
/*
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
*/
- if (counter->state == PERF_COUNTER_STATE_OFF)
- __perf_counter_mark_enabled(counter, ctx);
+ if (event->state == PERF_EVENT_STATE_OFF)
+ __perf_event_mark_enabled(event, ctx);
out:
spin_unlock_irq(&ctx->lock);
}
-static int perf_counter_refresh(struct perf_counter *counter, int refresh)
+static int perf_event_refresh(struct perf_event *event, int refresh)
{
/*
- * not supported on inherited counters
+ * not supported on inherited events
*/
- if (counter->attr.inherit)
+ if (event->attr.inherit)
return -EINVAL;
- atomic_add(refresh, &counter->event_limit);
- perf_counter_enable(counter);
+ atomic_add(refresh, &event->event_limit);
+ perf_event_enable(event);
return 0;
}
-void __perf_counter_sched_out(struct perf_counter_context *ctx,
+void __perf_event_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
- struct perf_counter *counter;
+ struct perf_event *event;
spin_lock(&ctx->lock);
ctx->is_active = 0;
- if (likely(!ctx->nr_counters))
+ if (likely(!ctx->nr_events))
goto out;
update_context_time(ctx);
perf_disable();
if (ctx->nr_active) {
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter != counter->group_leader)
- counter_sched_out(counter, cpuctx, ctx);
+ list_for_each_entry(event, &ctx->group_list, group_entry) {
+ if (event != event->group_leader)
+ event_sched_out(event, cpuctx, ctx);
else
- group_sched_out(counter, cpuctx, ctx);
+ group_sched_out(event, cpuctx, ctx);
}
}
perf_enable();
@@ -1047,46 +1046,46 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
/*
* Test whether two contexts are equivalent, i.e. whether they
* have both been cloned from the same version of the same context
- * and they both have the same number of enabled counters.
- * If the number of enabled counters is the same, then the set
- * of enabled counters should be the same, because these are both
- * inherited contexts, therefore we can't access individual counters
+ * and they both have the same number of enabled events.
+ * If the number of enabled events is the same, then the set
+ * of enabled events should be the same, because these are both
+ * inherited contexts, therefore we can't access individual events
* in them directly with an fd; we can only enable/disable all
- * counters via prctl, or enable/disable all counters in a family
+ * events via prctl, or enable/disable all events in a family
* via ioctl, which will have the same effect on both contexts.
*/
-static int context_equiv(struct perf_counter_context *ctx1,
- struct perf_counter_context *ctx2)
+static int context_equiv(struct perf_event_context *ctx1,
+ struct perf_event_context *ctx2)
{
return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
&& ctx1->parent_gen == ctx2->parent_gen
&& !ctx1->pin_count && !ctx2->pin_count;
}
-static void __perf_counter_read(void *counter);
+static void __perf_event_read(void *event);
-static void __perf_counter_sync_stat(struct perf_counter *counter,
- struct perf_counter *next_counter)
+static void __perf_event_sync_stat(struct perf_event *event,
+ struct perf_event *next_event)
{
u64 value;
- if (!counter->attr.inherit_stat)
+ if (!event->attr.inherit_stat)
return;
/*
- * Update the counter value, we cannot use perf_counter_read()
+ * Update the event value, we cannot use perf_event_read()
* because we're in the middle of a context switch and have IRQs
* disabled, which upsets smp_call_function_single(), however
- * we know the counter must be on the current CPU, therefore we
+ * we know the event must be on the current CPU, therefore we
* don't need to use it.
*/
- switch (counter->state) {
- case PERF_COUNTER_STATE_ACTIVE:
- __perf_counter_read(counter);
+ switch (event->state) {
+ case PERF_EVENT_STATE_ACTIVE:
+ __perf_event_read(event);
break;
- case PERF_COUNTER_STATE_INACTIVE:
- update_counter_times(counter);
+ case PERF_EVENT_STATE_INACTIVE:
+ update_event_times(event);
break;
default:
@@ -1094,73 +1093,73 @@ static void __perf_counter_sync_stat(struct perf_counter *counter,
}
/*
- * In order to keep per-task stats reliable we need to flip the counter
+ * In order to keep per-task stats reliable we need to flip the event
* values when we flip the contexts.
*/
- value = atomic64_read(&next_counter->count);
- value = atomic64_xchg(&counter->count, value);
- atomic64_set(&next_counter->count, value);
+ value = atomic64_read(&next_event->count);
+ value = atomic64_xchg(&event->count, value);
+ atomic64_set(&next_event->count, value);
- swap(counter->total_time_enabled, next_counter->total_time_enabled);
- swap(counter->total_time_running, next_counter->total_time_running);
+ swap(event->total_time_enabled, next_event->total_time_enabled);
+ swap(event->total_time_running, next_event->total_time_running);
/*
* Since we swizzled the values, update the user visible data too.
*/
- perf_counter_update_userpage(counter);
- perf_counter_update_userpage(next_counter);
+ perf_event_update_userpage(event);
+ perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
-static void perf_counter_sync_stat(struct perf_counter_context *ctx,
- struct perf_counter_context *next_ctx)
+static void perf_event_sync_stat(struct perf_event_context *ctx,
+ struct perf_event_context *next_ctx)
{
- struct perf_counter *counter, *next_counter;
+ struct perf_event *event, *next_event;
if (!ctx->nr_stat)
return;
- counter = list_first_entry(&ctx->event_list,
- struct perf_counter, event_entry);
+ event = list_first_entry(&ctx->event_list,
+ struct perf_event, event_entry);
- next_counter = list_first_entry(&next_ctx->event_list,
- struct perf_counter, event_entry);
+ next_event = list_first_entry(&next_ctx->event_list,
+ struct perf_event, event_entry);
- while (&counter->event_entry != &ctx->event_list &&
- &next_counter->event_entry != &next_ctx->event_list) {
+ while (&event->event_entry != &ctx->event_list &&
+ &next_event->event_entry != &next_ctx->event_list) {
- __perf_counter_sync_stat(counter, next_counter);
+ __perf_event_sync_stat(event, next_event);
- counter = list_next_entry(counter, event_entry);
- next_counter = list_next_entry(next_counter, event_entry);
+ event = list_next_entry(event, event_entry);
+ next_event = list_next_entry(next_event, event_entry);
}
}
/*
- * Called from scheduler to remove the counters of the current task,
+ * Called from scheduler to remove the events of the current task,
* with interrupts disabled.
*
- * We stop each counter and update the counter value in counter->count.
+ * We stop each event and update the event value in event->count.
*
* This does not protect us against NMI, but disable()
- * sets the disabled bit in the control field of counter _before_
- * accessing the counter control register. If a NMI hits, then it will
- * not restart the counter.
+ * sets the disabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * not restart the event.
*/
-void perf_counter_task_sched_out(struct task_struct *task,
+void perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next, int cpu)
{
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
- struct perf_counter_context *ctx = task->perf_counter_ctxp;
- struct perf_counter_context *next_ctx;
- struct perf_counter_context *parent;
+ struct perf_event_context *ctx = task->perf_event_ctxp;
+ struct perf_event_context *next_ctx;
+ struct perf_event_context *parent;
struct pt_regs *regs;
int do_switch = 1;
regs = task_pt_regs(task);
- perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
if (likely(!ctx || !cpuctx->task_ctx))
return;
@@ -1169,7 +1168,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
rcu_read_lock();
parent = rcu_dereference(ctx->parent_ctx);
- next_ctx = next->perf_counter_ctxp;
+ next_ctx = next->perf_event_ctxp;
if (parent && next_ctx &&
rcu_dereference(next_ctx->parent_ctx) == parent) {
/*
@@ -1186,15 +1185,15 @@ void perf_counter_task_sched_out(struct task_struct *task,
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
- * wrt to rcu_dereference() of perf_counter_ctxp
+ * w.r.t. rcu_dereference() of perf_event_ctxp
*/
- task->perf_counter_ctxp = next_ctx;
- next->perf_counter_ctxp = ctx;
+ task->perf_event_ctxp = next_ctx;
+ next->perf_event_ctxp = ctx;
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
- perf_counter_sync_stat(ctx, next_ctx);
+ perf_event_sync_stat(ctx, next_ctx);
}
spin_unlock(&next_ctx->lock);
spin_unlock(&ctx->lock);
@@ -1202,7 +1201,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
rcu_read_unlock();
if (do_switch) {
- __perf_counter_sched_out(ctx, cpuctx);
+ __perf_event_sched_out(ctx, cpuctx);
cpuctx->task_ctx = NULL;
}
}
@@ -1210,7 +1209,7 @@ void perf_counter_task_sched_out(struct task_struct *task,
/*
* Called with IRQs disabled
*/
-static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
+static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
@@ -1220,28 +1219,28 @@ static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- __perf_counter_sched_out(ctx, cpuctx);
+ __perf_event_sched_out(ctx, cpuctx);
cpuctx->task_ctx = NULL;
}
/*
* Called with IRQs disabled
*/
-static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
- __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
+ __perf_event_sched_out(&cpuctx->ctx, cpuctx);
}
static void
-__perf_counter_sched_in(struct perf_counter_context *ctx,
+__perf_event_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx, int cpu)
{
- struct perf_counter *counter;
+ struct perf_event *event;
int can_add_hw = 1;
spin_lock(&ctx->lock);
ctx->is_active = 1;
- if (likely(!ctx->nr_counters))
+ if (likely(!ctx->nr_events))
goto out;
ctx->timestamp = perf_clock();
@@ -1252,52 +1251,52 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter->state <= PERF_COUNTER_STATE_OFF ||
- !counter->attr.pinned)
+ list_for_each_entry(event, &ctx->group_list, group_entry) {
+ if (event->state <= PERF_EVENT_STATE_OFF ||
+ !event->attr.pinned)
continue;
- if (counter->cpu != -1 && counter->cpu != cpu)
+ if (event->cpu != -1 && event->cpu != cpu)
continue;
- if (counter != counter->group_leader)
- counter_sched_in(counter, cpuctx, ctx, cpu);
+ if (event != event->group_leader)
+ event_sched_in(event, cpuctx, ctx, cpu);
else {
- if (group_can_go_on(counter, cpuctx, 1))
- group_sched_in(counter, cpuctx, ctx, cpu);
+ if (group_can_go_on(event, cpuctx, 1))
+ group_sched_in(event, cpuctx, ctx, cpu);
}
/*
* If this pinned group hasn't been scheduled,
* put it in error state.
*/
- if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
- update_group_times(counter);
- counter->state = PERF_COUNTER_STATE_ERROR;
+ if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ update_group_times(event);
+ event->state = PERF_EVENT_STATE_ERROR;
}
}
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_for_each_entry(event, &ctx->group_list, group_entry) {
/*
- * Ignore counters in OFF or ERROR state, and
- * ignore pinned counters since we did them already.
+ * Ignore events in OFF or ERROR state, and
+ * ignore pinned events since we did them already.
*/
- if (counter->state <= PERF_COUNTER_STATE_OFF ||
- counter->attr.pinned)
+ if (event->state <= PERF_EVENT_STATE_OFF ||
+ event->attr.pinned)
continue;
/*
* Listen to the 'cpu' scheduling filter constraint
- * of counters:
+ * of events:
*/
- if (counter->cpu != -1 && counter->cpu != cpu)
+ if (event->cpu != -1 && event->cpu != cpu)
continue;
- if (counter != counter->group_leader) {
- if (counter_sched_in(counter, cpuctx, ctx, cpu))
+ if (event != event->group_leader) {
+ if (event_sched_in(event, cpuctx, ctx, cpu))
can_add_hw = 0;
} else {
- if (group_can_go_on(counter, cpuctx, can_add_hw)) {
- if (group_sched_in(counter, cpuctx, ctx, cpu))
+ if (group_can_go_on(event, cpuctx, can_add_hw)) {
+ if (group_sched_in(event, cpuctx, ctx, cpu))
can_add_hw = 0;
}
}
@@ -1308,48 +1307,48 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
}
/*
- * Called from scheduler to add the counters of the current task
+ * Called from scheduler to add the events of the current task
* with interrupts disabled.
*
- * We restore the counter value and then enable it.
+ * We restore the event value and then enable it.
*
* This does not protect us against NMI, but enable()
- * sets the enabled bit in the control field of counter _before_
- * accessing the counter control register. If a NMI hits, then it will
- * keep the counter running.
+ * sets the enabled bit in the control field of the event _before_
+ * accessing the event control register. If an NMI hits, then it will
+ * keep the event running.
*/
-void perf_counter_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task, int cpu)
{
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
- struct perf_counter_context *ctx = task->perf_counter_ctxp;
+ struct perf_event_context *ctx = task->perf_event_ctxp;
if (likely(!ctx))
return;
if (cpuctx->task_ctx == ctx)
return;
- __perf_counter_sched_in(ctx, cpuctx, cpu);
+ __perf_event_sched_in(ctx, cpuctx, cpu);
cpuctx->task_ctx = ctx;
}
-static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
- struct perf_counter_context *ctx = &cpuctx->ctx;
+ struct perf_event_context *ctx = &cpuctx->ctx;
- __perf_counter_sched_in(ctx, cpuctx, cpu);
+ __perf_event_sched_in(ctx, cpuctx, cpu);
}
#define MAX_INTERRUPTS (~0ULL)
-static void perf_log_throttle(struct perf_counter *counter, int enable);
+static void perf_log_throttle(struct perf_event *event, int enable);
-static void perf_adjust_period(struct perf_counter *counter, u64 events)
+static void perf_adjust_period(struct perf_event *event, u64 events)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
u64 period, sample_period;
s64 delta;
events *= hwc->sample_period;
- period = div64_u64(events, counter->attr.sample_freq);
+ period = div64_u64(events, event->attr.sample_freq);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8; /* low pass filter */
@@ -1362,39 +1361,39 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
hwc->sample_period = sample_period;
}
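To make the filter concrete (numbers assumed purely for illustration): with attr.sample_freq at 1000 Hz, a current sample_period of 100000 and events equal to 2000, the ideal period would be 2000 * 100000 / 1000 = 200000; the filter only applies (200000 - 100000 + 7) / 8 = 12500 of that gap, so the new sample_period is 112500. A self-contained sketch of the same arithmetic (any clamping done in the elided part of the function is omitted):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the low-pass period adjustment above (illustration only). */
static uint64_t adjust_period(uint64_t sample_period, uint64_t sample_freq,
                              uint64_t events)
{
        uint64_t period;
        int64_t delta;

        events *= sample_period;
        period = events / sample_freq;

        delta = (int64_t)(period - sample_period);
        delta = (delta + 7) / 8;                /* low pass filter */

        return sample_period + delta;
}

int main(void)
{
        /* 2000 events/sec observed against a 1000 Hz sampling target. */
        printf("%llu\n", (unsigned long long)adjust_period(100000, 1000, 2000));
        return 0;                               /* prints 112500 */
}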
-static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
+static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
- struct perf_counter *counter;
- struct hw_perf_counter *hwc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
u64 interrupts, freq;
spin_lock(&ctx->lock);
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ list_for_each_entry(event, &ctx->group_list, group_entry) {
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
- hwc = &counter->hw;
+ hwc = &event->hw;
interrupts = hwc->interrupts;
hwc->interrupts = 0;
/*
- * unthrottle counters on the tick
+ * unthrottle events on the tick
*/
if (interrupts == MAX_INTERRUPTS) {
- perf_log_throttle(counter, 1);
- counter->pmu->unthrottle(counter);
- interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
+ perf_log_throttle(event, 1);
+ event->pmu->unthrottle(event);
+ interrupts = 2*sysctl_perf_event_sample_rate/HZ;
}
- if (!counter->attr.freq || !counter->attr.sample_freq)
+ if (!event->attr.freq || !event->attr.sample_freq)
continue;
/*
* if the specified freq < HZ then we need to skip ticks
*/
- if (counter->attr.sample_freq < HZ) {
- freq = counter->attr.sample_freq;
+ if (event->attr.sample_freq < HZ) {
+ freq = event->attr.sample_freq;
hwc->freq_count += freq;
hwc->freq_interrupts += interrupts;
@@ -1408,7 +1407,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
} else
freq = HZ;
- perf_adjust_period(counter, freq * interrupts);
+ perf_adjust_period(event, freq * interrupts);
/*
* In order to avoid being stalled by an (accidental) huge
@@ -1417,9 +1416,9 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
*/
if (!interrupts) {
perf_disable();
- counter->pmu->disable(counter);
+ event->pmu->disable(event);
atomic64_set(&hwc->period_left, 0);
- counter->pmu->enable(counter);
+ event->pmu->enable(event);
perf_enable();
}
}
@@ -1427,22 +1426,22 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
}
/*
- * Round-robin a context's counters:
+ * Round-robin a context's events:
*/
-static void rotate_ctx(struct perf_counter_context *ctx)
+static void rotate_ctx(struct perf_event_context *ctx)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- if (!ctx->nr_counters)
+ if (!ctx->nr_events)
return;
spin_lock(&ctx->lock);
/*
- * Rotate the first entry last (works just fine for group counters too):
+ * Rotate the first entry last (works just fine for group events too):
*/
perf_disable();
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- list_move_tail(&counter->list_entry, &ctx->counter_list);
+ list_for_each_entry(event, &ctx->group_list, group_entry) {
+ list_move_tail(&event->group_entry, &ctx->group_list);
break;
}
perf_enable();
@@ -1450,93 +1449,93 @@ static void rotate_ctx(struct perf_counter_context *ctx)
spin_unlock(&ctx->lock);
}
-void perf_counter_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr, int cpu)
{
struct perf_cpu_context *cpuctx;
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
- if (!atomic_read(&nr_counters))
+ if (!atomic_read(&nr_events))
return;
cpuctx = &per_cpu(perf_cpu_context, cpu);
- ctx = curr->perf_counter_ctxp;
+ ctx = curr->perf_event_ctxp;
perf_ctx_adjust_freq(&cpuctx->ctx);
if (ctx)
perf_ctx_adjust_freq(ctx);
- perf_counter_cpu_sched_out(cpuctx);
+ perf_event_cpu_sched_out(cpuctx);
if (ctx)
- __perf_counter_task_sched_out(ctx);
+ __perf_event_task_sched_out(ctx);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
- perf_counter_cpu_sched_in(cpuctx, cpu);
+ perf_event_cpu_sched_in(cpuctx, cpu);
if (ctx)
- perf_counter_task_sched_in(curr, cpu);
+ perf_event_task_sched_in(curr, cpu);
}
/*
- * Enable all of a task's counters that have been marked enable-on-exec.
+ * Enable all of a task's events that have been marked enable-on-exec.
* This expects task == current.
*/
-static void perf_counter_enable_on_exec(struct task_struct *task)
+static void perf_event_enable_on_exec(struct task_struct *task)
{
- struct perf_counter_context *ctx;
- struct perf_counter *counter;
+ struct perf_event_context *ctx;
+ struct perf_event *event;
unsigned long flags;
int enabled = 0;
local_irq_save(flags);
- ctx = task->perf_counter_ctxp;
- if (!ctx || !ctx->nr_counters)
+ ctx = task->perf_event_ctxp;
+ if (!ctx || !ctx->nr_events)
goto out;
- __perf_counter_task_sched_out(ctx);
+ __perf_event_task_sched_out(ctx);
spin_lock(&ctx->lock);
- list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (!counter->attr.enable_on_exec)
+ list_for_each_entry(event, &ctx->group_list, group_entry) {
+ if (!event->attr.enable_on_exec)
continue;
- counter->attr.enable_on_exec = 0;
- if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ event->attr.enable_on_exec = 0;
+ if (event->state >= PERF_EVENT_STATE_INACTIVE)
continue;
- __perf_counter_mark_enabled(counter, ctx);
+ __perf_event_mark_enabled(event, ctx);
enabled = 1;
}
/*
- * Unclone this context if we enabled any counter.
+ * Unclone this context if we enabled any event.
*/
if (enabled)
unclone_ctx(ctx);
spin_unlock(&ctx->lock);
- perf_counter_task_sched_in(task, smp_processor_id());
+ perf_event_task_sched_in(task, smp_processor_id());
out:
local_irq_restore(flags);
}
/*
- * Cross CPU call to read the hardware counter
+ * Cross CPU call to read the hardware event
*/
-static void __perf_counter_read(void *info)
+static void __perf_event_read(void *info)
{
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter *counter = info;
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event *event = info;
+ struct perf_event_context *ctx = event->ctx;
unsigned long flags;
/*
* If this is a task context, we need to check whether it is
* the current task context of this cpu. If not it has been
* scheduled out before the smp call arrived. In that case
- * counter->count would have been updated to a recent sample
- * when the counter was scheduled out.
+ * event->count would have been updated to a recent sample
+ * when the event was scheduled out.
*/
if (ctx->task && cpuctx->task_ctx != ctx)
return;
@@ -1544,56 +1543,56 @@ static void __perf_counter_read(void *info)
local_irq_save(flags);
if (ctx->is_active)
update_context_time(ctx);
- counter->pmu->read(counter);
- update_counter_times(counter);
+ event->pmu->read(event);
+ update_event_times(event);
local_irq_restore(flags);
}
-static u64 perf_counter_read(struct perf_counter *counter)
+static u64 perf_event_read(struct perf_event *event)
{
/*
- * If counter is enabled and currently active on a CPU, update the
- * value in the counter structure:
+ * If event is enabled and currently active on a CPU, update the
+ * value in the event structure:
*/
- if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
- smp_call_function_single(counter->oncpu,
- __perf_counter_read, counter, 1);
- } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
- update_counter_times(counter);
+ if (event->state == PERF_EVENT_STATE_ACTIVE) {
+ smp_call_function_single(event->oncpu,
+ __perf_event_read, event, 1);
+ } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ update_event_times(event);
}
- return atomic64_read(&counter->count);
+ return atomic64_read(&event->count);
}
/*
- * Initialize the perf_counter context in a task_struct:
+ * Initialize the perf_event context in a task_struct:
*/
static void
-__perf_counter_init_context(struct perf_counter_context *ctx,
+__perf_event_init_context(struct perf_event_context *ctx,
struct task_struct *task)
{
memset(ctx, 0, sizeof(*ctx));
spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
- INIT_LIST_HEAD(&ctx->counter_list);
+ INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
ctx->task = task;
}
-static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
+static struct perf_event_context *find_get_context(pid_t pid, int cpu)
{
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
struct perf_cpu_context *cpuctx;
struct task_struct *task;
unsigned long flags;
int err;
/*
- * If cpu is not a wildcard then this is a percpu counter:
+ * If cpu is not a wildcard then this is a percpu event:
*/
if (cpu != -1) {
- /* Must be root to operate on a CPU counter: */
+ /* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
@@ -1601,7 +1600,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
return ERR_PTR(-EINVAL);
/*
- * We could be clever and allow to attach a counter to an
+ * We could be clever and allow attaching an event to an
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
@@ -1628,7 +1627,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
return ERR_PTR(-ESRCH);
/*
- * Can't attach counters to a dying task.
+ * Can't attach events to a dying task.
*/
err = -ESRCH;
if (task->flags & PF_EXITING)
@@ -1647,13 +1646,13 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
}
if (!ctx) {
- ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
+ ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
err = -ENOMEM;
if (!ctx)
goto errout;
- __perf_counter_init_context(ctx, task);
+ __perf_event_init_context(ctx, task);
get_ctx(ctx);
- if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
+ if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
/*
* We raced with some other task; use
* the context they set.
@@ -1672,42 +1671,42 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
return ERR_PTR(err);
}
-static void free_counter_rcu(struct rcu_head *head)
+static void free_event_rcu(struct rcu_head *head)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- counter = container_of(head, struct perf_counter, rcu_head);
- if (counter->ns)
- put_pid_ns(counter->ns);
- kfree(counter);
+ event = container_of(head, struct perf_event, rcu_head);
+ if (event->ns)
+ put_pid_ns(event->ns);
+ kfree(event);
}
-static void perf_pending_sync(struct perf_counter *counter);
+static void perf_pending_sync(struct perf_event *event);
-static void free_counter(struct perf_counter *counter)
+static void free_event(struct perf_event *event)
{
- perf_pending_sync(counter);
+ perf_pending_sync(event);
- if (!counter->parent) {
- atomic_dec(&nr_counters);
- if (counter->attr.mmap)
- atomic_dec(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_dec(&nr_comm_counters);
- if (counter->attr.task)
- atomic_dec(&nr_task_counters);
+ if (!event->parent) {
+ atomic_dec(&nr_events);
+ if (event->attr.mmap)
+ atomic_dec(&nr_mmap_events);
+ if (event->attr.comm)
+ atomic_dec(&nr_comm_events);
+ if (event->attr.task)
+ atomic_dec(&nr_task_events);
}
- if (counter->output) {
- fput(counter->output->filp);
- counter->output = NULL;
+ if (event->output) {
+ fput(event->output->filp);
+ event->output = NULL;
}
- if (counter->destroy)
- counter->destroy(counter);
+ if (event->destroy)
+ event->destroy(event);
- put_ctx(counter->ctx);
- call_rcu(&counter->rcu_head, free_counter_rcu);
+ put_ctx(event->ctx);
+ call_rcu(&event->rcu_head, free_event_rcu);
}
/*
@@ -1715,43 +1714,43 @@ static void free_counter(struct perf_counter *counter)
*/
static int perf_release(struct inode *inode, struct file *file)
{
- struct perf_counter *counter = file->private_data;
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx = event->ctx;
file->private_data = NULL;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
- perf_counter_remove_from_context(counter);
+ perf_event_remove_from_context(event);
mutex_unlock(&ctx->mutex);
- mutex_lock(&counter->owner->perf_counter_mutex);
- list_del_init(&counter->owner_entry);
- mutex_unlock(&counter->owner->perf_counter_mutex);
- put_task_struct(counter->owner);
+ mutex_lock(&event->owner->perf_event_mutex);
+ list_del_init(&event->owner_entry);
+ mutex_unlock(&event->owner->perf_event_mutex);
+ put_task_struct(event->owner);
- free_counter(counter);
+ free_event(event);
return 0;
}
-static int perf_counter_read_size(struct perf_counter *counter)
+static int perf_event_read_size(struct perf_event *event)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
- if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
- if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
- if (counter->attr.read_format & PERF_FORMAT_ID)
+ if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
- if (counter->attr.read_format & PERF_FORMAT_GROUP) {
- nr += counter->group_leader->nr_siblings;
+ if (event->attr.read_format & PERF_FORMAT_GROUP) {
+ nr += event->group_leader->nr_siblings;
size += sizeof(u64);
}
@@ -1760,27 +1759,27 @@ static int perf_counter_read_size(struct perf_counter *counter)
return size;
}
-static u64 perf_counter_read_value(struct perf_counter *counter)
+static u64 perf_event_read_value(struct perf_event *event)
{
- struct perf_counter *child;
+ struct perf_event *child;
u64 total = 0;
- total += perf_counter_read(counter);
- list_for_each_entry(child, &counter->child_list, child_list)
- total += perf_counter_read(child);
+ total += perf_event_read(event);
+ list_for_each_entry(child, &event->child_list, child_list)
+ total += perf_event_read(child);
return total;
}
-static int perf_counter_read_entry(struct perf_counter *counter,
+static int perf_event_read_entry(struct perf_event *event,
u64 read_format, char __user *buf)
{
int n = 0, count = 0;
u64 values[2];
- values[n++] = perf_counter_read_value(counter);
+ values[n++] = perf_event_read_value(event);
if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_counter_id(counter);
+ values[n++] = primary_event_id(event);
count = n * sizeof(u64);
@@ -1790,10 +1789,10 @@ static int perf_counter_read_entry(struct perf_counter *counter,
return count;
}
-static int perf_counter_read_group(struct perf_counter *counter,
+static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
- struct perf_counter *leader = counter->group_leader, *sub;
+ struct perf_event *leader = event->group_leader, *sub;
int n = 0, size = 0, err = -EFAULT;
u64 values[3];
@@ -1812,14 +1811,14 @@ static int perf_counter_read_group(struct perf_counter *counter,
if (copy_to_user(buf, values, size))
return -EFAULT;
- err = perf_counter_read_entry(leader, read_format, buf + size);
+ err = perf_event_read_entry(leader, read_format, buf + size);
if (err < 0)
return err;
size += err;
- list_for_each_entry(sub, &leader->sibling_list, list_entry) {
- err = perf_counter_read_entry(sub, read_format,
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+ err = perf_event_read_entry(sub, read_format,
buf + size);
if (err < 0)
return err;
@@ -1830,23 +1829,23 @@ static int perf_counter_read_group(struct perf_counter *counter,
return size;
}
-static int perf_counter_read_one(struct perf_counter *counter,
+static int perf_event_read_one(struct perf_event *event,
u64 read_format, char __user *buf)
{
u64 values[4];
int n = 0;
- values[n++] = perf_counter_read_value(counter);
+ values[n++] = perf_event_read_value(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
- values[n++] = counter->total_time_enabled +
- atomic64_read(&counter->child_total_time_enabled);
+ values[n++] = event->total_time_enabled +
+ atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
- values[n++] = counter->total_time_running +
- atomic64_read(&counter->child_total_time_running);
+ values[n++] = event->total_time_running +
+ atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_counter_id(counter);
+ values[n++] = primary_event_id(event);
if (copy_to_user(buf, values, n * sizeof(u64)))
return -EFAULT;
@@ -1855,32 +1854,32 @@ static int perf_counter_read_one(struct perf_counter *counter,
}
/*
- * Read the performance counter - simple non blocking version for now
+ * Read the performance event - simple non blocking version for now
*/
static ssize_t
-perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
+perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
- u64 read_format = counter->attr.read_format;
+ u64 read_format = event->attr.read_format;
int ret;
/*
- * Return end-of-file for a read on a counter that is in
+ * Return end-of-file for a read on an event that is in
* error state (i.e. because it was pinned but it couldn't be
* scheduled on to the CPU at some point).
*/
- if (counter->state == PERF_COUNTER_STATE_ERROR)
+ if (event->state == PERF_EVENT_STATE_ERROR)
return 0;
- if (count < perf_counter_read_size(counter))
+ if (count < perf_event_read_size(event))
return -ENOSPC;
- WARN_ON_ONCE(counter->ctx->parent_ctx);
- mutex_lock(&counter->child_mutex);
+ WARN_ON_ONCE(event->ctx->parent_ctx);
+ mutex_lock(&event->child_mutex);
if (read_format & PERF_FORMAT_GROUP)
- ret = perf_counter_read_group(counter, read_format, buf);
+ ret = perf_event_read_group(event, read_format, buf);
else
- ret = perf_counter_read_one(counter, read_format, buf);
- mutex_unlock(&counter->child_mutex);
+ ret = perf_event_read_one(event, read_format, buf);
+ mutex_unlock(&event->child_mutex);
return ret;
}
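The layout these two paths emit is the one user space has to decode: a single value optionally followed by total_time_enabled, total_time_running and the event id, or, with PERF_FORMAT_GROUP, a count followed by one value (plus optional id) per group member. A sketch of decoding the non-group form, assuming all three optional flags are set (struct and function names are illustrative):

#include <stdint.h>
#include <unistd.h>

/* Matches perf_event_read_one() for read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID. */
struct read_one {
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
        uint64_t id;
};

static int read_scaled(int fd, double *scaled)
{
        struct read_one r;

        if (read(fd, &r, sizeof(r)) != sizeof(r))
                return -1;
        /* Compensate for multiplexing: value * enabled / running. */
        *scaled = r.time_running ?
                  (double)r.value * r.time_enabled / r.time_running : 0.0;
        return 0;
}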
@@ -1888,79 +1887,79 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct perf_counter *counter = file->private_data;
+ struct perf_event *event = file->private_data;
- return perf_read_hw(counter, buf, count);
+ return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
- struct perf_counter *counter = file->private_data;
+ struct perf_event *event = file->private_data;
struct perf_mmap_data *data;
unsigned int events = POLL_HUP;
rcu_read_lock();
- data = rcu_dereference(counter->data);
+ data = rcu_dereference(event->data);
if (data)
events = atomic_xchg(&data->poll, 0);
rcu_read_unlock();
- poll_wait(file, &counter->waitq, wait);
+ poll_wait(file, &event->waitq, wait);
return events;
}
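poll() support means a consumer does not have to busy-wait on the ring buffer; it can block until the output side publishes data->poll. A minimal sketch:

#include <poll.h>

/* Block until the perf fd signals readable output (or the timeout
 * expires); draining the mmap'ed buffer afterwards is up to the caller. */
static int wait_for_samples(int perf_fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };

        return poll(&pfd, 1, timeout_ms);
}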
-static void perf_counter_reset(struct perf_counter *counter)
+static void perf_event_reset(struct perf_event *event)
{
- (void)perf_counter_read(counter);
- atomic64_set(&counter->count, 0);
- perf_counter_update_userpage(counter);
+ (void)perf_event_read(event);
+ atomic64_set(&event->count, 0);
+ perf_event_update_userpage(event);
}
/*
- * Holding the top-level counter's child_mutex means that any
- * descendant process that has inherited this counter will block
- * in sync_child_counter if it goes to exit, thus satisfying the
- * task existence requirements of perf_counter_enable/disable.
+ * Holding the top-level event's child_mutex means that any
+ * descendant process that has inherited this event will block
+ * in sync_child_event if it goes to exit, thus satisfying the
+ * task existence requirements of perf_event_enable/disable.
*/
-static void perf_counter_for_each_child(struct perf_counter *counter,
- void (*func)(struct perf_counter *))
+static void perf_event_for_each_child(struct perf_event *event,
+ void (*func)(struct perf_event *))
{
- struct perf_counter *child;
+ struct perf_event *child;
- WARN_ON_ONCE(counter->ctx->parent_ctx);
- mutex_lock(&counter->child_mutex);
- func(counter);
- list_for_each_entry(child, &counter->child_list, child_list)
+ WARN_ON_ONCE(event->ctx->parent_ctx);
+ mutex_lock(&event->child_mutex);
+ func(event);
+ list_for_each_entry(child, &event->child_list, child_list)
func(child);
- mutex_unlock(&counter->child_mutex);
+ mutex_unlock(&event->child_mutex);
}
-static void perf_counter_for_each(struct perf_counter *counter,
- void (*func)(struct perf_counter *))
+static void perf_event_for_each(struct perf_event *event,
+ void (*func)(struct perf_event *))
{
- struct perf_counter_context *ctx = counter->ctx;
- struct perf_counter *sibling;
+ struct perf_event_context *ctx = event->ctx;
+ struct perf_event *sibling;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
- counter = counter->group_leader;
+ event = event->group_leader;
- perf_counter_for_each_child(counter, func);
- func(counter);
- list_for_each_entry(sibling, &counter->sibling_list, list_entry)
- perf_counter_for_each_child(counter, func);
+ perf_event_for_each_child(event, func);
+ func(event);
+ list_for_each_entry(sibling, &event->sibling_list, group_entry)
+ perf_event_for_each_child(event, func);
mutex_unlock(&ctx->mutex);
}
-static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
- struct perf_counter_context *ctx = counter->ctx;
+ struct perf_event_context *ctx = event->ctx;
unsigned long size;
int ret = 0;
u64 value;
- if (!counter->attr.sample_period)
+ if (!event->attr.sample_period)
return -EINVAL;
size = copy_from_user(&value, arg, sizeof(value));
@@ -1971,16 +1970,16 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
return -EINVAL;
spin_lock_irq(&ctx->lock);
- if (counter->attr.freq) {
- if (value > sysctl_perf_counter_sample_rate) {
+ if (event->attr.freq) {
+ if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
goto unlock;
}
- counter->attr.sample_freq = value;
+ event->attr.sample_freq = value;
} else {
- counter->attr.sample_period = value;
- counter->hw.sample_period = value;
+ event->attr.sample_period = value;
+ event->hw.sample_period = value;
}
unlock:
spin_unlock_irq(&ctx->lock);
@@ -1988,80 +1987,80 @@ unlock:
return ret;
}
-int perf_counter_set_output(struct perf_counter *counter, int output_fd);
+int perf_event_set_output(struct perf_event *event, int output_fd);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct perf_counter *counter = file->private_data;
- void (*func)(struct perf_counter *);
+ struct perf_event *event = file->private_data;
+ void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
- case PERF_COUNTER_IOC_ENABLE:
- func = perf_counter_enable;
+ case PERF_EVENT_IOC_ENABLE:
+ func = perf_event_enable;
break;
- case PERF_COUNTER_IOC_DISABLE:
- func = perf_counter_disable;
+ case PERF_EVENT_IOC_DISABLE:
+ func = perf_event_disable;
break;
- case PERF_COUNTER_IOC_RESET:
- func = perf_counter_reset;
+ case PERF_EVENT_IOC_RESET:
+ func = perf_event_reset;
break;
- case PERF_COUNTER_IOC_REFRESH:
- return perf_counter_refresh(counter, arg);
+ case PERF_EVENT_IOC_REFRESH:
+ return perf_event_refresh(event, arg);
- case PERF_COUNTER_IOC_PERIOD:
- return perf_counter_period(counter, (u64 __user *)arg);
+ case PERF_EVENT_IOC_PERIOD:
+ return perf_event_period(event, (u64 __user *)arg);
- case PERF_COUNTER_IOC_SET_OUTPUT:
- return perf_counter_set_output(counter, arg);
+ case PERF_EVENT_IOC_SET_OUTPUT:
+ return perf_event_set_output(event, arg);
default:
return -ENOTTY;
}
if (flags & PERF_IOC_FLAG_GROUP)
- perf_counter_for_each(counter, func);
+ perf_event_for_each(event, func);
else
- perf_counter_for_each_child(counter, func);
+ perf_event_for_each_child(event, func);
return 0;
}
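These are the commands user space drives an open event with; passing PERF_IOC_FLAG_GROUP as the argument applies the operation to the whole group via perf_event_for_each() above. A sketch of the usual reset/enable/run/disable cycle, assuming fd came from a prior perf_event_open() call with the default read_format:

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Count one event around a workload (sketch; error paths simplified). */
static int measure(int fd, void (*workload)(void), uint64_t *count)
{
        if (ioctl(fd, PERF_EVENT_IOC_RESET, 0) ||
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0))
                return -1;

        workload();

        if (ioctl(fd, PERF_EVENT_IOC_DISABLE, 0))
                return -1;

        /* Default read_format: a single u64 value. */
        return read(fd, count, sizeof(*count)) == sizeof(*count) ? 0 : -1;
}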
-int perf_counter_task_enable(void)
+int perf_event_task_enable(void)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- mutex_lock(&current->perf_counter_mutex);
- list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
- perf_counter_for_each_child(counter, perf_counter_enable);
- mutex_unlock(&current->perf_counter_mutex);
+ mutex_lock(&current->perf_event_mutex);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry)
+ perf_event_for_each_child(event, perf_event_enable);
+ mutex_unlock(&current->perf_event_mutex);
return 0;
}
-int perf_counter_task_disable(void)
+int perf_event_task_disable(void)
{
- struct perf_counter *counter;
+ struct perf_event *event;
- mutex_lock(&current->perf_counter_mutex);
- list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
- perf_counter_for_each_child(counter, perf_counter_disable);
- mutex_unlock(&current->perf_counter_mutex);
+ mutex_lock(&current->perf_event_mutex);
+ list_for_each_entry(event, &current->perf_event_list, owner_entry)
+ perf_event_for_each_child(event, perf_event_disable);
+ mutex_unlock(&current->perf_event_mutex);
return 0;
}
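Both helpers are reached from prctl(); with the rename the request names become PR_TASK_PERF_EVENTS_ENABLE and PR_TASK_PERF_EVENTS_DISABLE, toggling every event the calling task owns. A sketch (assumes the renamed prctl constants are available in the installed headers):

#include <sys/prctl.h>

/* Pause or resume all events owned by the calling task (sketch). */
static int perf_events_pause(int pause)
{
        return prctl(pause ? PR_TASK_PERF_EVENTS_DISABLE
                           : PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
}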
-#ifndef PERF_COUNTER_INDEX_OFFSET
-# define PERF_COUNTER_INDEX_OFFSET 0
+#ifndef PERF_EVENT_INDEX_OFFSET
+# define PERF_EVENT_INDEX_OFFSET 0
#endif
-static int perf_counter_index(struct perf_counter *counter)
+static int perf_event_index(struct perf_event *event)
{
- if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
return 0;
- return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+ return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}
/*
@@ -2069,13 +2068,13 @@ static int perf_counter_index(struct perf_counter *counter)
* the seqlock logic goes bad. We can not serialize this because the arch
* code calls this from NMI context.
*/
-void perf_counter_update_userpage(struct perf_counter *counter)
+void perf_event_update_userpage(struct perf_event *event)
{
- struct perf_counter_mmap_page *userpg;
+ struct perf_event_mmap_page *userpg;
struct perf_mmap_data *data;
rcu_read_lock();
- data = rcu_dereference(counter->data);
+ data = rcu_dereference(event->data);
if (!data)
goto unlock;
@@ -2088,16 +2087,16 @@ void perf_counter_update_userpage(struct perf_counter *counter)
preempt_disable();
++userpg->lock;
barrier();
- userpg->index = perf_counter_index(counter);
- userpg->offset = atomic64_read(&counter->count);
- if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- userpg->offset -= atomic64_read(&counter->hw.prev_count);
+ userpg->index = perf_event_index(event);
+ userpg->offset = atomic64_read(&event->count);
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
+ userpg->offset -= atomic64_read(&event->hw.prev_count);
- userpg->time_enabled = counter->total_time_enabled +
- atomic64_read(&counter->child_total_time_enabled);
+ userpg->time_enabled = event->total_time_enabled +
+ atomic64_read(&event->child_total_time_enabled);
- userpg->time_running = counter->total_time_running +
- atomic64_read(&counter->child_total_time_running);
+ userpg->time_running = event->total_time_running +
+ atomic64_read(&event->child_total_time_running);
barrier();
++userpg->lock;
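The two increments of userpg->lock around the update form a simple sequence count, so a user-space reader of the mmap'ed first page retries whenever the value changes (or is odd) across its reads. A hedged sketch of the matching reader:

#include <linux/perf_event.h>
#include <stdint.h>

/* Snapshot index/offset from the mmap'ed perf_event_mmap_page,
 * retrying while the kernel is mid-update (sketch). */
static void read_userpage(volatile struct perf_event_mmap_page *pg,
                          uint32_t *index, int64_t *offset)
{
        uint32_t seq;

        do {
                seq = pg->lock;
                __sync_synchronize();   /* pairs with the barrier()s above */
                *index = pg->index;
                *offset = pg->offset;
                __sync_synchronize();
        } while (pg->lock != seq || (seq & 1));
}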
@@ -2108,7 +2107,7 @@ unlock:
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct perf_counter *counter = vma->vm_file->private_data;
+ struct perf_event *event = vma->vm_file->private_data;
struct perf_mmap_data *data;
int ret = VM_FAULT_SIGBUS;
@@ -2119,7 +2118,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
rcu_read_lock();
- data = rcu_dereference(counter->data);
+ data = rcu_dereference(event->data);
if (!data)
goto unlock;
@@ -2148,13 +2147,13 @@ unlock:
return ret;
}
-static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
+static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
{
struct perf_mmap_data *data;
unsigned long size;
int i;
- WARN_ON(atomic_read(&counter->mmap_count));
+ WARN_ON(atomic_read(&event->mmap_count));
size = sizeof(struct perf_mmap_data);
size += nr_pages * sizeof(void *);
@@ -2176,14 +2175,14 @@ static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
data->nr_pages = nr_pages;
atomic_set(&data->lock, -1);
- if (counter->attr.watermark) {
+ if (event->attr.watermark) {
data->watermark = min_t(long, PAGE_SIZE * nr_pages,
- counter->attr.wakeup_watermark);
+ event->attr.wakeup_watermark);
}
if (!data->watermark)
data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
- rcu_assign_pointer(counter->data, data);
+ rcu_assign_pointer(event->data, data);
return 0;
@@ -2222,35 +2221,35 @@ static void __perf_mmap_data_free(struct rcu_head *rcu_head)
kfree(data);
}
-static void perf_mmap_data_free(struct perf_counter *counter)
+static void perf_mmap_data_free(struct perf_event *event)
{
- struct perf_mmap_data *data = counter->data;
+ struct perf_mmap_data *data = event->data;
- WARN_ON(atomic_read(&counter->mmap_count));
+ WARN_ON(atomic_read(&event->mmap_count));
- rcu_assign_pointer(counter->data, NULL);
+ rcu_assign_pointer(event->data, NULL);
call_rcu(&data->rcu_head, __perf_mmap_data_free);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
- struct perf_counter *counter = vma->vm_file->private_data;
+ struct perf_event *event = vma->vm_file->private_data;
- atomic_inc(&counter->mmap_count);
+ atomic_inc(&event->mmap_count);
}
static void perf_mmap_close(struct vm_area_struct *vma)
{
- struct perf_counter *counter = vma->vm_file->private_data;
+ struct perf_event *event = vma->vm_file->private_data;
- WARN_ON_ONCE(counter->ctx->parent_ctx);
- if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
+ WARN_ON_ONCE(event->ctx->parent_ctx);
+ if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
struct user_struct *user = current_user();
- atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
- vma->vm_mm->locked_vm -= counter->data->nr_locked;
- perf_mmap_data_free(counter);
- mutex_unlock(&counter->mmap_mutex);
+ atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
+ vma->vm_mm->locked_vm -= event->data->nr_locked;
+ perf_mmap_data_free(event);
+ mutex_unlock(&event->mmap_mutex);
}
}
@@ -2263,7 +2262,7 @@ static struct vm_operations_struct perf_mmap_vmops = {
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct perf_counter *counter = file->private_data;
+ struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
@@ -2291,21 +2290,21 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_pgoff != 0)
return -EINVAL;
- WARN_ON_ONCE(counter->ctx->parent_ctx);
- mutex_lock(&counter->mmap_mutex);
- if (counter->output) {
+ WARN_ON_ONCE(event->ctx->parent_ctx);
+ mutex_lock(&event->mmap_mutex);
+ if (event->output) {
ret = -EINVAL;
goto unlock;
}
- if (atomic_inc_not_zero(&counter->mmap_count)) {
- if (nr_pages != counter->data->nr_pages)
+ if (atomic_inc_not_zero(&event->mmap_count)) {
+ if (nr_pages != event->data->nr_pages)
ret = -EINVAL;
goto unlock;
}
user_extra = nr_pages + 1;
- user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+ user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
/*
* Increase the limit linearly with more CPUs:
@@ -2328,20 +2327,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
goto unlock;
}
- WARN_ON(counter->data);
- ret = perf_mmap_data_alloc(counter, nr_pages);
+ WARN_ON(event->data);
+ ret = perf_mmap_data_alloc(event, nr_pages);
if (ret)
goto unlock;
- atomic_set(&counter->mmap_count, 1);
+ atomic_set(&event->mmap_count, 1);
atomic_long_add(user_extra, &user->locked_vm);
vma->vm_mm->locked_vm += extra;
- counter->data->nr_locked = extra;
+ event->data->nr_locked = extra;
if (vma->vm_flags & VM_WRITE)
- counter->data->writable = 1;
+ event->data->writable = 1;
unlock:
- mutex_unlock(&counter->mmap_mutex);
+ mutex_unlock(&event->mmap_mutex);
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &perf_mmap_vmops;
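On the user side the mapping is the metadata page followed by the data pages (expected to be a power-of-two count, checked earlier in perf_mmap()); once mapped, records are consumed between data_tail and data_head as published by the output code below. A sketch of setting the mapping up:

#include <linux/perf_event.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map the metadata page plus 2^3 data pages of the ring buffer
 * (sketch; MAP_FAILED handling left to the caller). */
static struct perf_event_mmap_page *map_ring(int fd, size_t *data_size)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);

        *data_size = 8 * page;          /* data area: power of two pages */
        return mmap(NULL, page + *data_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0);
}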
@@ -2352,11 +2351,11 @@ unlock:
static int perf_fasync(int fd, struct file *filp, int on)
{
struct inode *inode = filp->f_path.dentry->d_inode;
- struct perf_counter *counter = filp->private_data;
+ struct perf_event *event = filp->private_data;
int retval;
mutex_lock(&inode->i_mutex);
- retval = fasync_helper(fd, filp, on, &counter->fasync);
+ retval = fasync_helper(fd, filp, on, &event->fasync);
mutex_unlock(&inode->i_mutex);
if (retval < 0)
@@ -2376,19 +2375,19 @@ static const struct file_operations perf_fops = {
};
/*
- * Perf counter wakeup
+ * Perf event wakeup
*
* If there's data, ensure we set the poll() state and publish everything
* to user-space before waking everybody up.
*/
-void perf_counter_wakeup(struct perf_counter *counter)
+void perf_event_wakeup(struct perf_event *event)
{
- wake_up_all(&counter->waitq);
+ wake_up_all(&event->waitq);
- if (counter->pending_kill) {
- kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
- counter->pending_kill = 0;
+ if (event->pending_kill) {
+ kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+ event->pending_kill = 0;
}
}
@@ -2401,19 +2400,19 @@ void perf_counter_wakeup(struct perf_counter *counter)
* single linked list and use cmpxchg() to add entries lockless.
*/
-static void perf_pending_counter(struct perf_pending_entry *entry)
+static void perf_pending_event(struct perf_pending_entry *entry)
{
- struct perf_counter *counter = container_of(entry,
- struct perf_counter, pending);
+ struct perf_event *event = container_of(entry,
+ struct perf_event, pending);
- if (counter->pending_disable) {
- counter->pending_disable = 0;
- __perf_counter_disable(counter);
+ if (event->pending_disable) {
+ event->pending_disable = 0;
+ __perf_event_disable(event);
}
- if (counter->pending_wakeup) {
- counter->pending_wakeup = 0;
- perf_counter_wakeup(counter);
+ if (event->pending_wakeup) {
+ event->pending_wakeup = 0;
+ perf_event_wakeup(event);
}
}
@@ -2439,7 +2438,7 @@ static void perf_pending_queue(struct perf_pending_entry *entry,
entry->next = *head;
} while (cmpxchg(head, entry->next, entry) != entry->next);
- set_perf_counter_pending();
+ set_perf_event_pending();
put_cpu_var(perf_pending_head);
}
@@ -2472,7 +2471,7 @@ static int __perf_pending_run(void)
return nr;
}
-static inline int perf_not_pending(struct perf_counter *counter)
+static inline int perf_not_pending(struct perf_event *event)
{
/*
* If we flush on whatever cpu we run, there is a chance we don't
@@ -2487,15 +2486,15 @@ static inline int perf_not_pending(struct perf_counter *counter)
* so that we do not miss the wakeup. -- see perf_pending_handle()
*/
smp_rmb();
- return counter->pending.next == NULL;
+ return event->pending.next == NULL;
}
-static void perf_pending_sync(struct perf_counter *counter)
+static void perf_pending_sync(struct perf_event *event)
{
- wait_event(counter->waitq, perf_not_pending(counter));
+ wait_event(event->waitq, perf_not_pending(event));
}
-void perf_counter_do_pending(void)
+void perf_event_do_pending(void)
{
__perf_pending_run();
}
@@ -2536,25 +2535,25 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
atomic_set(&handle->data->poll, POLL_IN);
if (handle->nmi) {
- handle->counter->pending_wakeup = 1;
- perf_pending_queue(&handle->counter->pending,
- perf_pending_counter);
+ handle->event->pending_wakeup = 1;
+ perf_pending_queue(&handle->event->pending,
+ perf_pending_event);
} else
- perf_counter_wakeup(handle->counter);
+ perf_event_wakeup(handle->event);
}
/*
* Curious locking construct.
*
- * We need to ensure a later event doesn't publish a head when a former
- * event isn't done writing. However since we need to deal with NMIs we
+ * We need to ensure a later event_id doesn't publish a head when a former
+ * event_id isn't done writing. However since we need to deal with NMIs we
* cannot fully serialize things.
*
* What we do is serialize between CPUs so we only have to deal with NMI
* nesting on a single CPU.
*
* We only publish the head (and generate a wakeup) when the outer-most
- * event completes.
+ * event_id completes.
*/
static void perf_output_lock(struct perf_output_handle *handle)
{
@@ -2658,10 +2657,10 @@ void perf_output_copy(struct perf_output_handle *handle,
}
int perf_output_begin(struct perf_output_handle *handle,
- struct perf_counter *counter, unsigned int size,
+ struct perf_event *event, unsigned int size,
int nmi, int sample)
{
- struct perf_counter *output_counter;
+ struct perf_event *output_event;
struct perf_mmap_data *data;
unsigned long tail, offset, head;
int have_lost;
@@ -2673,21 +2672,21 @@ int perf_output_begin(struct perf_output_handle *handle,
rcu_read_lock();
/*
- * For inherited counters we send all the output towards the parent.
+ * For inherited events we send all the output towards the parent.
*/
- if (counter->parent)
- counter = counter->parent;
+ if (event->parent)
+ event = event->parent;
- output_counter = rcu_dereference(counter->output);
- if (output_counter)
- counter = output_counter;
+ output_event = rcu_dereference(event->output);
+ if (output_event)
+ event = output_event;
- data = rcu_dereference(counter->data);
+ data = rcu_dereference(event->data);
if (!data)
goto out;
handle->data = data;
- handle->counter = counter;
+ handle->event = event;
handle->nmi = nmi;
handle->sample = sample;
@@ -2721,10 +2720,10 @@ int perf_output_begin(struct perf_output_handle *handle,
atomic_set(&data->wakeup, 1);
if (have_lost) {
- lost_event.header.type = PERF_EVENT_LOST;
+ lost_event.header.type = PERF_RECORD_LOST;
lost_event.header.misc = 0;
lost_event.header.size = sizeof(lost_event);
- lost_event.id = counter->id;
+ lost_event.id = event->id;
lost_event.lost = atomic_xchg(&data->lost, 0);
perf_output_put(handle, lost_event);
@@ -2743,10 +2742,10 @@ out:
void perf_output_end(struct perf_output_handle *handle)
{
- struct perf_counter *counter = handle->counter;
+ struct perf_event *event = handle->event;
struct perf_mmap_data *data = handle->data;
- int wakeup_events = counter->attr.wakeup_events;
+ int wakeup_events = event->attr.wakeup_events;
if (handle->sample && wakeup_events) {
int events = atomic_inc_return(&data->events);
@@ -2760,58 +2759,58 @@ void perf_output_end(struct perf_output_handle *handle)
rcu_read_unlock();
}
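When perf_output_begin() cannot reserve space it accounts the drop and, on a later successful begin, emits a PERF_RECORD_LOST record, so stream parsers should expect it; its body is just the event id and the number of dropped records:

#include <linux/perf_event.h>
#include <stdint.h>

/* Body of the lost-record emitted above (sketch of the layout). */
struct lost_record {
        struct perf_event_header header;        /* .type == PERF_RECORD_LOST */
        uint64_t id;                            /* id of the event stream    */
        uint64_t lost;                          /* number of dropped records */
};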
-static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
+static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
/*
- * only top level counters have the pid namespace they were created in
+ * only top level events have the pid namespace they were created in
*/
- if (counter->parent)
- counter = counter->parent;
+ if (event->parent)
+ event = event->parent;
- return task_tgid_nr_ns(p, counter->ns);
+ return task_tgid_nr_ns(p, event->ns);
}
-static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
+static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
/*
- * only top level counters have the pid namespace they were created in
+ * only top level events have the pid namespace they were created in
*/
- if (counter->parent)
- counter = counter->parent;
+ if (event->parent)
+ event = event->parent;
- return task_pid_nr_ns(p, counter->ns);
+ return task_pid_nr_ns(p, event->ns);
}
static void perf_output_read_one(struct perf_output_handle *handle,
- struct perf_counter *counter)
+ struct perf_event *event)
{
- u64 read_format = counter->attr.read_format;
+ u64 read_format = event->attr.read_format;
u64 values[4];
int n = 0;
- values[n++] = atomic64_read(&counter->count);
+ values[n++] = atomic64_read(&event->count);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
- values[n++] = counter->total_time_enabled +
- atomic64_read(&counter->child_total_time_enabled);
+ values[n++] = event->total_time_enabled +
+ atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
- values[n++] = counter->total_time_running +
- atomic64_read(&counter->child_total_time_running);
+ values[n++] = event->total_time_running +
+ atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_counter_id(counter);
+ values[n++] = primary_event_id(event);
perf_output_copy(handle, values, n * sizeof(u64));
}
/*
- * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
*/
static void perf_output_read_group(struct perf_output_handle *handle,
- struct perf_counter *counter)
+ struct perf_event *event)
{
- struct perf_counter *leader = counter->group_leader, *sub;
- u64 read_format = counter->attr.read_format;
+ struct perf_event *leader = event->group_leader, *sub;
+ u64 read_format = event->attr.read_format;
u64 values[5];
int n = 0;
@@ -2823,42 +2822,42 @@ static void perf_output_read_group(struct perf_output_handle *handle,
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = leader->total_time_running;
- if (leader != counter)
+ if (leader != event)
leader->pmu->read(leader);
values[n++] = atomic64_read(&leader->count);
if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_counter_id(leader);
+ values[n++] = primary_event_id(leader);
perf_output_copy(handle, values, n * sizeof(u64));
- list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
- if (sub != counter)
+ if (sub != event)
sub->pmu->read(sub);
values[n++] = atomic64_read(&sub->count);
if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_counter_id(sub);
+ values[n++] = primary_event_id(sub);
perf_output_copy(handle, values, n * sizeof(u64));
}
}
static void perf_output_read(struct perf_output_handle *handle,
- struct perf_counter *counter)
+ struct perf_event *event)
{
- if (counter->attr.read_format & PERF_FORMAT_GROUP)
- perf_output_read_group(handle, counter);
+ if (event->attr.read_format & PERF_FORMAT_GROUP)
+ perf_output_read_group(handle, event);
else
- perf_output_read_one(handle, counter);
+ perf_output_read_one(handle, event);
}
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
- struct perf_counter *counter)
+ struct perf_event *event)
{
u64 sample_type = data->type;
@@ -2889,7 +2888,7 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
- perf_output_read(handle, counter);
+ perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
if (data->callchain) {
@@ -2927,14 +2926,14 @@ void perf_output_sample(struct perf_output_handle *handle,
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
- struct perf_counter *counter,
+ struct perf_event *event,
struct pt_regs *regs)
{
- u64 sample_type = counter->attr.sample_type;
+ u64 sample_type = event->attr.sample_type;
data->type = sample_type;
- header->type = PERF_EVENT_SAMPLE;
+ header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header);
header->misc = 0;
@@ -2948,8 +2947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_TID) {
/* namespace issues */
- data->tid_entry.pid = perf_counter_pid(counter, current);
- data->tid_entry.tid = perf_counter_tid(counter, current);
+ data->tid_entry.pid = perf_event_pid(event, current);
+ data->tid_entry.tid = perf_event_tid(event, current);
header->size += sizeof(data->tid_entry);
}
@@ -2964,13 +2963,13 @@ void perf_prepare_sample(struct perf_event_header *header,
header->size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_ID) {
- data->id = primary_counter_id(counter);
+ data->id = primary_event_id(event);
header->size += sizeof(data->id);
}
if (sample_type & PERF_SAMPLE_STREAM_ID) {
- data->stream_id = counter->id;
+ data->stream_id = event->id;
header->size += sizeof(data->stream_id);
}
@@ -2986,7 +2985,7 @@ void perf_prepare_sample(struct perf_event_header *header,
header->size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_READ)
- header->size += perf_counter_read_size(counter);
+ header->size += perf_event_read_size(event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
@@ -3012,25 +3011,25 @@ void perf_prepare_sample(struct perf_event_header *header,
}
}
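The order in which perf_prepare_sample() sizes the fields is the order perf_output_sample() writes them, so decoders can lay a struct over the record once the sample_type is known. A sketch for an assumed sample_type of IP | TID | TIME | PERIOD (other bits add their fields in the same relative order):

#include <linux/perf_event.h>
#include <stdint.h>

/* PERF_RECORD_SAMPLE body for attr.sample_type ==
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD
 * (sketch). */
struct sample_record {
        struct perf_event_header header;        /* .type == PERF_RECORD_SAMPLE */
        uint64_t ip;
        uint32_t pid, tid;
        uint64_t time;
        uint64_t period;
};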
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_event_output(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_output_handle handle;
struct perf_event_header header;
- perf_prepare_sample(&header, data, counter, regs);
+ perf_prepare_sample(&header, data, event, regs);
- if (perf_output_begin(&handle, counter, header.size, nmi, 1))
+ if (perf_output_begin(&handle, event, header.size, nmi, 1))
return;
- perf_output_sample(&handle, &header, data, counter);
+ perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
}
/*
- * read event
+ * read event_id
*/
struct perf_read_event {
@@ -3041,27 +3040,27 @@ struct perf_read_event {
};
static void
-perf_counter_read_event(struct perf_counter *counter,
+perf_event_read_event(struct perf_event *event,
struct task_struct *task)
{
struct perf_output_handle handle;
- struct perf_read_event event = {
+ struct perf_read_event read_event = {
.header = {
- .type = PERF_EVENT_READ,
+ .type = PERF_RECORD_READ,
.misc = 0,
- .size = sizeof(event) + perf_counter_read_size(counter),
+ .size = sizeof(read_event) + perf_event_read_size(event),
},
- .pid = perf_counter_pid(counter, task),
- .tid = perf_counter_tid(counter, task),
+ .pid = perf_event_pid(event, task),
+ .tid = perf_event_tid(event, task),
};
int ret;
- ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+ ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
if (ret)
return;
- perf_output_put(&handle, event);
- perf_output_read(&handle, counter);
+ perf_output_put(&handle, read_event);
+ perf_output_read(&handle, event);
perf_output_end(&handle);
}
@@ -3074,7 +3073,7 @@ perf_counter_read_event(struct perf_counter *counter,
struct perf_task_event {
struct task_struct *task;
- struct perf_counter_context *task_ctx;
+ struct perf_event_context *task_ctx;
struct {
struct perf_event_header header;
@@ -3084,10 +3083,10 @@ struct perf_task_event {
u32 tid;
u32 ptid;
u64 time;
- } event;
+ } event_id;
};
-static void perf_counter_task_output(struct perf_counter *counter,
+static void perf_event_task_output(struct perf_event *event,
struct perf_task_event *task_event)
{
struct perf_output_handle handle;
@@ -3095,85 +3094,85 @@ static void perf_counter_task_output(struct perf_counter *counter,
struct task_struct *task = task_event->task;
int ret;
- size = task_event->event.header.size;
- ret = perf_output_begin(&handle, counter, size, 0, 0);
+ size = task_event->event_id.header.size;
+ ret = perf_output_begin(&handle, event, size, 0, 0);
if (ret)
return;
- task_event->event.pid = perf_counter_pid(counter, task);
- task_event->event.ppid = perf_counter_pid(counter, current);
+ task_event->event_id.pid = perf_event_pid(event, task);
+ task_event->event_id.ppid = perf_event_pid(event, current);
- task_event->event.tid = perf_counter_tid(counter, task);
- task_event->event.ptid = perf_counter_tid(counter, current);
+ task_event->event_id.tid = perf_event_tid(event, task);
+ task_event->event_id.ptid = perf_event_tid(event, current);
- task_event->event.time = perf_clock();
+ task_event->event_id.time = perf_clock();
- perf_output_put(&handle, task_event->event);
+ perf_output_put(&handle, task_event->event_id);
perf_output_end(&handle);
}
-static int perf_counter_task_match(struct perf_counter *counter)
+static int perf_event_task_match(struct perf_event *event)
{
- if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
+ if (event->attr.comm || event->attr.mmap || event->attr.task)
return 1;
return 0;
}
-static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+static void perf_event_task_ctx(struct perf_event_context *ctx,
struct perf_task_event *task_event)
{
- struct perf_counter *counter;
+ struct perf_event *event;
if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
return;
rcu_read_lock();
- list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
- if (perf_counter_task_match(counter))
- perf_counter_task_output(counter, task_event);
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (perf_event_task_match(event))
+ perf_event_task_output(event, task_event);
}
rcu_read_unlock();
}
-static void perf_counter_task_event(struct perf_task_event *task_event)
+static void perf_event_task_event(struct perf_task_event *task_event)
{
struct perf_cpu_context *cpuctx;
- struct perf_counter_context *ctx = task_event->task_ctx;
+ struct perf_event_context *ctx = task_event->task_ctx;
cpuctx = &get_cpu_var(perf_cpu_context);
- perf_counter_task_ctx(&cpuctx->ctx, task_event);
+ perf_event_task_ctx(&cpuctx->ctx, task_event);
put_cpu_var(perf_cpu_context);
rcu_read_lock();
if (!ctx)
- ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
+ ctx = rcu_dereference(task_event->task->perf_event_ctxp);
if (ctx)
- perf_counter_task_ctx(ctx, task_event);
+ perf_event_task_ctx(ctx, task_event);
rcu_read_unlock();
}
-static void perf_counter_task(struct task_struct *task,
- struct perf_counter_context *task_ctx,
+static void perf_event_task(struct task_struct *task,
+ struct perf_event_context *task_ctx,
int new)
{
struct perf_task_event task_event;
- if (!atomic_read(&nr_comm_counters) &&
- !atomic_read(&nr_mmap_counters) &&
- !atomic_read(&nr_task_counters))
+ if (!atomic_read(&nr_comm_events) &&
+ !atomic_read(&nr_mmap_events) &&
+ !atomic_read(&nr_task_events))
return;
task_event = (struct perf_task_event){
.task = task,
.task_ctx = task_ctx,
- .event = {
+ .event_id = {
.header = {
- .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
+ .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
.misc = 0,
- .size = sizeof(task_event.event),
+ .size = sizeof(task_event.event_id),
},
/* .pid */
/* .ppid */
@@ -3182,12 +3181,12 @@ static void perf_counter_task(struct task_struct *task,
},
};
- perf_counter_task_event(&task_event);
+ perf_event_task_event(&task_event);
}
-void perf_counter_fork(struct task_struct *task)
+void perf_event_fork(struct task_struct *task)
{
- perf_counter_task(task, NULL, 1);
+ perf_event_task(task, NULL, 1);
}
/*
@@ -3204,56 +3203,56 @@ struct perf_comm_event {
u32 pid;
u32 tid;
- } event;
+ } event_id;
};
-static void perf_counter_comm_output(struct perf_counter *counter,
+static void perf_event_comm_output(struct perf_event *event,
struct perf_comm_event *comm_event)
{
struct perf_output_handle handle;
- int size = comm_event->event.header.size;
- int ret = perf_output_begin(&handle, counter, size, 0, 0);
+ int size = comm_event->event_id.header.size;
+ int ret = perf_output_begin(&handle, event, size, 0, 0);
if (ret)
return;
- comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
- comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
+ comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
+ comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
- perf_output_put(&handle, comm_event->event);
+ perf_output_put(&handle, comm_event->event_id);
perf_output_copy(&handle, comm_event->comm,
comm_event->comm_size);
perf_output_end(&handle);
}
-static int perf_counter_comm_match(struct perf_counter *counter)
+static int perf_event_comm_match(struct perf_event *event)
{
- if (counter->attr.comm)
+ if (event->attr.comm)
return 1;
return 0;
}
-static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
+static void perf_event_comm_ctx(struct perf_event_context *ctx,
struct perf_comm_event *comm_event)
{
- struct perf_counter *counter;
+ struct perf_event *event;
if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
return;
rcu_read_lock();
- list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
- if (perf_counter_comm_match(counter))
- perf_counter_comm_output(counter, comm_event);
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (perf_event_comm_match(event))
+ perf_event_comm_output(event, comm_event);
}
rcu_read_unlock();
}
-static void perf_counter_comm_event(struct perf_comm_event *comm_event)
+static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
struct perf_cpu_context *cpuctx;
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
unsigned int size;
char comm[TASK_COMM_LEN];
@@ -3264,10 +3263,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
comm_event->comm = comm;
comm_event->comm_size = size;
- comm_event->event.header.size = sizeof(comm_event->event) + size;
+ comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
cpuctx = &get_cpu_var(perf_cpu_context);
- perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
+ perf_event_comm_ctx(&cpuctx->ctx, comm_event);
put_cpu_var(perf_cpu_context);
rcu_read_lock();
@@ -3275,29 +3274,29 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
* doesn't really matter which of the child contexts the
* events end up in.
*/
- ctx = rcu_dereference(current->perf_counter_ctxp);
+ ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
- perf_counter_comm_ctx(ctx, comm_event);
+ perf_event_comm_ctx(ctx, comm_event);
rcu_read_unlock();
}
-void perf_counter_comm(struct task_struct *task)
+void perf_event_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
- if (task->perf_counter_ctxp)
- perf_counter_enable_on_exec(task);
+ if (task->perf_event_ctxp)
+ perf_event_enable_on_exec(task);
- if (!atomic_read(&nr_comm_counters))
+ if (!atomic_read(&nr_comm_events))
return;
comm_event = (struct perf_comm_event){
.task = task,
/* .comm */
/* .comm_size */
- .event = {
+ .event_id = {
.header = {
- .type = PERF_EVENT_COMM,
+ .type = PERF_RECORD_COMM,
.misc = 0,
/* .size */
},
@@ -3306,7 +3305,7 @@ void perf_counter_comm(struct task_struct *task)
},
};
- perf_counter_comm_event(&comm_event);
+ perf_event_comm_event(&comm_event);
}
/*
@@ -3327,57 +3326,57 @@ struct perf_mmap_event {
u64 start;
u64 len;
u64 pgoff;
- } event;
+ } event_id;
};
-static void perf_counter_mmap_output(struct perf_counter *counter,
+static void perf_event_mmap_output(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
struct perf_output_handle handle;
- int size = mmap_event->event.header.size;
- int ret = perf_output_begin(&handle, counter, size, 0, 0);
+ int size = mmap_event->event_id.header.size;
+ int ret = perf_output_begin(&handle, event, size, 0, 0);
if (ret)
return;
- mmap_event->event.pid = perf_counter_pid(counter, current);
- mmap_event->event.tid = perf_counter_tid(counter, current);
+ mmap_event->event_id.pid = perf_event_pid(event, current);
+ mmap_event->event_id.tid = perf_event_tid(event, current);
- perf_output_put(&handle, mmap_event->event);
+ perf_output_put(&handle, mmap_event->event_id);
perf_output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_output_end(&handle);
}
-static int perf_counter_mmap_match(struct perf_counter *counter,
+static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
- if (counter->attr.mmap)
+ if (event->attr.mmap)
return 1;
return 0;
}
-static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
+static void perf_event_mmap_ctx(struct perf_event_context *ctx,
struct perf_mmap_event *mmap_event)
{
- struct perf_counter *counter;
+ struct perf_event *event;
if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
return;
rcu_read_lock();
- list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
- if (perf_counter_mmap_match(counter, mmap_event))
- perf_counter_mmap_output(counter, mmap_event);
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (perf_event_mmap_match(event, mmap_event))
+ perf_event_mmap_output(event, mmap_event);
}
rcu_read_unlock();
}
-static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
+static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct perf_cpu_context *cpuctx;
- struct perf_counter_context *ctx;
+ struct perf_event_context *ctx;
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
unsigned int size;
@@ -3425,10 +3424,10 @@ got_name:
mmap_event->file_name = name;
mmap_event->file_size = size;
- mmap_event->event.header.size = sizeof(mmap_event->event) + size;
+ mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
cpuctx = &get_cpu_var(perf_cpu_context);
- perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
+ perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
put_cpu_var(perf_cpu_context);
rcu_read_lock();
@@ -3436,28 +3435,28 @@ got_name:
* doesn't really matter which of the child contexts the
* events end up in.
*/
- ctx = rcu_dereference(current->perf_counter_ctxp);
+ ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
- perf_counter_mmap_ctx(ctx, mmap_event);
+ perf_event_mmap_ctx(ctx, mmap_event);
rcu_read_unlock();
kfree(buf);
}
-void __perf_counter_mmap(struct vm_area_struct *vma)
+void __perf_event_mmap(struct vm_area_struct *vma)
{
struct perf_mmap_event mmap_event;
- if (!atomic_read(&nr_mmap_counters))
+ if (!atomic_read(&nr_mmap_events))
return;
mmap_event = (struct perf_mmap_event){
.vma = vma,
/* .file_name */
/* .file_size */
- .event = {
+ .event_id = {
.header = {
- .type = PERF_EVENT_MMAP,
+ .type = PERF_RECORD_MMAP,
.misc = 0,
/* .size */
},
@@ -3469,14 +3468,14 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
},
};
- perf_counter_mmap_event(&mmap_event);
+ perf_event_mmap_event(&mmap_event);
}
/*
* IRQ throttle logging
*/
-static void perf_log_throttle(struct perf_counter *counter, int enable)
+static void perf_log_throttle(struct perf_event *event, int enable)
{
struct perf_output_handle handle;
int ret;
@@ -3488,19 +3487,19 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
u64 stream_id;
} throttle_event = {
.header = {
- .type = PERF_EVENT_THROTTLE,
+ .type = PERF_RECORD_THROTTLE,
.misc = 0,
.size = sizeof(throttle_event),
},
.time = perf_clock(),
- .id = primary_counter_id(counter),
- .stream_id = counter->id,
+ .id = primary_event_id(event),
+ .stream_id = event->id,
};
if (enable)
- throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+ throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
- ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
+ ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
if (ret)
return;
@@ -3509,18 +3508,18 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
}
/*
- * Generic counter overflow handling, sampling.
+ * Generic event overflow handling, sampling.
*/
-static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
+static int __perf_event_overflow(struct perf_event *event, int nmi,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
- int events = atomic_read(&counter->event_limit);
- struct hw_perf_counter *hwc = &counter->hw;
+ int events = atomic_read(&event->event_limit);
+ struct hw_perf_event *hwc = &event->hw;
int ret = 0;
- throttle = (throttle && counter->pmu->unthrottle != NULL);
+ throttle = (throttle && event->pmu->unthrottle != NULL);
if (!throttle) {
hwc->interrupts++;
@@ -3528,73 +3527,73 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
if (hwc->interrupts != MAX_INTERRUPTS) {
hwc->interrupts++;
if (HZ * hwc->interrupts >
- (u64)sysctl_perf_counter_sample_rate) {
+ (u64)sysctl_perf_event_sample_rate) {
hwc->interrupts = MAX_INTERRUPTS;
- perf_log_throttle(counter, 0);
+ perf_log_throttle(event, 0);
ret = 1;
}
} else {
/*
- * Keep re-disabling counters even though on the previous
+ * Keep re-disabling events even though on the previous
* pass we disabled it - just in case we raced with a
- * sched-in and the counter got enabled again:
+ * sched-in and the event got enabled again:
*/
ret = 1;
}
}
- if (counter->attr.freq) {
+ if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_stamp;
hwc->freq_stamp = now;
if (delta > 0 && delta < TICK_NSEC)
- perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
+ perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
}
/*
* XXX event_limit might not quite work as expected on inherited
- * counters
+ * events
*/
- counter->pending_kill = POLL_IN;
- if (events && atomic_dec_and_test(&counter->event_limit)) {
+ event->pending_kill = POLL_IN;
+ if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
- counter->pending_kill = POLL_HUP;
+ event->pending_kill = POLL_HUP;
if (nmi) {
- counter->pending_disable = 1;
- perf_pending_queue(&counter->pending,
- perf_pending_counter);
+ event->pending_disable = 1;
+ perf_pending_queue(&event->pending,
+ perf_pending_event);
} else
- perf_counter_disable(counter);
+ perf_event_disable(event);
}
- perf_counter_output(counter, nmi, data, regs);
+ perf_event_output(event, nmi, data, regs);
return ret;
}
-int perf_counter_overflow(struct perf_counter *counter, int nmi,
+int perf_event_overflow(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- return __perf_counter_overflow(counter, nmi, 1, data, regs);
+ return __perf_event_overflow(event, nmi, 1, data, regs);
}
/*
- * Generic software counter infrastructure
+ * Generic software event infrastructure
*/
/*
- * We directly increment counter->count and keep a second value in
- * counter->hw.period_left to count intervals. This period counter
+ * We directly increment event->count and keep a second value in
+ * event->hw.period_left to count intervals. This period event
* is kept in the range [-sample_period, 0] so that we can use the
* sign as trigger.
*/
-static u64 perf_swcounter_set_period(struct perf_counter *counter)
+static u64 perf_swevent_set_period(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
u64 period = hwc->last_period;
u64 nr, offset;
s64 old, val;
@@ -3615,22 +3614,22 @@ again:
return nr;
}
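The comment above states the bookkeeping invariant, but the body of perf_swevent_set_period() is elided by this hunk. The following standalone model (illustration only: plain integers instead of atomic64_t, hypothetical numbers) shows the intent: period_left stays in [-sample_period, 0], additions move it toward zero, and crossing zero yields one overflow per whole period elapsed.

/*
 * Hypothetical userspace model of the period bookkeeping described above:
 * period_left lives in [-sample_period, 0]; adding event counts moves it
 * toward zero, and crossing zero signals one or more overflows.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t period_left;              /* stands in for hwc->period_left */
static const int64_t sample_period = 100;

/* Roughly mirrors perf_swevent_add()'s "add and test sign" step. */
static void sw_add(int64_t nr)
{
	period_left += nr;
	if (period_left < 0)
		return;                  /* still inside the current period */

	/* Roughly mirrors perf_swevent_set_period(): count whole periods
	 * elapsed and pull period_left back into [-sample_period, 0]. */
	int64_t overflows = period_left / sample_period + 1;
	period_left -= overflows * sample_period;
	printf("nr=%lld -> %lld overflow(s), period_left=%lld\n",
	       (long long)nr, (long long)overflows, (long long)period_left);
}

int main(void)
{
	period_left = -sample_period;    /* as set up when the event is enabled */
	sw_add(60);                      /* no overflow yet */
	sw_add(60);                      /* crosses zero: one overflow */
	sw_add(250);                     /* large increment: several overflows */
	return 0;
}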
-static void perf_swcounter_overflow(struct perf_counter *counter,
+static void perf_swevent_overflow(struct perf_event *event,
int nmi, struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
u64 overflow;
- data->period = counter->hw.last_period;
- overflow = perf_swcounter_set_period(counter);
+ data->period = event->hw.last_period;
+ overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
for (; overflow; overflow--) {
- if (__perf_counter_overflow(counter, nmi, throttle,
+ if (__perf_event_overflow(event, nmi, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
@@ -3642,20 +3641,20 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
}
}
-static void perf_swcounter_unthrottle(struct perf_counter *counter)
+static void perf_swevent_unthrottle(struct perf_event *event)
{
/*
* Nothing to do, we already reset hwc->interrupts.
*/
}
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+static void perf_swevent_add(struct perf_event *event, u64 nr,
int nmi, struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
- atomic64_add(nr, &counter->count);
+ atomic64_add(nr, &event->count);
if (!hwc->sample_period)
return;
@@ -3664,29 +3663,29 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
return;
if (!atomic64_add_negative(nr, &hwc->period_left))
- perf_swcounter_overflow(counter, nmi, data, regs);
+ perf_swevent_overflow(event, nmi, data, regs);
}
-static int perf_swcounter_is_counting(struct perf_counter *counter)
+static int perf_swevent_is_counting(struct perf_event *event)
{
/*
- * The counter is active, we're good!
+ * The event is active, we're good!
*/
- if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
return 1;
/*
- * The counter is off/error, not counting.
+ * The event is off/error, not counting.
*/
- if (counter->state != PERF_COUNTER_STATE_INACTIVE)
+ if (event->state != PERF_EVENT_STATE_INACTIVE)
return 0;
/*
- * The counter is inactive, if the context is active
+ * The event is inactive, if the context is active
* we're part of a group that didn't make it on the 'pmu',
* not counting.
*/
- if (counter->ctx->is_active)
+ if (event->ctx->is_active)
return 0;
/*
@@ -3697,49 +3696,49 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
return 1;
}
-static int perf_swcounter_match(struct perf_counter *counter,
+static int perf_swevent_match(struct perf_event *event,
enum perf_type_id type,
- u32 event, struct pt_regs *regs)
+ u32 event_id, struct pt_regs *regs)
{
- if (!perf_swcounter_is_counting(counter))
+ if (!perf_swevent_is_counting(event))
return 0;
- if (counter->attr.type != type)
+ if (event->attr.type != type)
return 0;
- if (counter->attr.config != event)
+ if (event->attr.config != event_id)
return 0;
if (regs) {
- if (counter->attr.exclude_user && user_mode(regs))
+ if (event->attr.exclude_user && user_mode(regs))
return 0;
- if (counter->attr.exclude_kernel && !user_mode(regs))
+ if (event->attr.exclude_kernel && !user_mode(regs))
return 0;
}
return 1;
}
-static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
+static void perf_swevent_ctx_event(struct perf_event_context *ctx,
enum perf_type_id type,
- u32 event, u64 nr, int nmi,
+ u32 event_id, u64 nr, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct perf_counter *counter;
+ struct perf_event *event;
if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
return;
rcu_read_lock();
- list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
- if (perf_swcounter_match(counter, type, event, regs))
- perf_swcounter_add(counter, nr, nmi, data, regs);
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (perf_swevent_match(event, type, event_id, regs))
+ perf_swevent_add(event, nr, nmi, data, regs);
}
rcu_read_unlock();
}
-static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
+static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
{
if (in_nmi())
return &cpuctx->recursion[3];
@@ -3753,14 +3752,14 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
return &cpuctx->recursion[0];
}
-static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
- int *recursion = perf_swcounter_recursion_context(cpuctx);
- struct perf_counter_context *ctx;
+ int *recursion = perf_swevent_recursion_context(cpuctx);
+ struct perf_event_context *ctx;
if (*recursion)
goto out;
@@ -3768,16 +3767,16 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
(*recursion)++;
barrier();
- perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
+ perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
nr, nmi, data, regs);
rcu_read_lock();
/*
* doesn't really matter which of the child contexts the
* events end up in.
*/
- ctx = rcu_dereference(current->perf_counter_ctxp);
+ ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
- perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs);
+ perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
rcu_read_unlock();
barrier();
@@ -3787,57 +3786,57 @@ out:
put_cpu_var(perf_cpu_context);
}
-void __perf_swcounter_event(u32 event, u64 nr, int nmi,
+void __perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
.addr = addr,
};
- do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi,
+ do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
&data, regs);
}
-static void perf_swcounter_read(struct perf_counter *counter)
+static void perf_swevent_read(struct perf_event *event)
{
}
-static int perf_swcounter_enable(struct perf_counter *counter)
+static int perf_swevent_enable(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
if (hwc->sample_period) {
hwc->last_period = hwc->sample_period;
- perf_swcounter_set_period(counter);
+ perf_swevent_set_period(event);
}
return 0;
}
-static void perf_swcounter_disable(struct perf_counter *counter)
+static void perf_swevent_disable(struct perf_event *event)
{
}
static const struct pmu perf_ops_generic = {
- .enable = perf_swcounter_enable,
- .disable = perf_swcounter_disable,
- .read = perf_swcounter_read,
- .unthrottle = perf_swcounter_unthrottle,
+ .enable = perf_swevent_enable,
+ .disable = perf_swevent_disable,
+ .read = perf_swevent_read,
+ .unthrottle = perf_swevent_unthrottle,
};
/*
- * hrtimer based swcounter callback
+ * hrtimer based swevent callback
*/
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
struct pt_regs *regs;
- struct perf_counter *counter;
+ struct perf_event *event;
u64 period;
- counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
- counter->pmu->read(counter);
+ event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+ event->pmu->read(event);
data.addr = 0;
regs = get_irq_regs();
@@ -3845,45 +3844,45 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
* In case we exclude kernel IPs or are somehow not in interrupt
* context, provide the next best thing, the user IP.
*/
- if ((counter->attr.exclude_kernel || !regs) &&
- !counter->attr.exclude_user)
+ if ((event->attr.exclude_kernel || !regs) &&
+ !event->attr.exclude_user)
regs = task_pt_regs(current);
if (regs) {
- if (perf_counter_overflow(counter, 0, &data, regs))
+ if (perf_event_overflow(event, 0, &data, regs))
ret = HRTIMER_NORESTART;
}
- period = max_t(u64, 10000, counter->hw.sample_period);
+ period = max_t(u64, 10000, event->hw.sample_period);
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
/*
- * Software counter: cpu wall time clock
+ * Software event: cpu wall time clock
*/
-static void cpu_clock_perf_counter_update(struct perf_counter *counter)
+static void cpu_clock_perf_event_update(struct perf_event *event)
{
int cpu = raw_smp_processor_id();
s64 prev;
u64 now;
now = cpu_clock(cpu);
- prev = atomic64_read(&counter->hw.prev_count);
- atomic64_set(&counter->hw.prev_count, now);
- atomic64_add(now - prev, &counter->count);
+ prev = atomic64_read(&event->hw.prev_count);
+ atomic64_set(&event->hw.prev_count, now);
+ atomic64_add(now - prev, &event->count);
}
-static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
+static int cpu_clock_perf_event_enable(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
int cpu = raw_smp_processor_id();
atomic64_set(&hwc->prev_count, cpu_clock(cpu));
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swcounter_hrtimer;
+ hwc->hrtimer.function = perf_swevent_hrtimer;
if (hwc->sample_period) {
u64 period = max_t(u64, 10000, hwc->sample_period);
__hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3894,48 +3893,48 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
return 0;
}
-static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+static void cpu_clock_perf_event_disable(struct perf_event *event)
{
- if (counter->hw.sample_period)
- hrtimer_cancel(&counter->hw.hrtimer);
- cpu_clock_perf_counter_update(counter);
+ if (event->hw.sample_period)
+ hrtimer_cancel(&event->hw.hrtimer);
+ cpu_clock_perf_event_update(event);
}
-static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+static void cpu_clock_perf_event_read(struct perf_event *event)
{
- cpu_clock_perf_counter_update(counter);
+ cpu_clock_perf_event_update(event);
}
static const struct pmu perf_ops_cpu_clock = {
- .enable = cpu_clock_perf_counter_enable,
- .disable = cpu_clock_perf_counter_disable,
- .read = cpu_clock_perf_counter_read,
+ .enable = cpu_clock_perf_event_enable,
+ .disable = cpu_clock_perf_event_disable,
+ .read = cpu_clock_perf_event_read,
};
/*
- * Software counter: task time clock
+ * Software event: task time clock
*/
-static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
+static void task_clock_perf_event_update(struct perf_event *event, u64 now)
{
u64 prev;
s64 delta;
- prev = atomic64_xchg(&counter->hw.prev_count, now);
+ prev = atomic64_xchg(&event->hw.prev_count, now);
delta = now - prev;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
}
-static int task_clock_perf_counter_enable(struct perf_counter *counter)
+static int task_clock_perf_event_enable(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
u64 now;
- now = counter->ctx->time;
+ now = event->ctx->time;
atomic64_set(&hwc->prev_count, now);
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swcounter_hrtimer;
+ hwc->hrtimer.function = perf_swevent_hrtimer;
if (hwc->sample_period) {
u64 period = max_t(u64, 10000, hwc->sample_period);
__hrtimer_start_range_ns(&hwc->hrtimer,
@@ -3946,38 +3945,38 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
return 0;
}
-static void task_clock_perf_counter_disable(struct perf_counter *counter)
+static void task_clock_perf_event_disable(struct perf_event *event)
{
- if (counter->hw.sample_period)
- hrtimer_cancel(&counter->hw.hrtimer);
- task_clock_perf_counter_update(counter, counter->ctx->time);
+ if (event->hw.sample_period)
+ hrtimer_cancel(&event->hw.hrtimer);
+ task_clock_perf_event_update(event, event->ctx->time);
}
-static void task_clock_perf_counter_read(struct perf_counter *counter)
+static void task_clock_perf_event_read(struct perf_event *event)
{
u64 time;
if (!in_nmi()) {
- update_context_time(counter->ctx);
- time = counter->ctx->time;
+ update_context_time(event->ctx);
+ time = event->ctx->time;
} else {
u64 now = perf_clock();
- u64 delta = now - counter->ctx->timestamp;
- time = counter->ctx->time + delta;
+ u64 delta = now - event->ctx->timestamp;
+ time = event->ctx->time + delta;
}
- task_clock_perf_counter_update(counter, time);
+ task_clock_perf_event_update(event, time);
}
static const struct pmu perf_ops_task_clock = {
- .enable = task_clock_perf_counter_enable,
- .disable = task_clock_perf_counter_disable,
- .read = task_clock_perf_counter_read,
+ .enable = task_clock_perf_event_enable,
+ .disable = task_clock_perf_event_disable,
+ .read = task_clock_perf_event_read,
};
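Both clock PMUs above rely on the same prev_count/delta pattern: cache the last clock reading in hwc->prev_count and fold the difference into event->count on every update. A stripped-down userspace model follows (illustration only; CLOCK_MONOTONIC stands in for cpu_clock()/ctx->time).

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t prev_count;              /* stands in for hwc->prev_count */
static uint64_t count;                   /* stands in for event->count    */

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void clock_event_update(void)
{
	uint64_t now = now_ns();
	count += now - prev_count;       /* mirrors atomic64_add(now - prev, ...) */
	prev_count = now;
}

int main(void)
{
	prev_count = now_ns();           /* as done on enable */
	for (int i = 0; i < 3; i++) {
		clock_event_update();    /* as done on read/disable */
		printf("accumulated %llu ns\n", (unsigned long long)count);
	}
	return 0;
}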
#ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
int entry_size)
{
struct perf_raw_record raw = {
@@ -3995,78 +3994,78 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
if (!regs)
regs = task_pt_regs(current);
- do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+ do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
&data, regs);
}
-EXPORT_SYMBOL_GPL(perf_tpcounter_event);
+EXPORT_SYMBOL_GPL(perf_tp_event);
extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);
-static void tp_perf_counter_destroy(struct perf_counter *counter)
+static void tp_perf_event_destroy(struct perf_event *event)
{
- ftrace_profile_disable(counter->attr.config);
+ ftrace_profile_disable(event->attr.config);
}
-static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
/*
* Raw tracepoint data is a severe data leak, only allow root to
* have these.
*/
- if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+ if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
perf_paranoid_tracepoint_raw() &&
!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
- if (ftrace_profile_enable(counter->attr.config))
+ if (ftrace_profile_enable(event->attr.config))
return NULL;
- counter->destroy = tp_perf_counter_destroy;
+ event->destroy = tp_perf_event_destroy;
return &perf_ops_generic;
}
#else
-static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
return NULL;
}
#endif
-atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
-static void sw_perf_counter_destroy(struct perf_counter *counter)
+static void sw_perf_event_destroy(struct perf_event *event)
{
- u64 event = counter->attr.config;
+ u64 event_id = event->attr.config;
- WARN_ON(counter->parent);
+ WARN_ON(event->parent);
- atomic_dec(&perf_swcounter_enabled[event]);
+ atomic_dec(&perf_swevent_enabled[event_id]);
}
-static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
+static const struct pmu *sw_perf_event_init(struct perf_event *event)
{
const struct pmu *pmu = NULL;
- u64 event = counter->attr.config;
+ u64 event_id = event->attr.config;
/*
- * Software counters (currently) can't in general distinguish
+ * Software events (currently) can't in general distinguish
* between user, kernel and hypervisor events.
* However, context switches and cpu migrations are considered
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (event) {
+ switch (event_id) {
case PERF_COUNT_SW_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
break;
case PERF_COUNT_SW_TASK_CLOCK:
/*
- * If the user instantiates this as a per-cpu counter,
- * use the cpu_clock counter instead.
+ * If the user instantiates this as a per-cpu event,
+ * use the cpu_clock event instead.
*/
- if (counter->ctx->task)
+ if (event->ctx->task)
pmu = &perf_ops_task_clock;
else
pmu = &perf_ops_cpu_clock;
@@ -4077,9 +4076,9 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
case PERF_COUNT_SW_CONTEXT_SWITCHES:
case PERF_COUNT_SW_CPU_MIGRATIONS:
- if (!counter->parent) {
- atomic_inc(&perf_swcounter_enabled[event]);
- counter->destroy = sw_perf_counter_destroy;
+ if (!event->parent) {
+ atomic_inc(&perf_swevent_enabled[event_id]);
+ event->destroy = sw_perf_event_destroy;
}
pmu = &perf_ops_generic;
break;
@@ -4089,62 +4088,62 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
}
/*
- * Allocate and initialize a counter structure
+ * Allocate and initialize an event structure
*/
-static struct perf_counter *
-perf_counter_alloc(struct perf_counter_attr *attr,
+static struct perf_event *
+perf_event_alloc(struct perf_event_attr *attr,
int cpu,
- struct perf_counter_context *ctx,
- struct perf_counter *group_leader,
- struct perf_counter *parent_counter,
+ struct perf_event_context *ctx,
+ struct perf_event *group_leader,
+ struct perf_event *parent_event,
gfp_t gfpflags)
{
const struct pmu *pmu;
- struct perf_counter *counter;
- struct hw_perf_counter *hwc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
long err;
- counter = kzalloc(sizeof(*counter), gfpflags);
- if (!counter)
+ event = kzalloc(sizeof(*event), gfpflags);
+ if (!event)
return ERR_PTR(-ENOMEM);
/*
- * Single counters are their own group leaders, with an
+ * Single events are their own group leaders, with an
* empty sibling list:
*/
if (!group_leader)
- group_leader = counter;
+ group_leader = event;
- mutex_init(&counter->child_mutex);
- INIT_LIST_HEAD(&counter->child_list);
+ mutex_init(&event->child_mutex);
+ INIT_LIST_HEAD(&event->child_list);
- INIT_LIST_HEAD(&counter->list_entry);
- INIT_LIST_HEAD(&counter->event_entry);
- INIT_LIST_HEAD(&counter->sibling_list);
- init_waitqueue_head(&counter->waitq);
+ INIT_LIST_HEAD(&event->group_entry);
+ INIT_LIST_HEAD(&event->event_entry);
+ INIT_LIST_HEAD(&event->sibling_list);
+ init_waitqueue_head(&event->waitq);
- mutex_init(&counter->mmap_mutex);
+ mutex_init(&event->mmap_mutex);
- counter->cpu = cpu;
- counter->attr = *attr;
- counter->group_leader = group_leader;
- counter->pmu = NULL;
- counter->ctx = ctx;
- counter->oncpu = -1;
+ event->cpu = cpu;
+ event->attr = *attr;
+ event->group_leader = group_leader;
+ event->pmu = NULL;
+ event->ctx = ctx;
+ event->oncpu = -1;
- counter->parent = parent_counter;
+ event->parent = parent_event;
- counter->ns = get_pid_ns(current->nsproxy->pid_ns);
- counter->id = atomic64_inc_return(&perf_counter_id);
+ event->ns = get_pid_ns(current->nsproxy->pid_ns);
+ event->id = atomic64_inc_return(&perf_event_id);
- counter->state = PERF_COUNTER_STATE_INACTIVE;
+ event->state = PERF_EVENT_STATE_INACTIVE;
if (attr->disabled)
- counter->state = PERF_COUNTER_STATE_OFF;
+ event->state = PERF_EVENT_STATE_OFF;
pmu = NULL;
- hwc = &counter->hw;
+ hwc = &event->hw;
hwc->sample_period = attr->sample_period;
if (attr->freq && attr->sample_freq)
hwc->sample_period = 1;
@@ -4153,7 +4152,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
atomic64_set(&hwc->period_left, hwc->sample_period);
/*
- * we currently do not support PERF_FORMAT_GROUP on inherited counters
+ * we currently do not support PERF_FORMAT_GROUP on inherited events
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto done;
@@ -4162,15 +4161,15 @@ perf_counter_alloc(struct perf_counter_attr *attr,
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
- pmu = hw_perf_counter_init(counter);
+ pmu = hw_perf_event_init(event);
break;
case PERF_TYPE_SOFTWARE:
- pmu = sw_perf_counter_init(counter);
+ pmu = sw_perf_event_init(event);
break;
case PERF_TYPE_TRACEPOINT:
- pmu = tp_perf_counter_init(counter);
+ pmu = tp_perf_event_init(event);
break;
default:
@@ -4184,29 +4183,29 @@ done:
err = PTR_ERR(pmu);
if (err) {
- if (counter->ns)
- put_pid_ns(counter->ns);
- kfree(counter);
+ if (event->ns)
+ put_pid_ns(event->ns);
+ kfree(event);
return ERR_PTR(err);
}
- counter->pmu = pmu;
+ event->pmu = pmu;
- if (!counter->parent) {
- atomic_inc(&nr_counters);
- if (counter->attr.mmap)
- atomic_inc(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_inc(&nr_comm_counters);
- if (counter->attr.task)
- atomic_inc(&nr_task_counters);
+ if (!event->parent) {
+ atomic_inc(&nr_events);
+ if (event->attr.mmap)
+ atomic_inc(&nr_mmap_events);
+ if (event->attr.comm)
+ atomic_inc(&nr_comm_events);
+ if (event->attr.task)
+ atomic_inc(&nr_task_events);
}
- return counter;
+ return event;
}
-static int perf_copy_attr(struct perf_counter_attr __user *uattr,
- struct perf_counter_attr *attr)
+static int perf_copy_attr(struct perf_event_attr __user *uattr,
+ struct perf_event_attr *attr)
{
u32 size;
int ret;
@@ -4285,11 +4284,11 @@ err_size:
goto out;
}
-int perf_counter_set_output(struct perf_counter *counter, int output_fd)
+int perf_event_set_output(struct perf_event *event, int output_fd)
{
- struct perf_counter *output_counter = NULL;
+ struct perf_event *output_event = NULL;
struct file *output_file = NULL;
- struct perf_counter *old_output;
+ struct perf_event *old_output;
int fput_needed = 0;
int ret = -EINVAL;
@@ -4303,28 +4302,28 @@ int perf_counter_set_output(struct perf_counter *counter, int output_fd)
if (output_file->f_op != &perf_fops)
goto out;
- output_counter = output_file->private_data;
+ output_event = output_file->private_data;
/* Don't chain output fds */
- if (output_counter->output)
+ if (output_event->output)
goto out;
/* Don't set an output fd when we already have an output channel */
- if (counter->data)
+ if (event->data)
goto out;
atomic_long_inc(&output_file->f_count);
set:
- mutex_lock(&counter->mmap_mutex);
- old_output = counter->output;
- rcu_assign_pointer(counter->output, output_counter);
- mutex_unlock(&counter->mmap_mutex);
+ mutex_lock(&event->mmap_mutex);
+ old_output = event->output;
+ rcu_assign_pointer(event->output, output_event);
+ mutex_unlock(&event->mmap_mutex);
if (old_output) {
/*
* we need to make sure no existing perf_output_*()
- * is still referencing this counter.
+ * is still referencing this event.
*/
synchronize_rcu();
fput(old_output->filp);
@@ -4337,21 +4336,21 @@ out:
}
/**
- * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
+ * sys_perf_event_open - open a performance event, associate it to a task/cpu
*
- * @attr_uptr: event type attributes for monitoring/sampling
+ * @attr_uptr: event_id type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
- * @group_fd: group leader counter fd
+ * @group_fd: group leader event fd
*/
-SYSCALL_DEFINE5(perf_counter_open,
- struct perf_counter_attr __user *, attr_uptr,
+SYSCALL_DEFINE5(perf_event_open,
+ struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
- struct perf_counter *counter, *group_leader;
- struct perf_counter_attr attr;
- struct perf_counter_context *ctx;
- struct file *counter_file = NULL;
+ struct perf_event *event, *group_leader;
+ struct perf_event_attr attr;
+ struct perf_event_context *ctx;
+ struct file *event_file = NULL;
struct file *group_file = NULL;
int fput_needed = 0;
int fput_needed2 = 0;
@@ -4371,7 +4370,7 @@ SYSCALL_DEFINE5(perf_counter_open,
}
if (attr.freq) {
- if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+ if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
}
@@ -4383,7 +4382,7 @@ SYSCALL_DEFINE5(perf_counter_open,
return PTR_ERR(ctx);
/*
- * Look up the group leader (we will attach this counter to it):
+ * Look up the group leader (we will attach this event to it):
*/
group_leader = NULL;
if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
@@ -4414,45 +4413,45 @@ SYSCALL_DEFINE5(perf_counter_open,
goto err_put_context;
}
- counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
+ event = perf_event_alloc(&attr, cpu, ctx, group_leader,
NULL, GFP_KERNEL);
- err = PTR_ERR(counter);
- if (IS_ERR(counter))
+ err = PTR_ERR(event);
+ if (IS_ERR(event))
goto err_put_context;
- err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
+ err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
if (err < 0)
goto err_free_put_context;
- counter_file = fget_light(err, &fput_needed2);
- if (!counter_file)
+ event_file = fget_light(err, &fput_needed2);
+ if (!event_file)
goto err_free_put_context;
if (flags & PERF_FLAG_FD_OUTPUT) {
- err = perf_counter_set_output(counter, group_fd);
+ err = perf_event_set_output(event, group_fd);
if (err)
goto err_fput_free_put_context;
}
- counter->filp = counter_file;
+ event->filp = event_file;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
- perf_install_in_context(ctx, counter, cpu);
+ perf_install_in_context(ctx, event, cpu);
++ctx->generation;
mutex_unlock(&ctx->mutex);
- counter->owner = current;
+ event->owner = current;
get_task_struct(current);
- mutex_lock(&current->perf_counter_mutex);
- list_add_tail(&counter->owner_entry, &current->perf_counter_list);
- mutex_unlock(&current->perf_counter_mutex);
+ mutex_lock(&current->perf_event_mutex);
+ list_add_tail(&event->owner_entry, &current->perf_event_list);
+ mutex_unlock(&current->perf_event_mutex);
err_fput_free_put_context:
- fput_light(counter_file, fput_needed2);
+ fput_light(event_file, fput_needed2);
err_free_put_context:
if (err < 0)
- kfree(counter);
+ kfree(event);
err_put_context:
if (err < 0)
@@ -4464,88 +4463,88 @@ err_put_context:
}
/*
- * inherit a counter from parent task to child task:
+ * inherit an event from parent task to child task:
*/
-static struct perf_counter *
-inherit_counter(struct perf_counter *parent_counter,
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
struct task_struct *parent,
- struct perf_counter_context *parent_ctx,
+ struct perf_event_context *parent_ctx,
struct task_struct *child,
- struct perf_counter *group_leader,
- struct perf_counter_context *child_ctx)
+ struct perf_event *group_leader,
+ struct perf_event_context *child_ctx)
{
- struct perf_counter *child_counter;
+ struct perf_event *child_event;
/*
- * Instead of creating recursive hierarchies of counters,
- * we link inherited counters back to the original parent,
+ * Instead of creating recursive hierarchies of events,
+ * we link inherited events back to the original parent,
* which has a filp for sure, which we use as the reference
* count:
*/
- if (parent_counter->parent)
- parent_counter = parent_counter->parent;
+ if (parent_event->parent)
+ parent_event = parent_event->parent;
- child_counter = perf_counter_alloc(&parent_counter->attr,
- parent_counter->cpu, child_ctx,
- group_leader, parent_counter,
+ child_event = perf_event_alloc(&parent_event->attr,
+ parent_event->cpu, child_ctx,
+ group_leader, parent_event,
GFP_KERNEL);
- if (IS_ERR(child_counter))
- return child_counter;
+ if (IS_ERR(child_event))
+ return child_event;
get_ctx(child_ctx);
/*
- * Make the child state follow the state of the parent counter,
+ * Make the child state follow the state of the parent event,
* not its attr.disabled bit. We hold the parent's mutex,
- * so we won't race with perf_counter_{en, dis}able_family.
+ * so we won't race with perf_event_{en, dis}able_family.
*/
- if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
- child_counter->state = PERF_COUNTER_STATE_INACTIVE;
+ if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+ child_event->state = PERF_EVENT_STATE_INACTIVE;
else
- child_counter->state = PERF_COUNTER_STATE_OFF;
+ child_event->state = PERF_EVENT_STATE_OFF;
- if (parent_counter->attr.freq)
- child_counter->hw.sample_period = parent_counter->hw.sample_period;
+ if (parent_event->attr.freq)
+ child_event->hw.sample_period = parent_event->hw.sample_period;
/*
* Link it up in the child's context:
*/
- add_counter_to_ctx(child_counter, child_ctx);
+ add_event_to_ctx(child_event, child_ctx);
/*
* Get a reference to the parent filp - we will fput it
- * when the child counter exits. This is safe to do because
+ * when the child event exits. This is safe to do because
* we are in the parent and we know that the filp still
* exists and has a nonzero count:
*/
- atomic_long_inc(&parent_counter->filp->f_count);
+ atomic_long_inc(&parent_event->filp->f_count);
/*
- * Link this into the parent counter's child list
+ * Link this into the parent event's child list
*/
- WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
- mutex_lock(&parent_counter->child_mutex);
- list_add_tail(&child_counter->child_list, &parent_counter->child_list);
- mutex_unlock(&parent_counter->child_mutex);
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+ mutex_lock(&parent_event->child_mutex);
+ list_add_tail(&child_event->child_list, &parent_event->child_list);
+ mutex_unlock(&parent_event->child_mutex);
- return child_counter;
+ return child_event;
}
-static int inherit_group(struct perf_counter *parent_counter,
+static int inherit_group(struct perf_event *parent_event,
struct task_struct *parent,
- struct perf_counter_context *parent_ctx,
+ struct perf_event_context *parent_ctx,
struct task_struct *child,
- struct perf_counter_context *child_ctx)
+ struct perf_event_context *child_ctx)
{
- struct perf_counter *leader;
- struct perf_counter *sub;
- struct perf_counter *child_ctr;
+ struct perf_event *leader;
+ struct perf_event *sub;
+ struct perf_event *child_ctr;
- leader = inherit_counter(parent_counter, parent, parent_ctx,
+ leader = inherit_event(parent_event, parent, parent_ctx,
child, NULL, child_ctx);
if (IS_ERR(leader))
return PTR_ERR(leader);
- list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
- child_ctr = inherit_counter(sub, parent, parent_ctx,
+ list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+ child_ctr = inherit_event(sub, parent, parent_ctx,
child, leader, child_ctx);
if (IS_ERR(child_ctr))
return PTR_ERR(child_ctr);
@@ -4553,74 +4552,74 @@ static int inherit_group(struct perf_counter *parent_counter,
return 0;
}
-static void sync_child_counter(struct perf_counter *child_counter,
+static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
- struct perf_counter *parent_counter = child_counter->parent;
+ struct perf_event *parent_event = child_event->parent;
u64 child_val;
- if (child_counter->attr.inherit_stat)
- perf_counter_read_event(child_counter, child);
+ if (child_event->attr.inherit_stat)
+ perf_event_read_event(child_event, child);
- child_val = atomic64_read(&child_counter->count);
+ child_val = atomic64_read(&child_event->count);
/*
* Add back the child's count to the parent's count:
*/
- atomic64_add(child_val, &parent_counter->count);
- atomic64_add(child_counter->total_time_enabled,
- &parent_counter->child_total_time_enabled);
- atomic64_add(child_counter->total_time_running,
- &parent_counter->child_total_time_running);
+ atomic64_add(child_val, &parent_event->count);
+ atomic64_add(child_event->total_time_enabled,
+ &parent_event->child_total_time_enabled);
+ atomic64_add(child_event->total_time_running,
+ &parent_event->child_total_time_running);
/*
- * Remove this counter from the parent's list
+ * Remove this event from the parent's list
*/
- WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
- mutex_lock(&parent_counter->child_mutex);
- list_del_init(&child_counter->child_list);
- mutex_unlock(&parent_counter->child_mutex);
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+ mutex_lock(&parent_event->child_mutex);
+ list_del_init(&child_event->child_list);
+ mutex_unlock(&parent_event->child_mutex);
/*
- * Release the parent counter, if this was the last
+ * Release the parent event, if this was the last
* reference to it.
*/
- fput(parent_counter->filp);
+ fput(parent_event->filp);
}
static void
-__perf_counter_exit_task(struct perf_counter *child_counter,
- struct perf_counter_context *child_ctx,
+__perf_event_exit_task(struct perf_event *child_event,
+ struct perf_event_context *child_ctx,
struct task_struct *child)
{
- struct perf_counter *parent_counter;
+ struct perf_event *parent_event;
- update_counter_times(child_counter);
- perf_counter_remove_from_context(child_counter);
+ update_event_times(child_event);
+ perf_event_remove_from_context(child_event);
- parent_counter = child_counter->parent;
+ parent_event = child_event->parent;
/*
- * It can happen that parent exits first, and has counters
+ * It can happen that parent exits first, and has events
* that are still around due to the child reference. These
- * counters need to be zapped - but otherwise linger.
+ * events need to be zapped - but otherwise linger.
*/
- if (parent_counter) {
- sync_child_counter(child_counter, child);
- free_counter(child_counter);
+ if (parent_event) {
+ sync_child_event(child_event, child);
+ free_event(child_event);
}
}
/*
- * When a child task exits, feed back counter values to parent counters.
+ * When a child task exits, feed back event values to parent events.
*/
-void perf_counter_exit_task(struct task_struct *child)
+void perf_event_exit_task(struct task_struct *child)
{
- struct perf_counter *child_counter, *tmp;
- struct perf_counter_context *child_ctx;
+ struct perf_event *child_event, *tmp;
+ struct perf_event_context *child_ctx;
unsigned long flags;
- if (likely(!child->perf_counter_ctxp)) {
- perf_counter_task(child, NULL, 0);
+ if (likely(!child->perf_event_ctxp)) {
+ perf_event_task(child, NULL, 0);
return;
}
@@ -4631,37 +4630,37 @@ void perf_counter_exit_task(struct task_struct *child)
* scheduled, so we are now safe from rescheduling changing
* our context.
*/
- child_ctx = child->perf_counter_ctxp;
- __perf_counter_task_sched_out(child_ctx);
+ child_ctx = child->perf_event_ctxp;
+ __perf_event_task_sched_out(child_ctx);
/*
* Take the context lock here so that if find_get_context is
- * reading child->perf_counter_ctxp, we wait until it has
+ * reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
spin_lock(&child_ctx->lock);
- child->perf_counter_ctxp = NULL;
+ child->perf_event_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
- * the counters from it.
+ * the events from it.
*/
unclone_ctx(child_ctx);
spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
- * Report the task dead after unscheduling the counters so that we
- * won't get any samples after PERF_EVENT_EXIT. We can however still
- * get a few PERF_EVENT_READ events.
+ * Report the task dead after unscheduling the events so that we
+ * won't get any samples after PERF_RECORD_EXIT. We can however still
+ * get a few PERF_RECORD_READ events.
*/
- perf_counter_task(child, child_ctx, 0);
+ perf_event_task(child, child_ctx, 0);
/*
* We can recurse on the same lock type through:
*
- * __perf_counter_exit_task()
- * sync_child_counter()
- * fput(parent_counter->filp)
+ * __perf_event_exit_task()
+ * sync_child_event()
+ * fput(parent_event->filp)
* perf_release()
* mutex_lock(&ctx->mutex)
*
@@ -4670,16 +4669,16 @@ void perf_counter_exit_task(struct task_struct *child)
mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
again:
- list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
- list_entry)
- __perf_counter_exit_task(child_counter, child_ctx, child);
+ list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
+ group_entry)
+ __perf_event_exit_task(child_event, child_ctx, child);
/*
- * If the last counter was a group counter, it will have appended all
+ * If the last event was a group event, it will have appended all
* its siblings to the list, but we obtained 'tmp' before that which
* will still point to the list head terminating the iteration.
*/
- if (!list_empty(&child_ctx->counter_list))
+ if (!list_empty(&child_ctx->group_list))
goto again;
mutex_unlock(&child_ctx->mutex);
@@ -4691,33 +4690,33 @@ again:
* free an unexposed, unused context as created by inheritance by
* init_task below, used by fork() in case of failure.
*/
-void perf_counter_free_task(struct task_struct *task)
+void perf_event_free_task(struct task_struct *task)
{
- struct perf_counter_context *ctx = task->perf_counter_ctxp;
- struct perf_counter *counter, *tmp;
+ struct perf_event_context *ctx = task->perf_event_ctxp;
+ struct perf_event *event, *tmp;
if (!ctx)
return;
mutex_lock(&ctx->mutex);
again:
- list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
- struct perf_counter *parent = counter->parent;
+ list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
+ struct perf_event *parent = event->parent;
if (WARN_ON_ONCE(!parent))
continue;
mutex_lock(&parent->child_mutex);
- list_del_init(&counter->child_list);
+ list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
fput(parent->filp);
- list_del_counter(counter, ctx);
- free_counter(counter);
+ list_del_event(event, ctx);
+ free_event(event);
}
- if (!list_empty(&ctx->counter_list))
+ if (!list_empty(&ctx->group_list))
goto again;
mutex_unlock(&ctx->mutex);
@@ -4726,37 +4725,37 @@ again:
}
/*
- * Initialize the perf_counter context in task_struct
+ * Initialize the perf_event context in task_struct
*/
-int perf_counter_init_task(struct task_struct *child)
+int perf_event_init_task(struct task_struct *child)
{
- struct perf_counter_context *child_ctx, *parent_ctx;
- struct perf_counter_context *cloned_ctx;
- struct perf_counter *counter;
+ struct perf_event_context *child_ctx, *parent_ctx;
+ struct perf_event_context *cloned_ctx;
+ struct perf_event *event;
struct task_struct *parent = current;
int inherited_all = 1;
int ret = 0;
- child->perf_counter_ctxp = NULL;
+ child->perf_event_ctxp = NULL;
- mutex_init(&child->perf_counter_mutex);
- INIT_LIST_HEAD(&child->perf_counter_list);
+ mutex_init(&child->perf_event_mutex);
+ INIT_LIST_HEAD(&child->perf_event_list);
- if (likely(!parent->perf_counter_ctxp))
+ if (likely(!parent->perf_event_ctxp))
return 0;
/*
* This is executed from the parent task context, so inherit
- * counters that have been marked for cloning.
+ * events that have been marked for cloning.
* First allocate and initialize a context for the child.
*/
- child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
+ child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
if (!child_ctx)
return -ENOMEM;
- __perf_counter_init_context(child_ctx, child);
- child->perf_counter_ctxp = child_ctx;
+ __perf_event_init_context(child_ctx, child);
+ child->perf_event_ctxp = child_ctx;
get_task_struct(child);
/*
@@ -4782,16 +4781,16 @@ int perf_counter_init_task(struct task_struct *child)
* We dont have to disable NMIs - we are only looking at
* the list, not manipulating it:
*/
- list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
- if (counter != counter->group_leader)
+ list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
+ if (event != event->group_leader)
continue;
- if (!counter->attr.inherit) {
+ if (!event->attr.inherit) {
inherited_all = 0;
continue;
}
- ret = inherit_group(counter, parent, parent_ctx,
+ ret = inherit_group(event, parent, parent_ctx,
child, child_ctx);
if (ret) {
inherited_all = 0;
@@ -4805,7 +4804,7 @@ int perf_counter_init_task(struct task_struct *child)
* context, or of whatever the parent is a clone of.
* Note that if the parent is a clone, it could get
* uncloned at any point, but that doesn't matter
- * because the list of counters and the generation
+ * because the list of events and the generation
* count can't have changed since we took the mutex.
*/
cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
@@ -4826,41 +4825,41 @@ int perf_counter_init_task(struct task_struct *child)
return ret;
}
-static void __cpuinit perf_counter_init_cpu(int cpu)
+static void __cpuinit perf_event_init_cpu(int cpu)
{
struct perf_cpu_context *cpuctx;
cpuctx = &per_cpu(perf_cpu_context, cpu);
- __perf_counter_init_context(&cpuctx->ctx, NULL);
+ __perf_event_init_context(&cpuctx->ctx, NULL);
spin_lock(&perf_resource_lock);
- cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
+ cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
spin_unlock(&perf_resource_lock);
- hw_perf_counter_setup(cpu);
+ hw_perf_event_setup(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
-static void __perf_counter_exit_cpu(void *info)
+static void __perf_event_exit_cpu(void *info)
{
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_counter_context *ctx = &cpuctx->ctx;
- struct perf_counter *counter, *tmp;
+ struct perf_event_context *ctx = &cpuctx->ctx;
+ struct perf_event *event, *tmp;
- list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
- __perf_counter_remove_from_context(counter);
+ list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
+ __perf_event_remove_from_context(event);
}
-static void perf_counter_exit_cpu(int cpu)
+static void perf_event_exit_cpu(int cpu)
{
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
- struct perf_counter_context *ctx = &cpuctx->ctx;
+ struct perf_event_context *ctx = &cpuctx->ctx;
mutex_lock(&ctx->mutex);
- smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
+ smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
mutex_unlock(&ctx->mutex);
}
#else
-static inline void perf_counter_exit_cpu(int cpu) { }
+static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int __cpuinit
@@ -4872,17 +4871,17 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- perf_counter_init_cpu(cpu);
+ perf_event_init_cpu(cpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- hw_perf_counter_setup_online(cpu);
+ hw_perf_event_setup_online(cpu);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- perf_counter_exit_cpu(cpu);
+ perf_event_exit_cpu(cpu);
break;
default:
@@ -4900,7 +4899,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
.priority = 20,
};
-void __init perf_counter_init(void)
+void __init perf_event_init(void)
{
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
@@ -4926,7 +4925,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
err = strict_strtoul(buf, 10, &val);
if (err)
return err;
- if (val > perf_max_counters)
+ if (val > perf_max_events)
return -EINVAL;
spin_lock(&perf_resource_lock);
@@ -4934,8 +4933,8 @@ perf_set_reserve_percpu(struct sysdev_class *class,
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
spin_lock_irq(&cpuctx->ctx.lock);
- mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
- perf_max_counters - perf_reserved_percpu);
+ mpt = min(perf_max_events - cpuctx->ctx.nr_events,
+ perf_max_events - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
spin_unlock_irq(&cpuctx->ctx.lock);
}
@@ -4990,12 +4989,12 @@ static struct attribute *perfclass_attrs[] = {
static struct attribute_group perfclass_attr_group = {
.attrs = perfclass_attrs,
- .name = "perf_counters",
+ .name = "perf_events",
};
-static int __init perf_counter_sysfs_init(void)
+static int __init perf_event_sysfs_init(void)
{
return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
&perfclass_attr_group);
}
-device_initcall(perf_counter_sysfs_init);
+device_initcall(perf_event_sysfs_init);
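For reference, a minimal userspace sketch that exercises the renamed interface end to end (illustration only, not part of the patch). It assumes the post-rename uapi header <linux/perf_event.h> and the __NR_perf_event_open syscall number are available on the build host, and uses the PERF_COUNT_SW_TASK_CLOCK software event handled by sw_perf_event_init()/perf_ops_task_clock above.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;         /* handled by sw_perf_event_init() */
	attr.config = PERF_COUNT_SW_TASK_CLOCK;   /* perf_ops_task_clock, in ns      */

	/* pid = 0, cpu = -1: count this task on any CPU, no group leader. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	for (volatile unsigned long i = 0; i < 10000000; i++)
		;                                 /* burn some task time */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task clock: %llu ns\n", (unsigned long long)count);

	close(fd);
	return 0;
}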
diff --git a/kernel/pid.c b/kernel/pid.c
index 31310b5d3f5..d3f722d20f9 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -40,7 +40,7 @@
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
-static int pidhash_shift;
+static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;
int pid_max = PID_MAX_DEFAULT;
@@ -499,19 +499,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
void __init pidhash_init(void)
{
int i, pidhash_size;
- unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
- pidhash_shift = max(4, fls(megabytes * 4));
- pidhash_shift = min(12, pidhash_shift);
+ pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
+ HASH_EARLY | HASH_SMALL,
+ &pidhash_shift, NULL, 4096);
pidhash_size = 1 << pidhash_shift;
- printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
- pidhash_size, pidhash_shift,
- pidhash_size * sizeof(struct hlist_head));
-
- pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
- if (!pid_hash)
- panic("Could not alloc pidhash!\n");
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
}
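For comparison, the sizing rule that the removed lines implemented, max(4, fls(megabytes * 4)) clamped to 12, can be tabulated with a small standalone sketch (illustration only; fls_ul() is a local stand-in for the kernel's fls(), and the memory sizes are hypothetical).

#include <stdio.h>

static int fls_ul(unsigned long x)       /* stand-in for the kernel's fls() */
{
	int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long megabytes;

	for (megabytes = 16; megabytes <= 16384; megabytes *= 4) {
		int shift = fls_ul(megabytes * 4);
		if (shift < 4)
			shift = 4;
		if (shift > 12)
			shift = 12;
		printf("%6lu MB -> pidhash_shift %2d (%4d buckets)\n",
		       megabytes, shift, 1 << shift);
	}
	return 0;
}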
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 821722ae58a..86b3796b043 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -118,7 +118,7 @@ struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old
{
if (!(flags & CLONE_NEWPID))
return get_pid_ns(old_ns);
- if (flags & CLONE_THREAD)
+ if (flags & (CLONE_THREAD|CLONE_PARENT))
return ERR_PTR(-EINVAL);
return create_pid_namespace(old_ns);
}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e33a21cb940..5c9dc228747 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -8,17 +8,18 @@
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
+#include <trace/events/timer.h>
/*
* Called after updating RLIMIT_CPU to set timer expiration if necessary.
*/
void update_rlimit_cpu(unsigned long rlim_new)
{
- cputime_t cputime;
+ cputime_t cputime = secs_to_cputime(rlim_new);
+ struct signal_struct *const sig = current->signal;
- cputime = secs_to_cputime(rlim_new);
- if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
- cputime_gt(current->signal->it_prof_expires, cputime)) {
+ if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+ cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
spin_lock_irq(&current->sighand->siglock);
set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
now);
}
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+ return cputime_eq(expires, cputime_zero) ||
+ cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+ return !cputime_eq(expires, cputime_zero) &&
+ cputime_le(expires, new_exp);
+}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the tasklist_lock held
@@ -586,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ union cpu_time_count *exp = &nt->expires;
+
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
BUG();
case CPUCLOCK_PROF:
- if (cputime_eq(p->cputime_expires.prof_exp,
- cputime_zero) ||
- cputime_gt(p->cputime_expires.prof_exp,
- nt->expires.cpu))
- p->cputime_expires.prof_exp =
- nt->expires.cpu;
+ if (expires_gt(p->cputime_expires.prof_exp,
+ exp->cpu))
+ p->cputime_expires.prof_exp = exp->cpu;
break;
case CPUCLOCK_VIRT:
- if (cputime_eq(p->cputime_expires.virt_exp,
- cputime_zero) ||
- cputime_gt(p->cputime_expires.virt_exp,
- nt->expires.cpu))
- p->cputime_expires.virt_exp =
- nt->expires.cpu;
+ if (expires_gt(p->cputime_expires.virt_exp,
+ exp->cpu))
+ p->cputime_expires.virt_exp = exp->cpu;
break;
case CPUCLOCK_SCHED:
if (p->cputime_expires.sched_exp == 0 ||
- p->cputime_expires.sched_exp >
- nt->expires.sched)
+ p->cputime_expires.sched_exp > exp->sched)
p->cputime_expires.sched_exp =
- nt->expires.sched;
+ exp->sched;
break;
}
} else {
+ struct signal_struct *const sig = p->signal;
+ union cpu_time_count *exp = &timer->it.cpu.expires;
+
/*
* For a process timer, set the cached expiration time.
*/
@@ -621,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
default:
BUG();
case CPUCLOCK_VIRT:
- if (!cputime_eq(p->signal->it_virt_expires,
- cputime_zero) &&
- cputime_lt(p->signal->it_virt_expires,
- timer->it.cpu.expires.cpu))
+ if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+ exp->cpu))
break;
- p->signal->cputime_expires.virt_exp =
- timer->it.cpu.expires.cpu;
+ sig->cputime_expires.virt_exp = exp->cpu;
break;
case CPUCLOCK_PROF:
- if (!cputime_eq(p->signal->it_prof_expires,
- cputime_zero) &&
- cputime_lt(p->signal->it_prof_expires,
- timer->it.cpu.expires.cpu))
+ if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+ exp->cpu))
break;
- i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+ i = sig->rlim[RLIMIT_CPU].rlim_cur;
if (i != RLIM_INFINITY &&
- i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+ i <= cputime_to_secs(exp->cpu))
break;
- p->signal->cputime_expires.prof_exp =
- timer->it.cpu.expires.cpu;
+ sig->cputime_expires.prof_exp = exp->cpu;
break;
case CPUCLOCK_SCHED:
- p->signal->cputime_expires.sched_exp =
- timer->it.cpu.expires.sched;
+ sig->cputime_expires.sched_exp = exp->sched;
break;
}
}
@@ -1071,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk)
spin_unlock_irqrestore(&cputimer->lock, flags);
}
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+ cputime_t *expires, cputime_t cur_time, int signo)
+{
+ if (cputime_eq(it->expires, cputime_zero))
+ return;
+
+ if (cputime_ge(cur_time, it->expires)) {
+ if (!cputime_eq(it->incr, cputime_zero)) {
+ it->expires = cputime_add(it->expires, it->incr);
+ it->error += it->incr_error;
+ if (it->error >= onecputick) {
+ it->expires = cputime_sub(it->expires,
+ cputime_one_jiffy);
+ it->error -= onecputick;
+ }
+ } else {
+ it->expires = cputime_zero;
+ }
+
+ trace_itimer_expire(signo == SIGPROF ?
+ ITIMER_PROF : ITIMER_VIRTUAL,
+ tsk->signal->leader_pid, cur_time);
+ __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+ }
+
+ if (!cputime_eq(it->expires, cputime_zero) &&
+ (cputime_eq(*expires, cputime_zero) ||
+ cputime_lt(it->expires, *expires))) {
+ *expires = it->expires;
+ }
+}
+
/*
* Check for any per-thread CPU timers that have fired and move them
* off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1090,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk,
* Don't sample the current process CPU clocks if there are no timers.
*/
if (list_empty(&timers[CPUCLOCK_PROF]) &&
- cputime_eq(sig->it_prof_expires, cputime_zero) &&
+ cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
list_empty(&timers[CPUCLOCK_VIRT]) &&
- cputime_eq(sig->it_virt_expires, cputime_zero) &&
+ cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
list_empty(&timers[CPUCLOCK_SCHED])) {
stop_process_timers(tsk);
return;
@@ -1153,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk,
/*
* Check for the special case process timers.
*/
- if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
- if (cputime_ge(ptime, sig->it_prof_expires)) {
- /* ITIMER_PROF fires and reloads. */
- sig->it_prof_expires = sig->it_prof_incr;
- if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
- sig->it_prof_expires = cputime_add(
- sig->it_prof_expires, ptime);
- }
- __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
- }
- if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
- (cputime_eq(prof_expires, cputime_zero) ||
- cputime_lt(sig->it_prof_expires, prof_expires))) {
- prof_expires = sig->it_prof_expires;
- }
- }
- if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
- if (cputime_ge(utime, sig->it_virt_expires)) {
- /* ITIMER_VIRTUAL fires and reloads. */
- sig->it_virt_expires = sig->it_virt_incr;
- if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
- sig->it_virt_expires = cputime_add(
- sig->it_virt_expires, utime);
- }
- __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
- }
- if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
- (cputime_eq(virt_expires, cputime_zero) ||
- cputime_lt(sig->it_virt_expires, virt_expires))) {
- virt_expires = sig->it_virt_expires;
- }
- }
+ check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+ SIGPROF);
+ check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+ SIGVTALRM);
+
if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
unsigned long psecs = cputime_to_secs(ptime);
cputime_t x;
@@ -1457,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
if (!cputime_eq(*oldval, cputime_zero)) {
if (cputime_le(*oldval, now.cpu)) {
/* Just about to fire. */
- *oldval = jiffies_to_cputime(1);
+ *oldval = cputime_one_jiffy;
} else {
*oldval = cputime_sub(*oldval, now.cpu);
}
@@ -1703,10 +1713,15 @@ static __init int init_posix_cpu_timers(void)
.nsleep = thread_cpu_nsleep,
.nsleep_restart = thread_cpu_nsleep_restart,
};
+ struct timespec ts;
register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
+ cputime_to_timespec(cputime_one_jiffy, &ts);
+ onecputick = ts.tv_nsec;
+ WARN_ON(ts.tv_sec != 0);
+
return 0;
}
__initcall(init_posix_cpu_timers);
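
The onecputick bookkeeping above compensates for interval timers whose period was rounded to cputime (one-jiffy) granularity: each firing accumulates the rounding error, and once a full tick of error has built up, one jiffy is given back. A standalone arithmetic sketch of that compensation loop; the HZ value, the requested period and the incr_error figure are assumptions for illustration, not taken from this patch.

/* Illustration only: mirrors the expires/error update in check_cpu_itimer().
 * Assumes HZ = 250 (one cputime tick = 4,000,000 ns) and a requested 10 ms
 * period that was rounded up to 3 ticks, leaving 2,000,000 ns of error
 * per period (hypothetical numbers). */
#include <stdio.h>

int main(void)
{
	const long onecputick = 4000000;	/* ns per tick at HZ = 250 */
	long incr = 3;				/* rounded period, in ticks */
	long incr_error = 2000000;		/* ns gained by rounding, per period */
	long expires = 0, error = 0;
	int period;

	for (period = 1; period <= 4; period++) {
		expires += incr;
		error += incr_error;
		if (error >= onecputick) {	/* same test as check_cpu_itimer() */
			expires -= 1;		/* give one tick back */
			error -= onecputick;
		}
		printf("period %d: expires=%ld ticks, error=%ld ns\n",
		       period, expires, error);
	}
	return 0;	/* four 10 ms periods end up at exactly 10 ticks = 40 ms */
}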
diff --git a/kernel/power/process.c b/kernel/power/process.c
index da2072d7381..cc2e55373b6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -9,6 +9,7 @@
#undef DEBUG
#include <linux/interrupt.h>
+#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 97955b0e44f..36cb168e433 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -619,7 +619,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
BUG_ON(!region);
} else
/* This allocation cannot fail */
- region = alloc_bootmem_low(sizeof(struct nosave_region));
+ region = alloc_bootmem(sizeof(struct nosave_region));
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8ba052c86d4..b101cdc4df3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
diff --git a/kernel/printk.c b/kernel/printk.c
index 602033acd6c..f38b07f78a4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -206,12 +206,11 @@ __setup("log_buf_len=", log_buf_len_setup);
#ifdef CONFIG_BOOT_PRINTK_DELAY
static unsigned int boot_delay; /* msecs delay after each printk during bootup */
-static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */
+static unsigned long long loops_per_msec; /* based on boot_delay */
static int __init boot_delay_setup(char *str)
{
unsigned long lpj;
- unsigned long long loops_per_msec;
lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
@@ -220,10 +219,9 @@ static int __init boot_delay_setup(char *str)
if (boot_delay > 10 * 1000)
boot_delay = 0;
- printk_delay_msec = loops_per_msec;
- printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
- "HZ: %d, printk_delay_msec: %llu\n",
- boot_delay, preset_lpj, lpj, HZ, printk_delay_msec);
+ pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
+ "HZ: %d, loops_per_msec: %llu\n",
+ boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
return 1;
}
__setup("boot_delay=", boot_delay_setup);
@@ -236,7 +234,7 @@ static void boot_delay_msec(void)
if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
return;
- k = (unsigned long long)printk_delay_msec * boot_delay;
+ k = (unsigned long long)loops_per_msec * boot_delay;
timeout = jiffies + msecs_to_jiffies(boot_delay);
while (k) {
@@ -655,6 +653,20 @@ static int recursion_bug;
static int new_text_line = 1;
static char printk_buf[1024];
+int printk_delay_msec __read_mostly;
+
+static inline void printk_delay(void)
+{
+ if (unlikely(printk_delay_msec)) {
+ int m = printk_delay_msec;
+
+ while (m--) {
+ mdelay(1);
+ touch_nmi_watchdog();
+ }
+ }
+}
+
asmlinkage int vprintk(const char *fmt, va_list args)
{
int printed_len = 0;
@@ -664,6 +676,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
char *p;
boot_delay_msec();
+ printk_delay();
preempt_disable();
/* This stops the holder of console_sem just where we want him */
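
The new printk_delay() hook above busy-waits printk_delay_msec milliseconds per message (one mdelay(1) per loop iteration, touching the NMI watchdog), which helps when console output scrolls past too quickly to read. A minimal userspace sketch for setting it through the sysctl added later in this patch; the /proc path assumes procfs is mounted in the usual place, and the value is bounded to 0..10000 by the table entry.

/* Illustration only: enable a 10 ms delay after every printk. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/printk_delay", "w");

	if (!f) {
		perror("printk_delay");
		return 1;
	}
	fprintf(f, "%d\n", 10);		/* milliseconds per printk */
	fclose(f);
	return 0;
}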
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 307c285af59..23bd09cd042 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -266,9 +266,10 @@ static int ignoring_children(struct sighand_struct *sigh)
* or self-reaping. Do notification now if it would have happened earlier.
* If it should reap itself, return true.
*
- * If it's our own child, there is no notification to do.
- * But if our normal children self-reap, then this child
- * was prevented by ptrace and we must reap it now.
+ * If it's our own child, there is no notification to do. But if our normal
+ * children self-reap, then this child was prevented by ptrace and we must
+ * reap it now; in that case we must also wake up sub-threads sleeping in
+ * do_wait().
*/
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
@@ -278,8 +279,10 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
if (!task_detached(p) && thread_group_empty(p)) {
if (!same_thread_group(p->real_parent, tracer))
do_notify_parent(p, p->exit_signal);
- else if (ignoring_children(tracer->sighand))
+ else if (ignoring_children(tracer->sighand)) {
+ __wake_up_parent(p, tracer);
p->exit_signal = -1;
+ }
}
if (task_detached(p)) {
/* Mark it as in the process of being reaped. */
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index e1338f07431..88faec23e83 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -19,6 +19,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
spin_lock_init(&counter->lock);
counter->limit = RESOURCE_MAX;
+ counter->soft_limit = RESOURCE_MAX;
counter->parent = parent;
}
@@ -36,17 +37,27 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
}
int res_counter_charge(struct res_counter *counter, unsigned long val,
- struct res_counter **limit_fail_at)
+ struct res_counter **limit_fail_at,
+ struct res_counter **soft_limit_fail_at)
{
int ret;
unsigned long flags;
struct res_counter *c, *u;
*limit_fail_at = NULL;
+ if (soft_limit_fail_at)
+ *soft_limit_fail_at = NULL;
local_irq_save(flags);
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
ret = res_counter_charge_locked(c, val);
+ /*
+ * With soft limits, we return the highest ancestor
+ * that exceeds its soft limit
+ */
+ if (soft_limit_fail_at &&
+ !res_counter_soft_limit_check_locked(c))
+ *soft_limit_fail_at = c;
spin_unlock(&c->lock);
if (ret < 0) {
*limit_fail_at = c;
@@ -74,7 +85,8 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
counter->usage -= val;
}
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+ bool *was_soft_limit_excess)
{
unsigned long flags;
struct res_counter *c;
@@ -82,6 +94,9 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
local_irq_save(flags);
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
+ if (was_soft_limit_excess)
+ *was_soft_limit_excess =
+ !res_counter_soft_limit_check_locked(c);
res_counter_uncharge_locked(c, val);
spin_unlock(&c->lock);
}
@@ -101,6 +116,8 @@ res_counter_member(struct res_counter *counter, int member)
return &counter->limit;
case RES_FAILCNT:
return &counter->failcnt;
+ case RES_SOFT_LIMIT:
+ return &counter->soft_limit;
};
BUG();
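
A minimal sketch of how a caller (the memory cgroup code is the real user) might use the extended charge/uncharge interface above; the function name and the PAGE_SIZE charge are illustrative assumptions, not part of this patch.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/res_counter.h>

/* Hypothetical caller: charge one page, note the highest ancestor that is
 * over its soft limit, then undo the charge again. */
static int example_charge(struct res_counter *counter)
{
	struct res_counter *fail, *soft_fail;
	bool was_over_soft;
	int ret;

	ret = res_counter_charge(counter, PAGE_SIZE, &fail, &soft_fail);
	if (ret)
		return ret;	/* 'fail' is the ancestor that hit its hard limit */

	if (soft_fail)
		pr_debug("soft limit exceeded at %p\n", soft_fail);

	res_counter_uncharge(counter, PAGE_SIZE, &was_over_soft);
	return 0;
}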
diff --git a/kernel/resource.c b/kernel/resource.c
index 78b087221c1..fb11a58b959 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -223,13 +223,13 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
-#if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
+#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
* Finds the lowest memory resource that exists within [res->start, res->end)
- * the caller must specify res->start, res->end, res->flags.
+ * the caller must specify res->start, res->end, res->flags and "name".
* If found, returns 0 and res is overwritten; if not found, returns -1.
*/
-static int find_next_system_ram(struct resource *res)
+static int find_next_system_ram(struct resource *res, char *name)
{
resource_size_t start, end;
struct resource *p;
@@ -245,6 +245,8 @@ static int find_next_system_ram(struct resource *res)
/* system ram is just marked as IORESOURCE_MEM */
if (p->flags != res->flags)
continue;
+ if (name && strcmp(p->name, name))
+ continue;
if (p->start > end) {
p = NULL;
break;
@@ -262,19 +264,26 @@ static int find_next_system_ram(struct resource *res)
res->end = p->end;
return 0;
}
-int
-walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
- int (*func)(unsigned long, unsigned long, void *))
+
+/*
+ * This function calls the callback against all memory ranges of "System RAM",
+ * which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
+ * For now, this function only walks "System RAM".
+ */
+int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *))
{
struct resource res;
unsigned long pfn, len;
u64 orig_end;
int ret = -1;
+
res.start = (u64) start_pfn << PAGE_SHIFT;
res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
orig_end = res.end;
- while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
+ while ((res.start < res.end) &&
+ (find_next_system_ram(&res, "System RAM") >= 0)) {
pfn = (unsigned long)(res.start >> PAGE_SHIFT);
len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
ret = (*func)(pfn, len, arg);
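
A minimal sketch of a walk_system_ram_range() caller; the callback name and the page-counting use case are illustrative, and the header providing the declaration is an assumption.

#include <linux/ioport.h>	/* assumed location of the declaration */
#include <linux/kernel.h>

/* Hypothetical callback: count how many "System RAM" pages fall in a range. */
static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
			   void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;		/* keep walking */
}

static unsigned long example_count(unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
	return total;
}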
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e1828..ee61f454a98 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
#endif
- perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
1, 1, NULL, 0);
}
p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2718,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/
prev_state = prev->state;
finish_arch_switch(prev);
- perf_counter_task_sched_in(current, cpu_of(rq));
+ perf_event_task_sched_in(current, cpu_of(rq));
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
@@ -2904,6 +2904,19 @@ unsigned long nr_iowait(void)
return sum;
}
+unsigned long nr_iowait_cpu(void)
+{
+ struct rq *this = this_rq();
+ return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+ struct rq *this = this_rq();
+ return this->cpu_load[0];
+}
+
+
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
@@ -5079,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t one_jiffy = jiffies_to_cputime(1);
- cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
if (user_tick)
- account_user_time(p, one_jiffy, one_jiffy_scaled);
+ account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+ account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
one_jiffy_scaled);
else
- account_idle_time(one_jiffy);
+ account_idle_time(cputime_one_jiffy);
}
/*
@@ -5193,7 +5205,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
spin_unlock(&rq->lock);
- perf_counter_task_tick(curr, cpu);
+ perf_event_task_tick(curr, cpu);
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5421,7 @@ need_resched_nonpreemptible:
if (likely(prev != next)) {
sched_info_switch(prev, next);
- perf_counter_task_sched_out(prev, next, cpu);
+ perf_event_task_sched_out(prev, next, cpu);
rq->nr_switches++;
rq->curr = next;
@@ -7671,7 +7683,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/*
* Register at high priority so that task migration (migrate_all_tasks)
* happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
*/
static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
@@ -9528,7 +9540,7 @@ void __init sched_init(void)
alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
- perf_counter_init();
+ perf_event_init();
scheduler_running = 1;
}
@@ -10300,7 +10312,7 @@ static int sched_rt_global_constraints(void)
#endif /* CONFIG_RT_GROUP_SCHED */
int sched_rt_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
@@ -10311,7 +10323,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
- ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_constraints();
@@ -10365,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
}
static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10376,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
if (tsk->sched_class != &fair_sched_class)
return -EINVAL;
#endif
+ return 0;
+}
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct task_struct *tsk, bool threadgroup)
+{
+ int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+ if (retval)
+ return retval;
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+ retval = cpu_cgroup_can_attach_task(cgrp, c);
+ if (retval) {
+ rcu_read_unlock();
+ return retval;
+ }
+ }
+ rcu_read_unlock();
+ }
return 0;
}
static void
cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup *old_cont, struct task_struct *tsk)
+ struct cgroup *old_cont, struct task_struct *tsk,
+ bool threadgroup)
{
sched_move_task(tsk);
+ if (threadgroup) {
+ struct task_struct *c;
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+ sched_move_task(c);
+ }
+ rcu_read_unlock();
+ }
}
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ecc637a0d59..4e777b47eed 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -384,10 +384,10 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
diff --git a/kernel/signal.c b/kernel/signal.c
index 64c5deeaca5..6705320784f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -705,7 +705,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
if (why) {
/*
- * The first thread which returns from finish_stop()
+ * The first thread which returns from do_signal_stop()
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal_to_deliver().
*/
@@ -971,6 +971,20 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return send_signal(sig, info, t, 0);
}
+int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+ bool group)
+{
+ unsigned long flags;
+ int ret = -ESRCH;
+
+ if (lock_task_sighand(p, &flags)) {
+ ret = send_signal(sig, info, p, group);
+ unlock_task_sighand(p, &flags);
+ }
+
+ return ret;
+}
+
/*
* Force a signal that the process can't ignore: if necessary
* we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1036,12 +1050,6 @@ void zap_other_threads(struct task_struct *p)
}
}
-int __fatal_signal_pending(struct task_struct *tsk)
-{
- return sigismember(&tsk->pending.signal, SIGKILL);
-}
-EXPORT_SYMBOL(__fatal_signal_pending);
-
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
struct sighand_struct *sighand;
@@ -1068,18 +1076,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
*/
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
- unsigned long flags;
- int ret;
+ int ret = check_kill_permission(sig, info, p);
- ret = check_kill_permission(sig, info, p);
-
- if (!ret && sig) {
- ret = -ESRCH;
- if (lock_task_sighand(p, &flags)) {
- ret = __group_send_sig_info(sig, info, p);
- unlock_task_sighand(p, &flags);
- }
- }
+ if (!ret && sig)
+ ret = do_send_sig_info(sig, info, p, true);
return ret;
}
@@ -1224,15 +1224,9 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
* These are for backward compatibility with the rest of the kernel source.
*/
-/*
- * The caller must ensure the task can't exit.
- */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
- int ret;
- unsigned long flags;
-
/*
* Make sure legacy kernel users don't send in bad values
* (normal paths check this in check_kill_permission).
@@ -1240,10 +1234,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
if (!valid_signal(sig))
return -EINVAL;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- ret = specific_send_sig_info(sig, info, p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- return ret;
+ return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
@@ -1383,15 +1374,6 @@ ret:
}
/*
- * Wake up any threads in the parent blocked in wait* syscalls.
- */
-static inline void __wake_up_parent(struct task_struct *p,
- struct task_struct *parent)
-{
- wake_up_interruptible_sync(&parent->signal->wait_chldexit);
-}
-
-/*
* Let a parent know about the death of a child.
* For a stopped/continued status change, use do_notify_parent_cldstop instead.
*
@@ -1673,29 +1655,6 @@ void ptrace_notify(int exit_code)
spin_unlock_irq(&current->sighand->siglock);
}
-static void
-finish_stop(int stop_count)
-{
- /*
- * If there are no other threads in the group, or if there is
- * a group stop in progress and we are the last to stop,
- * report to the parent. When ptraced, every thread reports itself.
- */
- if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
- read_lock(&tasklist_lock);
- do_notify_parent_cldstop(current, CLD_STOPPED);
- read_unlock(&tasklist_lock);
- }
-
- do {
- schedule();
- } while (try_to_freeze());
- /*
- * Now we don't run again until continued.
- */
- current->exit_code = 0;
-}
-
/*
* This performs the stopping for SIGSTOP and other stop signals.
* We have to stop all threads in the thread group.
@@ -1705,15 +1664,9 @@ finish_stop(int stop_count)
static int do_signal_stop(int signr)
{
struct signal_struct *sig = current->signal;
- int stop_count;
+ int notify;
- if (sig->group_stop_count > 0) {
- /*
- * There is a group stop in progress. We don't need to
- * start another one.
- */
- stop_count = --sig->group_stop_count;
- } else {
+ if (!sig->group_stop_count) {
struct task_struct *t;
if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1725,7 +1678,7 @@ static int do_signal_stop(int signr)
*/
sig->group_exit_code = signr;
- stop_count = 0;
+ sig->group_stop_count = 1;
for (t = next_thread(current); t != current; t = next_thread(t))
/*
* Setting state to TASK_STOPPED for a group
@@ -1734,19 +1687,44 @@ static int do_signal_stop(int signr)
*/
if (!(t->flags & PF_EXITING) &&
!task_is_stopped_or_traced(t)) {
- stop_count++;
+ sig->group_stop_count++;
signal_wake_up(t, 0);
}
- sig->group_stop_count = stop_count;
}
+ /*
+ * If there are no other threads in the group, or if there is
+ * a group stop in progress and we are the last to stop, report
+ * to the parent. When ptraced, every thread reports itself.
+ */
+ notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
+ notify = tracehook_notify_jctl(notify, CLD_STOPPED);
+ /*
+ * tracehook_notify_jctl() can drop and reacquire siglock, so
+ * we keep ->group_stop_count != 0 before the call. If SIGCONT
+ * or SIGKILL comes in between, ->group_stop_count becomes 0.
+ */
+ if (sig->group_stop_count) {
+ if (!--sig->group_stop_count)
+ sig->flags = SIGNAL_STOP_STOPPED;
+ current->exit_code = sig->group_exit_code;
+ __set_current_state(TASK_STOPPED);
+ }
+ spin_unlock_irq(&current->sighand->siglock);
- if (stop_count == 0)
- sig->flags = SIGNAL_STOP_STOPPED;
- current->exit_code = sig->group_exit_code;
- __set_current_state(TASK_STOPPED);
+ if (notify) {
+ read_lock(&tasklist_lock);
+ do_notify_parent_cldstop(current, notify);
+ read_unlock(&tasklist_lock);
+ }
+
+ /* Now we don't run again until woken by SIGCONT or SIGKILL */
+ do {
+ schedule();
+ } while (try_to_freeze());
+
+ tracehook_finish_jctl();
+ current->exit_code = 0;
- spin_unlock_irq(&current->sighand->siglock);
- finish_stop(stop_count);
return 1;
}
@@ -1815,14 +1793,15 @@ relock:
int why = (signal->flags & SIGNAL_STOP_CONTINUED)
? CLD_CONTINUED : CLD_STOPPED;
signal->flags &= ~SIGNAL_CLD_MASK;
- spin_unlock_irq(&sighand->siglock);
- if (unlikely(!tracehook_notify_jctl(1, why)))
- goto relock;
+ why = tracehook_notify_jctl(why, CLD_CONTINUED);
+ spin_unlock_irq(&sighand->siglock);
- read_lock(&tasklist_lock);
- do_notify_parent_cldstop(current->group_leader, why);
- read_unlock(&tasklist_lock);
+ if (why) {
+ read_lock(&tasklist_lock);
+ do_notify_parent_cldstop(current->group_leader, why);
+ read_unlock(&tasklist_lock);
+ }
goto relock;
}
@@ -1987,14 +1966,14 @@ void exit_signals(struct task_struct *tsk)
if (unlikely(tsk->signal->group_stop_count) &&
!--tsk->signal->group_stop_count) {
tsk->signal->flags = SIGNAL_STOP_STOPPED;
- group_stop = 1;
+ group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
}
out:
spin_unlock_irq(&tsk->sighand->siglock);
- if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
+ if (unlikely(group_stop)) {
read_lock(&tasklist_lock);
- do_notify_parent_cldstop(tsk, CLD_STOPPED);
+ do_notify_parent_cldstop(tsk, group_stop);
read_unlock(&tasklist_lock);
}
}
@@ -2290,7 +2269,6 @@ static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
struct task_struct *p;
- unsigned long flags;
int error = -ESRCH;
rcu_read_lock();
@@ -2300,14 +2278,16 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
/*
* The null signal is a permissions and process existence
* probe. No signal is actually delivered.
- *
- * If lock_task_sighand() fails we pretend the task dies
- * after receiving the signal. The window is tiny, and the
- * signal is private anyway.
*/
- if (!error && sig && lock_task_sighand(p, &flags)) {
- error = specific_send_sig_info(sig, info, p);
- unlock_task_sighand(p, &flags);
+ if (!error && sig) {
+ error = do_send_sig_info(sig, info, p, false);
+ /*
+ * If lock_task_sighand() failed, we pretend the task
+ * dies after receiving the signal. The window is tiny,
+ * and the signal is private anyway.
+ */
+ if (unlikely(error == -ESRCH))
+ error = 0;
}
}
rcu_read_unlock();
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 09d7519557d..0d31135efbf 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -26,10 +26,10 @@ static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);
#ifdef CONFIG_SYSCTL
-static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
+static int slow_work_min_threads_sysctl(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *,
+static int slow_work_max_threads_sysctl(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
#endif
@@ -493,10 +493,10 @@ static void slow_work_oom_timeout(unsigned long data)
* Handle adjustment of the minimum number of threads
*/
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
- int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
int n;
if (ret == 0) {
@@ -521,10 +521,10 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
* Handle adjustment of the maximum number of threads
*/
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
- int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
int n;
if (ret == 0) {
diff --git a/kernel/smp.c b/kernel/smp.c
index 8e218500ab1..c9d1c7835c2 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,8 +29,7 @@ enum {
struct call_function_data {
struct call_single_data csd;
- spinlock_t lock;
- unsigned int refs;
+ atomic_t refs;
cpumask_var_t cpumask;
};
@@ -39,9 +38,7 @@ struct call_single_queue {
spinlock_t lock;
};
-static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
- .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
-};
+static DEFINE_PER_CPU(struct call_function_data, cfd_data);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -196,25 +193,18 @@ void generic_smp_call_function_interrupt(void)
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
- spin_lock(&data->lock);
- if (!cpumask_test_cpu(cpu, data->cpumask)) {
- spin_unlock(&data->lock);
+ if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
continue;
- }
- cpumask_clear_cpu(cpu, data->cpumask);
- spin_unlock(&data->lock);
data->csd.func(data->csd.info);
- spin_lock(&data->lock);
- WARN_ON(data->refs == 0);
- refs = --data->refs;
+ refs = atomic_dec_return(&data->refs);
+ WARN_ON(refs < 0);
if (!refs) {
spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
spin_unlock(&call_function.lock);
}
- spin_unlock(&data->lock);
if (refs)
continue;
@@ -357,13 +347,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
generic_exec_single(cpu, data, wait);
}
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
- arch_send_call_function_ipi(*(maskp))
-#endif
-
/**
* smp_call_function_many(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on (only runs on online subset).
@@ -419,23 +402,20 @@ void smp_call_function_many(const struct cpumask *mask,
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
- spin_lock_irqsave(&data->lock, flags);
data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, data->cpumask);
- data->refs = cpumask_weight(data->cpumask);
+ atomic_set(&data->refs, cpumask_weight(data->cpumask));
- spin_lock(&call_function.lock);
+ spin_lock_irqsave(&call_function.lock, flags);
/*
* Place entry at the _HEAD_ of the list, so that any cpu still
* observing the entry in generic_smp_call_function_interrupt()
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
- spin_unlock(&call_function.lock);
-
- spin_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&call_function.lock, flags);
/*
* Make the list addition visible before sending the ipi.
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 88796c33083..81324d12eb3 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -90,11 +90,11 @@ void touch_all_softlockup_watchdogs(void)
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
touch_all_softlockup_watchdogs();
- return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index b3f1097c76f..255475d163e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,7 +14,7 @@
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
@@ -1338,6 +1338,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
unsigned long flags;
cputime_t utime, stime;
struct task_cputime cputime;
+ unsigned long maxrss = 0;
memset((char *) r, 0, sizeof *r);
utime = stime = cputime_zero;
@@ -1346,6 +1347,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
utime = task_utime(current);
stime = task_stime(current);
accumulate_thread_rusage(p, r);
+ maxrss = p->signal->maxrss;
goto out;
}
@@ -1363,6 +1365,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_majflt = p->signal->cmaj_flt;
r->ru_inblock = p->signal->cinblock;
r->ru_oublock = p->signal->coublock;
+ maxrss = p->signal->cmaxrss;
if (who == RUSAGE_CHILDREN)
break;
@@ -1377,6 +1380,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_majflt += p->signal->maj_flt;
r->ru_inblock += p->signal->inblock;
r->ru_oublock += p->signal->oublock;
+ if (maxrss < p->signal->maxrss)
+ maxrss = p->signal->maxrss;
t = p;
do {
accumulate_thread_rusage(t, r);
@@ -1392,6 +1397,15 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
out:
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
+
+ if (who != RUSAGE_CHILDREN) {
+ struct mm_struct *mm = get_task_mm(p);
+ if (mm) {
+ setmax_mm_hiwater_rss(&maxrss, mm);
+ mmput(mm);
+ }
+ }
+ r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
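
Userspace sees the new accounting through the existing getrusage() interface; a minimal sketch reading the peak RSS, which is reported in kilobytes per the pages-to-KB conversion above.

/* Illustration only: print this process's peak resident set size. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("peak RSS: %ld kB\n", ru.ru_maxrss);
	return 0;
}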
@@ -1511,11 +1525,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SET_TSC:
error = SET_TSC_CTL(arg2);
break;
- case PR_TASK_PERF_COUNTERS_DISABLE:
- error = perf_counter_task_disable();
+ case PR_TASK_PERF_EVENTS_DISABLE:
+ error = perf_event_task_disable();
break;
- case PR_TASK_PERF_COUNTERS_ENABLE:
- error = perf_counter_task_enable();
+ case PR_TASK_PERF_EVENTS_ENABLE:
+ error = perf_event_task_enable();
break;
case PR_GET_TIMERSLACK:
error = current->timer_slack_ns;
@@ -1528,6 +1542,28 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
current->timer_slack_ns = arg2;
error = 0;
break;
+ case PR_MCE_KILL:
+ if (arg4 | arg5)
+ return -EINVAL;
+ switch (arg2) {
+ case 0:
+ if (arg3 != 0)
+ return -EINVAL;
+ current->flags &= ~PF_MCE_PROCESS;
+ break;
+ case 1:
+ current->flags |= PF_MCE_PROCESS;
+ if (arg3 != 0)
+ current->flags |= PF_MCE_EARLY;
+ else
+ current->flags &= ~PF_MCE_EARLY;
+ break;
+ default:
+ return -EINVAL;
+ }
+ error = 0;
+ break;
+
default:
error = -EINVAL;
break;
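
A minimal userspace sketch of the new PR_MCE_KILL prctl; it assumes a <sys/prctl.h> that already defines PR_MCE_KILL (the definition itself is not part of the hunk shown here).

/* Illustration only: toggle the per-process memory-failure policy. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* arg2 == 1 sets PF_MCE_PROCESS; arg3 != 0 additionally sets
	 * PF_MCE_EARLY ("early kill"); unused args must be zero. */
	if (prctl(PR_MCE_KILL, 1, 1, 0, 0) != 0)
		perror("PR_MCE_KILL enable");

	/* arg2 == 0 (with arg3 == 0) clears the per-process policy again. */
	if (prctl(PR_MCE_KILL, 0, 0, 0, 0) != 0)
		perror("PR_MCE_KILL disable");
	return 0;
}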
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 68320f6b07b..e06d0b8d195 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -49,6 +49,7 @@ cond_syscall(sys_sendmsg);
cond_syscall(compat_sys_sendmsg);
cond_syscall(sys_recvmsg);
cond_syscall(compat_sys_recvmsg);
+cond_syscall(compat_sys_recvfrom);
cond_syscall(sys_socketcall);
cond_syscall(sys_futex);
cond_syscall(compat_sys_futex);
@@ -177,4 +178,4 @@ cond_syscall(sys_eventfd);
cond_syscall(sys_eventfd2);
/* performance counters: */
-cond_syscall(sys_perf_counter_open);
+cond_syscall(sys_perf_event_open);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1a631ba684a..0d949c51741 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -26,7 +26,6 @@
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/ctype.h>
-#include <linux/utsname.h>
#include <linux/kmemcheck.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
@@ -50,7 +49,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/slow-work.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -77,6 +76,7 @@ extern int max_threads;
extern int core_uses_pid;
extern int suid_dumpable;
extern char core_pattern[];
+extern unsigned int core_pipe_limit;
extern int pid_max;
extern int min_free_kbytes;
extern int pid_max_min, pid_max_max;
@@ -106,6 +106,9 @@ static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static unsigned long one_ul = 1;
static int one_hundred = 100;
+#ifdef CONFIG_PRINTK
+static int ten_thousand = 10000;
+#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -160,9 +163,9 @@ extern int max_lock_depth;
#endif
#ifdef CONFIG_PROC_SYSCTL
-static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
+static int proc_do_cad_pid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static int proc_taint(struct ctl_table *table, int write, struct file *filp,
+static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
@@ -421,6 +424,14 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dostring,
.strategy = &sysctl_string,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "core_pipe_limit",
+ .data = &core_pipe_limit,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
@@ -722,6 +733,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "printk_delay",
+ .data = &printk_delay_msec,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &ten_thousand,
+ },
#endif
{
.ctl_name = KERN_NGROUPS_MAX,
@@ -964,28 +986,28 @@ static struct ctl_table kern_table[] = {
.child = slow_work_sysctls,
},
#endif
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "perf_counter_paranoid",
- .data = &sysctl_perf_counter_paranoid,
- .maxlen = sizeof(sysctl_perf_counter_paranoid),
+ .procname = "perf_event_paranoid",
+ .data = &sysctl_perf_event_paranoid,
+ .maxlen = sizeof(sysctl_perf_event_paranoid),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "perf_counter_mlock_kb",
- .data = &sysctl_perf_counter_mlock,
- .maxlen = sizeof(sysctl_perf_counter_mlock),
+ .procname = "perf_event_mlock_kb",
+ .data = &sysctl_perf_event_mlock,
+ .maxlen = sizeof(sysctl_perf_event_mlock),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "perf_counter_max_sample_rate",
- .data = &sysctl_perf_counter_sample_rate,
- .maxlen = sizeof(sysctl_perf_counter_sample_rate),
+ .procname = "perf_event_max_sample_rate",
+ .data = &sysctl_perf_event_sample_rate,
+ .maxlen = sizeof(sysctl_perf_event_sample_rate),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
@@ -1376,6 +1398,31 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &scan_unevictable_handler,
},
+#ifdef CONFIG_MEMORY_FAILURE
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "memory_failure_early_kill",
+ .data = &sysctl_memory_failure_early_kill,
+ .maxlen = sizeof(sysctl_memory_failure_early_kill),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "memory_failure_recovery",
+ .data = &sysctl_memory_failure_recovery,
+ .maxlen = sizeof(sysctl_memory_failure_recovery),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
+
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -2204,7 +2251,7 @@ void sysctl_head_put(struct ctl_table_header *head)
#ifdef CONFIG_PROC_SYSCTL
static int _proc_do_string(void* data, int maxlen, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
size_t len;
@@ -2265,7 +2312,6 @@ static int _proc_do_string(void* data, int maxlen, int write,
* proc_dostring - read a string sysctl
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2279,10 +2325,10 @@ static int _proc_do_string(void* data, int maxlen, int write,
*
* Returns 0 on success.
*/
-int proc_dostring(struct ctl_table *table, int write, struct file *filp,
+int proc_dostring(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return _proc_do_string(table->data, table->maxlen, write, filp,
+ return _proc_do_string(table->data, table->maxlen, write,
buffer, lenp, ppos);
}
@@ -2307,7 +2353,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
}
static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
- int write, struct file *filp, void __user *buffer,
+ int write, void __user *buffer,
size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
@@ -2414,13 +2460,13 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
#undef TMPBUFLEN
}
-static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp,
+static int do_proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
void *data)
{
- return __do_proc_dointvec(table->data, table, write, filp,
+ return __do_proc_dointvec(table->data, table, write,
buffer, lenp, ppos, conv, data);
}
@@ -2428,7 +2474,6 @@ static int do_proc_dointvec(struct ctl_table *table, int write, struct file *fil
* proc_dointvec - read a vector of integers
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2438,10 +2483,10 @@ static int do_proc_dointvec(struct ctl_table *table, int write, struct file *fil
*
* Returns 0 on success.
*/
-int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,buffer,lenp,ppos,
NULL,NULL);
}
@@ -2449,7 +2494,7 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
* Taint values can only be increased
* This means we can safely use a temporary.
*/
-static int proc_taint(struct ctl_table *table, int write, struct file *filp,
+static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
@@ -2461,7 +2506,7 @@ static int proc_taint(struct ctl_table *table, int write, struct file *filp,
t = *table;
t.data = &tmptaint;
- err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos);
+ err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
if (err < 0)
return err;
@@ -2513,7 +2558,6 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
* proc_dointvec_minmax - read a vector of integers with min/max values
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2526,19 +2570,18 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
*
* Returns 0 on success.
*/
-int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_dointvec_minmax_conv_param param = {
.min = (int *) table->extra1,
.max = (int *) table->extra2,
};
- return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
do_proc_dointvec_minmax_conv, &param);
}
static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
- struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
unsigned long convmul,
@@ -2643,21 +2686,19 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
}
static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
- struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
unsigned long convmul,
unsigned long convdiv)
{
return __do_proc_doulongvec_minmax(table->data, table, write,
- filp, buffer, lenp, ppos, convmul, convdiv);
+ buffer, lenp, ppos, convmul, convdiv);
}
/**
* proc_doulongvec_minmax - read a vector of long integers with min/max values
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2670,17 +2711,16 @@ static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
+int proc_doulongvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
+ return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l);
}
/**
* proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2695,11 +2735,10 @@ int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp
* Returns 0 on success.
*/
int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
- struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
- return do_proc_doulongvec_minmax(table, write, filp, buffer,
+ return do_proc_doulongvec_minmax(table, write, buffer,
lenp, ppos, HZ, 1000l);
}
@@ -2775,7 +2814,6 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
* proc_dointvec_jiffies - read a vector of integers as seconds
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2787,10 +2825,10 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
*
* Returns 0 on success.
*/
-int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,buffer,lenp,ppos,
do_proc_dointvec_jiffies_conv,NULL);
}
@@ -2798,7 +2836,6 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
* proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: pointer to the file position
@@ -2810,10 +2847,10 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
+ return do_proc_dointvec(table,write,buffer,lenp,ppos,
do_proc_dointvec_userhz_jiffies_conv,NULL);
}
@@ -2821,7 +2858,6 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file
* proc_dointvec_ms_jiffies - read a vector of integers as 1 milliseconds
* @table: the sysctl table
* @write: %TRUE if this is a write to the sysctl file
- * @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
* @ppos: file position
@@ -2834,14 +2870,14 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file
*
* Returns 0 on success.
*/
-int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
do_proc_dointvec_ms_jiffies_conv, NULL);
}
-static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
+static int proc_do_cad_pid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct pid *new_pid;
@@ -2850,7 +2886,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp
tmp = pid_vnr(cad_pid);
- r = __do_proc_dointvec(&tmp, table, write, filp, buffer,
+ r = __do_proc_dointvec(&tmp, table, write, buffer,
lenp, ppos, NULL, NULL);
if (r || !write)
return r;
@@ -2865,50 +2901,49 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp
#else /* CONFIG_PROC_FS */
-int proc_dostring(struct ctl_table *table, int write, struct file *filp,
+int proc_dostring(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
+int proc_dointvec_ms_jiffies(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
+int proc_doulongvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
- struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 0b0a6366c9d..ee266620b06 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o timeconv.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
new file mode 100644
index 00000000000..86628e755f3
--- /dev/null
+++ b/kernel/time/timeconv.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+ * This file is part of the GNU C Library.
+ * Contributed by Paul Eggert (eggert@twinsun.com).
+ *
+ * The GNU C Library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The GNU C Library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with the GNU C Library; see the file COPYING.LIB. If not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Converts the calendar time to broken-down time representation
+ * Based on code from glibc-2.6
+ *
+ * 2009-7-14:
+ * Moved from glibc-2.6 to kernel by Zhaolei<zhaolei@cn.fujitsu.com>
+ */
+
+#include <linux/time.h>
+#include <linux/module.h>
+
+/*
+ * Nonzero if YEAR is a leap year (every 4 years,
+ * except every 100th isn't, and every 400th is).
+ */
+static int __isleap(long year)
+{
+ return (year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0);
+}
+
+/* do a mathdiv for long type */
+static long math_div(long a, long b)
+{
+ return a / b - (a % b < 0);
+}
+
+/* How many leap years between y1 and y2, y1 must be less than or equal to y2 */
+static long leaps_between(long y1, long y2)
+{
+ long leaps1 = math_div(y1 - 1, 4) - math_div(y1 - 1, 100)
+ + math_div(y1 - 1, 400);
+ long leaps2 = math_div(y2 - 1, 4) - math_div(y2 - 1, 100)
+ + math_div(y2 - 1, 400);
+ return leaps2 - leaps1;
+}
+
+/* How many days come before each month (0-12). */
+static const unsigned short __mon_yday[2][13] = {
+ /* Normal years. */
+ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
+ /* Leap years. */
+ {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
+};
+
+#define SECS_PER_HOUR (60 * 60)
+#define SECS_PER_DAY (SECS_PER_HOUR * 24)
+
+/**
+ * time_to_tm - converts the calendar time to local broken-down time
+ *
+ * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ * Coordinated Universal Time (UTC).
+ * @offset: offset seconds to add to totalsecs.
+ * @result: pointer to struct tm variable to receive broken-down time.
+ */
+void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+{
+ long days, rem, y;
+ const unsigned short *ip;
+
+ days = totalsecs / SECS_PER_DAY;
+ rem = totalsecs % SECS_PER_DAY;
+ rem += offset;
+ while (rem < 0) {
+ rem += SECS_PER_DAY;
+ --days;
+ }
+ while (rem >= SECS_PER_DAY) {
+ rem -= SECS_PER_DAY;
+ ++days;
+ }
+
+ result->tm_hour = rem / SECS_PER_HOUR;
+ rem %= SECS_PER_HOUR;
+ result->tm_min = rem / 60;
+ result->tm_sec = rem % 60;
+
+ /* January 1, 1970 was a Thursday. */
+ result->tm_wday = (4 + days) % 7;
+ if (result->tm_wday < 0)
+ result->tm_wday += 7;
+
+ y = 1970;
+
+ while (days < 0 || days >= (__isleap(y) ? 366 : 365)) {
+ /* Guess a corrected year, assuming 365 days per year. */
+ long yg = y + math_div(days, 365);
+
+ /* Adjust DAYS and Y to match the guessed year. */
+ days -= (yg - y) * 365 + leaps_between(y, yg);
+ y = yg;
+ }
+
+ result->tm_year = y - 1900;
+
+ result->tm_yday = days;
+
+ ip = __mon_yday[__isleap(y)];
+ for (y = 11; days < ip[y]; y--)
+ continue;
+ days -= ip[y];
+
+ result->tm_mon = y;
+ result->tm_mday = days + 1;
+}
+EXPORT_SYMBOL(time_to_tm);
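A minimal usage sketch of the newly exported time_to_tm() helper; the surrounding function and the use of current_kernel_time() are illustrative assumptions, not part of this patch:

#include <linux/time.h>
#include <linux/kernel.h>

/* Print the current wall-clock time as UTC using the new helper. */
static void example_print_utc(void)
{
	struct timespec now = current_kernel_time();
	struct tm tm;

	time_to_tm(now.tv_sec, 0, &tm);	/* offset 0: broken-down time in UTC */

	printk(KERN_INFO "now: %04ld-%02d-%02d %02d:%02d:%02d UTC\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
}

A non-zero offset (in seconds) shifts the result away from UTC, which is how callers can produce local rather than UTC time.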
diff --git a/kernel/timer.c b/kernel/timer.c
index bbb51074680..5db5a8d2681 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
@@ -46,6 +46,9 @@
#include <asm/timex.h>
#include <asm/io.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/timer.h>
+
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
@@ -521,6 +524,25 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
+static inline void debug_init(struct timer_list *timer)
+{
+ debug_timer_init(timer);
+ trace_timer_init(timer);
+}
+
+static inline void
+debug_activate(struct timer_list *timer, unsigned long expires)
+{
+ debug_timer_activate(timer);
+ trace_timer_start(timer, expires);
+}
+
+static inline void debug_deactivate(struct timer_list *timer)
+{
+ debug_timer_deactivate(timer);
+ trace_timer_cancel(timer);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
@@ -549,7 +571,7 @@ void init_timer_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
{
- debug_timer_init(timer);
+ debug_init(timer);
__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
@@ -568,7 +590,7 @@ static inline void detach_timer(struct timer_list *timer,
{
struct list_head *entry = &timer->entry;
- debug_timer_deactivate(timer);
+ debug_deactivate(timer);
__list_del(entry->prev, entry->next);
if (clear_pending)
@@ -632,7 +654,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
goto out_unlock;
}
- debug_timer_activate(timer);
+ debug_activate(timer, expires);
new_base = __get_cpu_var(tvec_bases);
@@ -787,7 +809,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
BUG_ON(timer_pending(timer) || !timer->function);
spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base);
- debug_timer_activate(timer);
+ debug_activate(timer, timer->expires);
if (time_before(timer->expires, base->next_timer) &&
!tbase_get_deferrable(timer->base))
base->next_timer = timer->expires;
@@ -1000,7 +1022,9 @@ static inline void __run_timers(struct tvec_base *base)
*/
lock_map_acquire(&lockdep_map);
+ trace_timer_expire_entry(timer);
fn(data);
+ trace_timer_expire_exit(timer);
lock_map_release(&lockdep_map);
@@ -1187,7 +1211,7 @@ static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = __get_cpu_var(tvec_bases);
- perf_counter_do_pending();
+ perf_event_do_pending();
hrtimer_run_pending();
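For context, a sketch of ordinary timer usage (names and timeout are illustrative); with this patch each call below also fires one of the new timer tracepoints created above:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

/* Runs in softirq context, bracketed by timer_expire_entry/timer_expire_exit. */
static void demo_timer_fn(unsigned long data)
{
}

static void demo_timer_arm(void)
{
	setup_timer(&demo_timer, demo_timer_fn, 0);	/* -> timer_init */
	mod_timer(&demo_timer, jiffies + HZ);		/* -> timer_start */
}

static void demo_timer_disarm(void)
{
	del_timer_sync(&demo_timer);			/* -> timer_cancel (if pending) */
}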
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e7163460440..b416512ad17 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
-# hidding of the automatic options options.
+# hiding of the automatic options.
config TRACING
bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e70af98bb99..46592feab5a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1520,7 +1520,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations show_ftrace_seq_ops = {
+static const struct seq_operations show_ftrace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -2461,7 +2461,7 @@ static int g_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations ftrace_graph_seq_ops = {
+static const struct seq_operations ftrace_graph_seq_ops = {
.start = g_start,
.next = g_next,
.stop = g_stop,
@@ -3018,7 +3018,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
@@ -3028,7 +3028,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
mutex_lock(&ftrace_lock);
- ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
goto out;
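The seq_operations constification in the ftrace.c hunks above (and in trace.c below) follows the standard seq_file pattern; a generic sketch with hypothetical names:

#include <linux/seq_file.h>
#include <linux/fs.h>

static void *demo_start(struct seq_file *m, loff_t *pos) { return NULL; }
static void *demo_next(struct seq_file *m, void *v, loff_t *pos) { return NULL; }
static void demo_stop(struct seq_file *m, void *v) { }
static int demo_show(struct seq_file *m, void *v) { return 0; }

/* Never written at run time, so the table can be const and live in .rodata. */
static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);	/* seq_open() accepts a const table */
}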
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ae17453dc0f..45068269ebb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1949,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
.stop = s_stop,
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (current_trace)
*iter->trace = *current_trace;
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- cpumask_clear(iter->started);
-
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
@@ -2163,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
- if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
/* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
- cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ca7d7c4d0c2..23b63859130 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -155,7 +155,7 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
seq_print_ip_sym(seq, it->from, symflags) &&
trace_seq_printf(seq, "\n"))
return TRACE_TYPE_HANDLED;
- return TRACE_TYPE_PARTIAL_LINE;;
+ return TRACE_TYPE_PARTIAL_LINE;
}
return TRACE_TYPE_UNHANDLED;
}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0f6facb050a..8504ac71e4e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -296,14 +296,14 @@ static const struct file_operations stack_trace_fops = {
int
stack_trace_sysctl(struct ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
mutex_lock(&stack_sysctl_mutex);
- ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write ||
(last_stack_tracer_enabled == !!stack_tracer_enabled))
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7a3550cf259..9fbce6c9d2e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -2,7 +2,7 @@
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/syscall.h>
#include "trace_output.h"
@@ -433,7 +433,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
rec->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args,
(unsigned long *)&rec->args);
- perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
+ perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
end:
local_irq_restore(flags);
@@ -532,7 +532,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
rec->nr = syscall_nr;
rec->ret = syscall_get_return_value(current, regs);
- perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size);
+ perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
end:
local_irq_restore(flags);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 9489a0a9b1b..cc89be5bc0f 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -48,7 +48,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
/*
* Note about RCU :
- * It is used to to delay the free of multiple probes array until a quiescent
+ * It is used to delay the free of multiple probes array until a quiescent
* state is reached.
* Tracepoint entries modifications are protected by the tracepoints_mutex.
*/
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 0314501688b..419209893d8 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -4,7 +4,6 @@
*/
#include <linux/mm.h>
-#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 92359cc747a..69eae358a72 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -42,14 +42,14 @@ static void put_uts(ctl_table *table, int write, void *which)
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
*/
-static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
+static int proc_do_uts_string(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table uts_table;
int r;
memcpy(&uts_table, table, sizeof(uts_table));
uts_table.data = get_uts(table, write);
- r = proc_dostring(&uts_table,write,filp,buffer,lenp, ppos);
+ r = proc_dostring(&uts_table,write,buffer,lenp, ppos);
put_uts(table, write, uts_table.data);
return r;
}
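The change running through this series drops the unused struct file * argument from every proc handler, so a custom handler now takes five arguments; a sketch with hypothetical names:

#include <linux/sysctl.h>
#include <linux/kernel.h>

static int demo_value;

/* Wraps the generic helper using the new (file-less) signature. */
static int demo_dointvec(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write)
		pr_info("demo_value is now %d\n", demo_value);
	return ret;
}

Such a handler is wired into a ctl_table entry via .proc_handler exactly as before; only the prototype changes.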
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d57b12f59c8..891155817bc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -50,6 +50,14 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config STRIP_ASM_SYMS
+ bool "Strip assembler-generated symbols during link"
+ default n
+ help
+ Strip internal assembler-generated symbols during a link (symbols
+ that look like '.Lxxx') so they don't pollute the output of
+ get_wchan() and suchlike.
+
config UNUSED_SYMBOLS
bool "Enable unused/obsolete exported symbols"
default y if X86
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
index 603c81b6654..846e039a86b 100644
--- a/lib/Kconfig.kmemcheck
+++ b/lib/Kconfig.kmemcheck
@@ -1,6 +1,8 @@
config HAVE_ARCH_KMEMCHECK
bool
+if HAVE_ARCH_KMEMCHECK
+
menuconfig KMEMCHECK
bool "kmemcheck: trap use of uninitialized memory"
depends on DEBUG_KERNEL
@@ -89,3 +91,4 @@ config KMEMCHECK_BITOPS_OK
accesses where not all the bits are initialized at the same time.
This may also hide some real bugs.
+endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 68dfce59c1b..fc686c7a0a0 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,6 +27,11 @@
#define GZIP_IOBUF_SIZE (16*1024)
+static int nofill(void *buffer, unsigned int len)
+{
+ return -1;
+}
+
/* Included from initramfs et al code */
STATIC int INIT gunzip(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
@@ -76,6 +81,9 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
goto gunzip_nomem4;
}
+ if (!fill)
+ fill = nofill;
+
if (len == 0)
len = fill(zbuf, GZIP_IOBUF_SIZE);
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 0b954e04bd3..ca82fde81c8 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -82,6 +82,11 @@ struct rc {
#define RC_MODEL_TOTAL_BITS 11
+static int nofill(void *buffer, unsigned int len)
+{
+ return -1;
+}
+
/* Called twice: once at startup and once in rc_normalize() */
static void INIT rc_read(struct rc *rc)
{
@@ -97,7 +102,10 @@ static inline void INIT rc_init(struct rc *rc,
int (*fill)(void*, unsigned int),
char *buffer, int buffer_size)
{
- rc->fill = fill;
+ if (fill)
+ rc->fill = fill;
+ else
+ rc->fill = nofill;
rc->buffer = (uint8_t *)buffer;
rc->buffer_size = buffer_size;
rc->buffer_end = rc->buffer + rc->buffer_size;
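Both decompressors now substitute a stub when the caller passes a NULL fill callback; the shape of that fallback in isolation (names hypothetical):

typedef int (*fill_fn)(void *buffer, unsigned int len);

/* Stand-in input callback: report "no more data" instead of dereferencing NULL. */
static int nofill(void *buffer, unsigned int len)
{
	return -1;
}

static fill_fn pick_fill(fill_fn fill)
{
	return fill ? fill : nofill;
}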
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 7baed2fc3bc..66eef2e4483 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -28,23 +28,6 @@ struct flex_array_part {
char elements[FLEX_ARRAY_PART_SIZE];
};
-static inline int __elements_per_part(int element_size)
-{
- return FLEX_ARRAY_PART_SIZE / element_size;
-}
-
-static inline int bytes_left_in_base(void)
-{
- int element_offset = offsetof(struct flex_array, parts);
- int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
- return bytes_left;
-}
-
-static inline int nr_base_part_ptrs(void)
-{
- return bytes_left_in_base() / sizeof(struct flex_array_part *);
-}
-
/*
* If a user requests an allocation which is small
* enough, we may simply use the space in the
@@ -54,7 +37,7 @@ static inline int nr_base_part_ptrs(void)
static inline int elements_fit_in_base(struct flex_array *fa)
{
int data_size = fa->element_size * fa->total_nr_elements;
- if (data_size <= bytes_left_in_base())
+ if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
return 1;
return 0;
}
@@ -63,6 +46,7 @@ static inline int elements_fit_in_base(struct flex_array *fa)
* flex_array_alloc - allocate a new flexible array
* @element_size: the size of individual elements in the array
* @total: total number of elements that this should hold
+ * @flags: page allocation flags to use for base array
*
* Note: all locking must be provided by the caller.
*
@@ -103,7 +87,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags)
{
struct flex_array *ret;
- int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+ int max_size = FLEX_ARRAY_NR_BASE_PTRS *
+ FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
/* max_size will end up 0 if element_size > PAGE_SIZE */
if (total > max_size)
@@ -113,17 +98,21 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
return NULL;
ret->element_size = element_size;
ret->total_nr_elements = total;
+ if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
+ memset(ret->parts[0], FLEX_ARRAY_FREE,
+ FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
static int fa_element_to_part_nr(struct flex_array *fa,
unsigned int element_nr)
{
- return element_nr / __elements_per_part(fa->element_size);
+ return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
}
/**
* flex_array_free_parts - just free the second-level pages
+ * @fa: the flex array from which to free parts
*
* This is to be used in cases where the base 'struct flex_array'
* has been statically allocated and should not be freed.
@@ -131,11 +120,10 @@ static int fa_element_to_part_nr(struct flex_array *fa,
void flex_array_free_parts(struct flex_array *fa)
{
int part_nr;
- int max_part = nr_base_part_ptrs();
if (elements_fit_in_base(fa))
return;
- for (part_nr = 0; part_nr < max_part; part_nr++)
+ for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
kfree(fa->parts[part_nr]);
}
@@ -150,7 +138,8 @@ static unsigned int index_inside_part(struct flex_array *fa,
{
unsigned int part_offset;
- part_offset = element_nr % __elements_per_part(fa->element_size);
+ part_offset = element_nr %
+ FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
return part_offset * fa->element_size;
}
@@ -159,15 +148,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
struct flex_array_part *part = fa->parts[part_nr];
if (!part) {
- /*
- * This leaves the part pages uninitialized
- * and with potentially random data, just
- * as if the user had kmalloc()'d the whole.
- * __GFP_ZERO can be used to zero it.
- */
- part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+ part = kmalloc(sizeof(struct flex_array_part), flags);
if (!part)
return NULL;
+ if (!(flags & __GFP_ZERO))
+ memset(part, FLEX_ARRAY_FREE,
+ sizeof(struct flex_array_part));
fa->parts[part_nr] = part;
}
return part;
@@ -175,9 +161,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
/**
* flex_array_put - copy data into the array at @element_nr
- * @src: address of data to copy into the array
+ * @fa: the flex array to copy data into
* @element_nr: index of the position in which to insert
* the new element.
+ * @src: address of data to copy into the array
+ * @flags: page allocation flags to use for array expansion
+ *
*
* Note that this *copies* the contents of @src into
* the array. If you are trying to store an array of
@@ -207,9 +196,38 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
}
/**
+ * flex_array_clear - clear element in array at @element_nr
+ * @fa: the flex array of the element.
+ * @element_nr: index of the position to clear.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+{
+ int part_nr = fa_element_to_part_nr(fa, element_nr);
+ struct flex_array_part *part;
+ void *dst;
+
+ if (element_nr >= fa->total_nr_elements)
+ return -ENOSPC;
+ if (elements_fit_in_base(fa))
+ part = (struct flex_array_part *)&fa->parts[0];
+ else {
+ part = fa->parts[part_nr];
+ if (!part)
+ return -EINVAL;
+ }
+ dst = &part->elements[index_inside_part(fa, element_nr)];
+ memset(dst, FLEX_ARRAY_FREE, fa->element_size);
+ return 0;
+}
+
+/**
* flex_array_prealloc - guarantee that array space exists
+ * @fa: the flex array for which to preallocate parts
* @start: index of first array element for which space is allocated
* @end: index of last (inclusive) element for which space is allocated
+ * @flags: page allocation flags
*
* This will guarantee that no future calls to flex_array_put()
* will allocate memory. It can be used if you are expecting to
@@ -242,6 +260,7 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
/**
* flex_array_get - pull data back out of the array
+ * @fa: the flex array from which to extract data
* @element_nr: index of the element to fetch from the array
*
* Returns a pointer to the data at index @element_nr. Note
@@ -266,3 +285,43 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
}
return &part->elements[index_inside_part(fa, element_nr)];
}
+
+static int part_is_free(struct flex_array_part *part)
+{
+ int i;
+
+ for (i = 0; i < sizeof(struct flex_array_part); i++)
+ if (part->elements[i] != FLEX_ARRAY_FREE)
+ return 0;
+ return 1;
+}
+
+/**
+ * flex_array_shrink - free unused second-level pages
+ * @fa: the flex array to shrink
+ *
+ * Frees all second-level pages that consist solely of unused
+ * elements. Returns the number of pages freed.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_shrink(struct flex_array *fa)
+{
+ struct flex_array_part *part;
+ int part_nr;
+ int ret = 0;
+
+ if (elements_fit_in_base(fa))
+ return ret;
+ for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+ part = fa->parts[part_nr];
+ if (!part)
+ continue;
+ if (part_is_free(part)) {
+ fa->parts[part_nr] = NULL;
+ kfree(part);
+ ret++;
+ }
+ }
+ return ret;
+}
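Putting the flex_array calls touched above together, a usage sketch (record type, counts and indices are illustrative; flex_array_free() is the counterpart to flex_array_alloc() defined elsewhere in lib/flex_array.c):

#include <linux/flex_array.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/errno.h>

struct demo_rec {
	u32 key;
	u32 val;
};

static int demo_flex_array(void)
{
	struct flex_array *fa;
	struct demo_rec rec = { .key = 1, .val = 42 }, *p;
	int err;

	fa = flex_array_alloc(sizeof(struct demo_rec), 1000, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	err = flex_array_put(fa, 123, &rec, GFP_KERNEL);	/* copies rec into the array */
	if (!err) {
		p = flex_array_get(fa, 123);			/* pointer into the array */
		if (p)
			pr_info("key=%u val=%u\n", p->key, p->val);
		flex_array_clear(fa, 123);	/* poison the slot with FLEX_ARRAY_FREE */
		flex_array_shrink(fa);		/* drop second-level pages that are now all free */
	}
	flex_array_free(fa);
	return err;
}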
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d320c1816a7..b91839e9e89 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -671,7 +671,7 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
return p;
}
-static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
+static char *ip6_compressed_string(char *p, const char *addr)
{
int i;
int j;
@@ -683,7 +683,12 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
u8 hi;
u8 lo;
bool needcolon = false;
- bool useIPv4 = ipv6_addr_v4mapped(addr) || ipv6_addr_is_isatap(addr);
+ bool useIPv4;
+ struct in6_addr in6;
+
+ memcpy(&in6, addr, sizeof(struct in6_addr));
+
+ useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
memset(zerolength, 0, sizeof(zerolength));
@@ -695,7 +700,7 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
/* find position of longest 0 run */
for (i = 0; i < range; i++) {
for (j = i; j < range; j++) {
- if (addr->s6_addr16[j] != 0)
+ if (in6.s6_addr16[j] != 0)
break;
zerolength[i]++;
}
@@ -722,7 +727,7 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
needcolon = false;
}
/* hex u16 without leading 0s */
- word = ntohs(addr->s6_addr16[i]);
+ word = ntohs(in6.s6_addr16[i]);
hi = word >> 8;
lo = word & 0xff;
if (hi) {
@@ -741,19 +746,19 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
if (useIPv4) {
if (needcolon)
*p++ = ':';
- p = ip4_string(p, &addr->s6_addr[12], false);
+ p = ip4_string(p, &in6.s6_addr[12], false);
}
*p = '\0';
return p;
}
-static char *ip6_string(char *p, const struct in6_addr *addr, const char *fmt)
+static char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
for (i = 0; i < 8; i++) {
- p = pack_hex_byte(p, addr->s6_addr[2 * i]);
- p = pack_hex_byte(p, addr->s6_addr[2 * i + 1]);
+ p = pack_hex_byte(p, *addr++);
+ p = pack_hex_byte(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
@@ -768,9 +773,9 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
if (fmt[0] == 'I' && fmt[2] == 'c')
- ip6_compressed_string(ip6_addr, (const struct in6_addr *)addr);
+ ip6_compressed_string(ip6_addr, addr);
else
- ip6_string(ip6_addr, (const struct in6_addr *)addr, fmt);
+ ip6_string(ip6_addr, addr, fmt);
return string(buf, end, ip6_addr, spec);
}
@@ -1092,13 +1097,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
- if (unlikely((int) size < 0)) {
- /* There can be only one.. */
- static char warn = 1;
- WARN_ON(warn);
- warn = 0;
+ if (WARN_ON_ONCE((int) size < 0))
return 0;
- }
str = buf;
end = buf + size;
@@ -1544,13 +1544,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
struct printf_spec spec = {0};
- if (unlikely((int) size < 0)) {
- /* There can be only one.. */
- static char warn = 1;
- WARN_ON(warn);
- warn = 0;
+ if (WARN_ON_ONCE((int) size < 0))
return 0;
- }
str = buf;
end = buf + size;
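vsnprintf() and bstr_printf() above replace the hand-rolled "warn only once" static flag with WARN_ON_ONCE(), which keeps the one-shot state internally and returns the tested condition; the idiom in isolation (the size check is a hypothetical example):

#include <linux/kernel.h>

static int demo_validate(int size)
{
	if (WARN_ON_ONCE(size < 0))	/* warns with a backtrace the first time only */
		return 0;
	return size;
}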
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index c3e4a2baf83..46a31e5f49c 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -135,7 +135,7 @@ static const config configuration_table[10] = {
/* ===========================================================================
* Update a hash value with the given input byte
- * IN assertion: all calls to to UPDATE_HASH are made with consecutive
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
* input characters, so that a running hash key can be computed from the
* previous key instead of complete recalculation each time.
*/
@@ -146,7 +146,7 @@ static const config configuration_table[10] = {
* Insert string str in the dictionary and set match_head to the previous head
* of the hash chain (the most recent string with same hash key). Return
* the previous length of the hash chain.
- * IN assertion: all calls to to INSERT_STRING are made with consecutive
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
* input characters and the first MIN_MATCH bytes of str are valid
* (except for the last MIN_MATCH-1 bytes of the input file).
*/
diff --git a/mm/Kconfig b/mm/Kconfig
index 3aa519f52e1..24776072959 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -214,6 +214,18 @@ config HAVE_MLOCKED_PAGE_BIT
config MMU_NOTIFIER
bool
+config KSM
+ bool "Enable KSM for page merging"
+ depends on MMU
+ help
+ Enable Kernel Samepage Merging: KSM periodically scans those areas
+ of an application's address space that an app has advised may be
+ mergeable. When it finds pages of identical content, it replaces
+ the many instances by a single resident page with that content, so
+ saving memory until one or another app needs to modify the content.
+ Recommended for use with KVM, or with other duplicative applications.
+ See Documentation/vm/ksm.txt for more information.
+
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
default 4096
@@ -233,6 +245,20 @@ config DEFAULT_MMAP_MIN_ADDR
/proc/sys/vm/mmap_min_addr tunable.
+config MEMORY_FAILURE
+ depends on MMU
+ depends on X86_MCE
+ bool "Enable recovery from hardware memory errors"
+ help
+ Enables code to recover from some memory failures on systems
+ with MCA recovery. This allows a system to continue running
+ even when some of its memory has uncorrected errors. This requires
+ special hardware support and typically ECC memory.
+
+config HWPOISON_INJECT
+ tristate "Poison pages injector"
+ depends on MEMORY_FAILURE && DEBUG_KERNEL
+
config NOMMU_INITIAL_TRIM_EXCESS
int "Turn on mmap() excess space trimming before booting"
depends on !MMU
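KSM only scans regions an application has opted in; a user-space sketch of that opt-in via madvise() (MADV_MERGEABLE and its value of 12 come from the KSM series itself - if your libc headers predate it, the fallback define below applies; the mapping size is arbitrary):

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12
#endif

int main(void)
{
	size_t len = 64 << 20;	/* 64 MB of anonymous memory */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Advise the kernel that identical pages in this range may be merged. */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");
	return 0;
}

Scanning itself is started through the ksm sysfs controls the series adds; see Documentation/vm/ksm.txt referenced in the help text.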
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index aa99fd1f710..af7cfb43d2f 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -6,7 +6,7 @@ config DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
- of memory corruptions.
+ of memory corruption.
config WANT_PAGE_DEBUG_FLAGS
bool
@@ -17,11 +17,11 @@ config PAGE_POISONING
depends on !HIBERNATION
select DEBUG_PAGEALLOC
select WANT_PAGE_DEBUG_FLAGS
- help
+ ---help---
Fill the pages with poison patterns after free_pages() and verify
the patterns before alloc_pages(). This results in a large slowdown,
- but helps to find certain types of memory corruptions.
+ but helps to find certain types of memory corruption.
- This option cannot enalbe with hibernation. Otherwise, it will get
- wrong messages for memory corruption because the free pages are not
- saved to the suspend image.
+ This option cannot be enabled in combination with hibernation as
+ that would result in incorrect warnings of memory corruption after
+ a resume because free pages are not saved to the suspend image.
diff --git a/mm/Makefile b/mm/Makefile
index ea4b18bd396..ebf849042ed 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,16 +5,16 @@
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o
+ vmalloc.o pagewalk.o
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
- page_isolation.o mm_init.o $(mmu-y)
+ page_isolation.o mm_init.o mmu_context.o \
+ $(mmu-y)
obj-y += init-mm.o
-obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
@@ -40,5 +41,7 @@ obj-$(CONFIG_SMP) += allocpercpu.o
endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
+obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
diff --git a/mm/filemap.c b/mm/filemap.c
index dd51c68e2b8..6c84e598b4a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -58,7 +58,7 @@
/*
* Lock ordering:
*
- * ->i_mmap_lock (vmtruncate)
+ * ->i_mmap_lock (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
@@ -104,6 +104,10 @@
*
* ->task->proc_lock
* ->dcache_lock (proc_pid_lookup)
+ *
+ * (code doesn't rely on that order, so you could switch it around)
+ * ->tasklist_lock (memory_failure, collect_procs_ao)
+ * ->i_mmap_lock
*/
/*
@@ -119,6 +123,8 @@ void __remove_from_page_cache(struct page *page)
page->mapping = NULL;
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
+ if (PageSwapBacked(page))
+ __dec_zone_page_state(page, NR_SHMEM);
BUG_ON(page_mapped(page));
/*
@@ -431,6 +437,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
if (likely(!error)) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
+ if (PageSwapBacked(page))
+ __inc_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b16d6363477..6f048fcc749 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -456,24 +456,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
h->free_huge_pages_node[nid]++;
}
-static struct page *dequeue_huge_page(struct hstate *h)
-{
- int nid;
- struct page *page = NULL;
-
- for (nid = 0; nid < MAX_NUMNODES; ++nid) {
- if (!list_empty(&h->hugepage_freelists[nid])) {
- page = list_entry(h->hugepage_freelists[nid].next,
- struct page, lru);
- list_del(&page->lru);
- h->free_huge_pages--;
- h->free_huge_pages_node[nid]--;
- break;
- }
- }
- return page;
-}
-
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve)
@@ -641,7 +623,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
/*
* Use a helper variable to find the next node and then
- * copy it back to hugetlb_next_nid afterwards:
+ * copy it back to next_nid_to_alloc afterwards:
* otherwise there's a window in which a racer might
* pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
* But we don't need to use a spin_lock here: it really
@@ -650,13 +632,13 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
* if we just successfully allocated a hugepage so that
* the next caller gets hugepages on the next node.
*/
-static int hstate_next_node(struct hstate *h)
+static int hstate_next_node_to_alloc(struct hstate *h)
{
int next_nid;
- next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+ next_nid = next_node(h->next_nid_to_alloc, node_online_map);
if (next_nid == MAX_NUMNODES)
next_nid = first_node(node_online_map);
- h->hugetlb_next_nid = next_nid;
+ h->next_nid_to_alloc = next_nid;
return next_nid;
}
@@ -667,14 +649,15 @@ static int alloc_fresh_huge_page(struct hstate *h)
int next_nid;
int ret = 0;
- start_nid = h->hugetlb_next_nid;
+ start_nid = h->next_nid_to_alloc;
+ next_nid = start_nid;
do {
- page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
+ page = alloc_fresh_huge_page_node(h, next_nid);
if (page)
ret = 1;
- next_nid = hstate_next_node(h);
- } while (!page && h->hugetlb_next_nid != start_nid);
+ next_nid = hstate_next_node_to_alloc(h);
+ } while (!page && next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -684,6 +667,61 @@ static int alloc_fresh_huge_page(struct hstate *h)
return ret;
}
+/*
+ * helper for free_pool_huge_page() - find next node
+ * from which to free a huge page
+ */
+static int hstate_next_node_to_free(struct hstate *h)
+{
+ int next_nid;
+ next_nid = next_node(h->next_nid_to_free, node_online_map);
+ if (next_nid == MAX_NUMNODES)
+ next_nid = first_node(node_online_map);
+ h->next_nid_to_free = next_nid;
+ return next_nid;
+}
+
+/*
+ * Free a huge page from the pool, starting from the next node to free.
+ * Attempt to keep persistent huge pages more or less
+ * balanced over allowed nodes.
+ * Called with hugetlb_lock locked.
+ */
+static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
+{
+ int start_nid;
+ int next_nid;
+ int ret = 0;
+
+ start_nid = h->next_nid_to_free;
+ next_nid = start_nid;
+
+ do {
+ /*
+ * If we're returning unused surplus pages, only examine
+ * nodes with surplus pages.
+ */
+ if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
+ !list_empty(&h->hugepage_freelists[next_nid])) {
+ struct page *page =
+ list_entry(h->hugepage_freelists[next_nid].next,
+ struct page, lru);
+ list_del(&page->lru);
+ h->free_huge_pages--;
+ h->free_huge_pages_node[next_nid]--;
+ if (acct_surplus) {
+ h->surplus_huge_pages--;
+ h->surplus_huge_pages_node[next_nid]--;
+ }
+ update_and_free_page(h, page);
+ ret = 1;
+ }
+ next_nid = hstate_next_node_to_free(h);
+ } while (!ret && next_nid != start_nid);
+
+ return ret;
+}
+
static struct page *alloc_buddy_huge_page(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
@@ -855,22 +893,13 @@ free:
* When releasing a hugetlb pool reservation, any surplus pages that were
* allocated to satisfy the reservation must be explicitly freed if they were
* never used.
+ * Called with hugetlb_lock held.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
- static int nid = -1;
- struct page *page;
unsigned long nr_pages;
- /*
- * We want to release as many surplus pages as possible, spread
- * evenly across all nodes. Iterate across all nodes until we
- * can no longer free unreserved surplus pages. This occurs when
- * the nodes with surplus pages have no free pages.
- */
- unsigned long remaining_iterations = nr_online_nodes;
-
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
@@ -880,26 +909,17 @@ static void return_unused_surplus_pages(struct hstate *h,
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
- while (remaining_iterations-- && nr_pages) {
- nid = next_node(nid, node_online_map);
- if (nid == MAX_NUMNODES)
- nid = first_node(node_online_map);
-
- if (!h->surplus_huge_pages_node[nid])
- continue;
-
- if (!list_empty(&h->hugepage_freelists[nid])) {
- page = list_entry(h->hugepage_freelists[nid].next,
- struct page, lru);
- list_del(&page->lru);
- update_and_free_page(h, page);
- h->free_huge_pages--;
- h->free_huge_pages_node[nid]--;
- h->surplus_huge_pages--;
- h->surplus_huge_pages_node[nid]--;
- nr_pages--;
- remaining_iterations = nr_online_nodes;
- }
+ /*
+ * We want to release as many surplus pages as possible, spread
+ * evenly across all nodes. Iterate across all nodes until we
+ * can no longer free unreserved surplus pages. This occurs when
+ * the nodes with surplus pages have no free pages.
+ * free_pool_huge_page() will balance the frees across the
+ * on-line nodes for us and will handle the hstate accounting.
+ */
+ while (nr_pages--) {
+ if (!free_pool_huge_page(h, 1))
+ break;
}
}
@@ -1008,9 +1028,10 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
void *addr;
addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(h->hugetlb_next_nid),
+ NODE_DATA(h->next_nid_to_alloc),
huge_page_size(h), huge_page_size(h), 0);
+ hstate_next_node_to_alloc(h);
if (addr) {
/*
* Use the beginning of the huge page to store the
@@ -1020,7 +1041,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
m = addr;
goto found;
}
- hstate_next_node(h);
nr_nodes--;
}
return 0;
@@ -1141,31 +1161,43 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
*/
static int adjust_pool_surplus(struct hstate *h, int delta)
{
- static int prev_nid;
- int nid = prev_nid;
+ int start_nid, next_nid;
int ret = 0;
VM_BUG_ON(delta != -1 && delta != 1);
- do {
- nid = next_node(nid, node_online_map);
- if (nid == MAX_NUMNODES)
- nid = first_node(node_online_map);
- /* To shrink on this node, there must be a surplus page */
- if (delta < 0 && !h->surplus_huge_pages_node[nid])
- continue;
- /* Surplus cannot exceed the total number of pages */
- if (delta > 0 && h->surplus_huge_pages_node[nid] >=
+ if (delta < 0)
+ start_nid = h->next_nid_to_alloc;
+ else
+ start_nid = h->next_nid_to_free;
+ next_nid = start_nid;
+
+ do {
+ int nid = next_nid;
+ if (delta < 0) {
+ next_nid = hstate_next_node_to_alloc(h);
+ /*
+ * To shrink on this node, there must be a surplus page
+ */
+ if (!h->surplus_huge_pages_node[nid])
+ continue;
+ }
+ if (delta > 0) {
+ next_nid = hstate_next_node_to_free(h);
+ /*
+ * Surplus cannot exceed the total number of pages
+ */
+ if (h->surplus_huge_pages_node[nid] >=
h->nr_huge_pages_node[nid])
- continue;
+ continue;
+ }
h->surplus_huge_pages += delta;
h->surplus_huge_pages_node[nid] += delta;
ret = 1;
break;
- } while (nid != prev_nid);
+ } while (next_nid != start_nid);
- prev_nid = nid;
return ret;
}
@@ -1227,10 +1259,8 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
min_count = max(count, min_count);
try_to_free_low(h, min_count);
while (min_count < persistent_huge_pages(h)) {
- struct page *page = dequeue_huge_page(h);
- if (!page)
+ if (!free_pool_huge_page(h, 0))
break;
- update_and_free_page(h, page);
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, 1))
@@ -1442,7 +1472,8 @@ void __init hugetlb_add_hstate(unsigned order)
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
- h->hugetlb_next_nid = first_node(node_online_map);
+ h->next_nid_to_alloc = first_node(node_online_map);
+ h->next_nid_to_free = first_node(node_online_map);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
@@ -1506,7 +1537,7 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
@@ -1517,7 +1548,7 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (write)
h->max_huge_pages = set_max_huge_pages(h, tmp);
@@ -1526,10 +1557,10 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
}
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, buffer, length, ppos);
if (hugepages_treat_as_movable)
htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
else
@@ -1538,7 +1569,7 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
}
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
@@ -1549,7 +1580,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (write) {
spin_lock(&hugetlb_lock);
@@ -1985,6 +2016,26 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
return find_lock_page(mapping, idx);
}
+/*
+ * Return whether there is a pagecache page to back the given address within the VMA.
+ * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
+ */
+static bool hugetlbfs_pagecache_present(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct address_space *mapping;
+ pgoff_t idx;
+ struct page *page;
+
+ mapping = vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, vma, address);
+
+ page = find_get_page(mapping, idx);
+ if (page)
+ put_page(page);
+ return page != NULL;
+}
+
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int flags)
{
@@ -2180,54 +2231,55 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
return NULL;
}
-static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
-{
- if (!ptep || write || shared)
- return 0;
- else
- return huge_pte_none(huge_ptep_get(ptep));
-}
-
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i,
- int write)
+ unsigned int flags)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
int remainder = *length;
struct hstate *h = hstate_vma(vma);
- int zeropage_ok = 0;
- int shared = vma->vm_flags & VM_SHARED;
spin_lock(&mm->page_table_lock);
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
+ int absent;
struct page *page;
/*
* Some archs (sparc64, sh*) have multiple pte_ts to
- * each hugepage. We have to make * sure we get the
+ * each hugepage. We have to make sure we get the
* first, for the page indexing below to work.
*/
pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
- if (huge_zeropage_ok(pte, write, shared))
- zeropage_ok = 1;
+ absent = !pte || huge_pte_none(huge_ptep_get(pte));
+
+ /*
+ * When coredumping, it suits get_dump_page if we just return
+ * an error where there's an empty slot with no huge pagecache
+ * to back it. This way, we avoid allocating a hugepage, and
+ * the sparse dumpfile avoids allocating disk blocks, but its
+ * huge holes still show up with zeroes where they need to be.
+ */
+ if (absent && (flags & FOLL_DUMP) &&
+ !hugetlbfs_pagecache_present(h, vma, vaddr)) {
+ remainder = 0;
+ break;
+ }
- if (!pte ||
- (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
- (write && !pte_write(huge_ptep_get(pte)))) {
+ if (absent ||
+ ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
int ret;
spin_unlock(&mm->page_table_lock);
- ret = hugetlb_fault(mm, vma, vaddr, write);
+ ret = hugetlb_fault(mm, vma, vaddr,
+ (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
spin_lock(&mm->page_table_lock);
if (!(ret & VM_FAULT_ERROR))
continue;
remainder = 0;
- if (!i)
- i = -EFAULT;
break;
}
@@ -2235,10 +2287,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = pte_page(huge_ptep_get(pte));
same_page:
if (pages) {
- if (zeropage_ok)
- pages[i] = ZERO_PAGE(0);
- else
- pages[i] = mem_map_offset(page, pfn_offset);
+ pages[i] = mem_map_offset(page, pfn_offset);
get_page(pages[i]);
}
@@ -2262,7 +2311,7 @@ same_page:
*length = remainder;
*position = vaddr;
- return i;
+ return i ? i : -EFAULT;
}
void hugetlb_change_protection(struct vm_area_struct *vma,
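The hugetlb rework above keeps two persistent cursors (next_nid_to_alloc and next_nid_to_free) and advances each with the same wrap-around idiom; reduced to its essentials (function name hypothetical):

#include <linux/nodemask.h>

/* Advance a saved node cursor, wrapping from the last online node to the first. */
static int demo_next_online_node(int *cursor)
{
	int nid = next_node(*cursor, node_online_map);

	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	*cursor = nid;
	return nid;
}

Callers such as free_pool_huge_page() and adjust_pool_surplus() loop do { ... } while (next_nid != start_nid), so one pass visits every online node at most once.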
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
new file mode 100644
index 00000000000..e1d85137f08
--- /dev/null
+++ b/mm/hwpoison-inject.c
@@ -0,0 +1,41 @@
+/* Inject a hwpoison memory failure on an arbitrary pfn */
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+static struct dentry *hwpoison_dir, *corrupt_pfn;
+
+static int hwpoison_inject(void *data, u64 val)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ printk(KERN_INFO "Injecting memory failure at pfn %Lx\n", val);
+ return __memory_failure(val, 18, 0);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n");
+
+static void pfn_inject_exit(void)
+{
+ if (hwpoison_dir)
+ debugfs_remove_recursive(hwpoison_dir);
+}
+
+static int pfn_inject_init(void)
+{
+ hwpoison_dir = debugfs_create_dir("hwpoison", NULL);
+ if (hwpoison_dir == NULL)
+ return -ENOMEM;
+ corrupt_pfn = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
+ NULL, &hwpoison_fops);
+ if (corrupt_pfn == NULL) {
+ pfn_inject_exit();
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+module_init(pfn_inject_init);
+module_exit(pfn_inject_exit);
+MODULE_LICENSE("GPL");
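With HWPOISON_INJECT built, the module exposes a corrupt-pfn file under debugfs; a user-space sketch of triggering an injection (the debugfs mount point is the conventional one, and the pfn comes from the command line):

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long long pfn;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
		return 1;
	}
	pfn = strtoull(argv[1], NULL, 0);

	f = fopen("/sys/kernel/debug/hwpoison/corrupt-pfn", "w");
	if (!f) {
		perror("corrupt-pfn");
		return 1;
	}
	fprintf(f, "%llu\n", pfn);	/* parsed as a pfn and fed to __memory_failure() */
	fclose(f);
	return 0;
}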
diff --git a/mm/internal.h b/mm/internal.h
index f290c4db528..22ec8d2b0fb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -37,6 +37,8 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}
+extern unsigned long highest_memmap_pfn;
+
/*
* in mm/vmscan.c:
*/
@@ -46,7 +48,6 @@ extern void putback_lru_page(struct page *page);
/*
* in mm/page_alloc.c
*/
-extern unsigned long highest_memmap_pfn;
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
@@ -250,13 +251,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
}
#endif /* CONFIG_SPARSEMEM */
-#define GUP_FLAGS_WRITE 0x1
-#define GUP_FLAGS_FORCE 0x2
-#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
-#define GUP_FLAGS_IGNORE_SIGKILL 0x8
-
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
+ unsigned long start, int len, unsigned int foll_flags,
struct page **pages, struct vm_area_struct **vmas);
#define ZONE_RECLAIM_NOSCAN -2
diff --git a/mm/ksm.c b/mm/ksm.c
new file mode 100644
index 00000000000..f7edac356f4
--- /dev/null
+++ b/mm/ksm.c
@@ -0,0 +1,1711 @@
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork()
+ *
+ * Copyright (C) 2008-2009 Red Hat, Inc.
+ * Authors:
+ * Izik Eidus
+ * Andrea Arcangeli
+ * Chris Wright
+ * Hugh Dickins
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/spinlock.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/mmu_notifier.h>
+#include <linux/swap.h>
+#include <linux/ksm.h>
+
+#include <asm/tlbflush.h>
+
+/*
+ * A few notes about the KSM scanning process,
+ * to make it easier to understand the data structures below:
+ *
+ * In order to reduce excessive scanning, KSM sorts the memory pages by their
+ * contents into a data structure that holds pointers to the pages' locations.
+ *
+ * Since the contents of the pages may change at any moment, KSM cannot just
+ * insert the pages into a normal sorted tree and expect it to find anything.
+ * Therefore KSM uses two data structures - the stable and the unstable tree.
+ *
+ * The stable tree holds pointers to all the merged pages (ksm pages), sorted
+ * by their contents. Because each such page is write-protected, searching on
+ * this tree is fully assured to be working (except when pages are unmapped),
+ * and therefore this tree is called the stable tree.
+ *
+ * In addition to the stable tree, KSM uses a second data structure called the
+ * unstable tree: this tree holds pointers to pages which have been found to
+ * be "unchanged for a period of time". The unstable tree sorts these pages
+ * by their contents, but since they are not write-protected, KSM cannot rely
+ * upon the unstable tree to work correctly - the unstable tree is liable to
+ * be corrupted as its contents are modified, and so it is called unstable.
+ *
+ * KSM solves this problem by several techniques:
+ *
+ * 1) The unstable tree is flushed every time KSM completes scanning all
+ * memory areas, and then the tree is rebuilt again from the beginning.
+ * 2) KSM will only insert into the unstable tree those pages whose hash value
+ * has not changed since the previous scan of all memory areas.
+ * 3) The unstable tree is a red-black tree - so its balancing is based on the
+ * colors of the nodes and not on their contents, assuring that even when
+ * the tree gets "corrupted" it won't get out of balance, so scanning time
+ * remains the same (also, searching and inserting nodes in an rbtree uses
+ * the same algorithm, so we have no overhead when we flush and rebuild).
+ * 4) KSM never flushes the stable tree, which means that even if it were to
+ * take 10 attempts to find a page in the unstable tree, once it is found,
+ * it is secured in the stable tree. (When we scan a new page, we first
+ * compare it against the stable tree, and then against the unstable tree.)
+ */
+
+/**
+ * struct mm_slot - ksm information per mm that is being scanned
+ * @link: link to the mm_slots hash list
+ * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
+ * @rmap_list: head for this mm_slot's list of rmap_items
+ * @mm: the mm that this information is valid for
+ */
+struct mm_slot {
+ struct hlist_node link;
+ struct list_head mm_list;
+ struct list_head rmap_list;
+ struct mm_struct *mm;
+};
+
+/**
+ * struct ksm_scan - cursor for scanning
+ * @mm_slot: the current mm_slot we are scanning
+ * @address: the next address inside that to be scanned
+ * @rmap_item: the current rmap that we are scanning inside the rmap_list
+ * @seqnr: count of completed full scans (needed when removing unstable node)
+ *
+ * There is only the one ksm_scan instance of this cursor structure.
+ */
+struct ksm_scan {
+ struct mm_slot *mm_slot;
+ unsigned long address;
+ struct rmap_item *rmap_item;
+ unsigned long seqnr;
+};
+
+/**
+ * struct rmap_item - reverse mapping item for virtual addresses
+ * @link: link into mm_slot's rmap_list (rmap_list is per mm)
+ * @mm: the memory structure this rmap_item is pointing into
+ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
+ * @oldchecksum: previous checksum of the page at that virtual address
+ * @node: rb_node of this rmap_item in either unstable or stable tree
+ * @next: next rmap_item hanging off the same node of the stable tree
+ * @prev: previous rmap_item hanging off the same node of the stable tree
+ */
+struct rmap_item {
+ struct list_head link;
+ struct mm_struct *mm;
+ unsigned long address; /* + low bits used for flags below */
+ union {
+ unsigned int oldchecksum; /* when unstable */
+ struct rmap_item *next; /* when stable */
+ };
+ union {
+ struct rb_node node; /* when tree node */
+ struct rmap_item *prev; /* in stable list */
+ };
+};
+
+#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
+#define NODE_FLAG 0x100 /* is a node of unstable or stable tree */
+#define STABLE_FLAG 0x200 /* is a node or list item of stable tree */
+
+/* The stable and unstable tree heads */
+static struct rb_root root_stable_tree = RB_ROOT;
+static struct rb_root root_unstable_tree = RB_ROOT;
+
+#define MM_SLOTS_HASH_HEADS 1024
+static struct hlist_head *mm_slots_hash;
+
+static struct mm_slot ksm_mm_head = {
+ .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
+};
+static struct ksm_scan ksm_scan = {
+ .mm_slot = &ksm_mm_head,
+};
+
+static struct kmem_cache *rmap_item_cache;
+static struct kmem_cache *mm_slot_cache;
+
+/* The number of nodes in the stable tree */
+static unsigned long ksm_pages_shared;
+
+/* The number of page slots additionally sharing those nodes */
+static unsigned long ksm_pages_sharing;
+
+/* The number of nodes in the unstable tree */
+static unsigned long ksm_pages_unshared;
+
+/* The number of rmap_items in use: to calculate pages_volatile */
+static unsigned long ksm_rmap_items;
+
+/* Limit on the number of unswappable pages used */
+static unsigned long ksm_max_kernel_pages;
+
+/* Number of pages ksmd should scan in one batch */
+static unsigned int ksm_thread_pages_to_scan = 100;
+
+/* Milliseconds ksmd should sleep between batches */
+static unsigned int ksm_thread_sleep_millisecs = 20;
+
+#define KSM_RUN_STOP 0
+#define KSM_RUN_MERGE 1
+#define KSM_RUN_UNMERGE 2
+static unsigned int ksm_run = KSM_RUN_STOP;
+
+static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
+static DEFINE_MUTEX(ksm_thread_mutex);
+static DEFINE_SPINLOCK(ksm_mmlist_lock);
+
+#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
+ sizeof(struct __struct), __alignof__(struct __struct),\
+ (__flags), NULL)
+
+static void __init ksm_init_max_kernel_pages(void)
+{
+ ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
+}
+
+static int __init ksm_slab_init(void)
+{
+ rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
+ if (!rmap_item_cache)
+ goto out;
+
+ mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
+ if (!mm_slot_cache)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ kmem_cache_destroy(rmap_item_cache);
+out:
+ return -ENOMEM;
+}
+
+static void __init ksm_slab_free(void)
+{
+ kmem_cache_destroy(mm_slot_cache);
+ kmem_cache_destroy(rmap_item_cache);
+ mm_slot_cache = NULL;
+}
+
+static inline struct rmap_item *alloc_rmap_item(void)
+{
+ struct rmap_item *rmap_item;
+
+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+ if (rmap_item)
+ ksm_rmap_items++;
+ return rmap_item;
+}
+
+static inline void free_rmap_item(struct rmap_item *rmap_item)
+{
+ ksm_rmap_items--;
+ rmap_item->mm = NULL; /* debug safety */
+ kmem_cache_free(rmap_item_cache, rmap_item);
+}
+
+static inline struct mm_slot *alloc_mm_slot(void)
+{
+ if (!mm_slot_cache) /* initialization failed */
+ return NULL;
+ return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
+}
+
+static inline void free_mm_slot(struct mm_slot *mm_slot)
+{
+ kmem_cache_free(mm_slot_cache, mm_slot);
+}
+
+static int __init mm_slots_hash_init(void)
+{
+ mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!mm_slots_hash)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __init mm_slots_hash_free(void)
+{
+ kfree(mm_slots_hash);
+}
+
+static struct mm_slot *get_mm_slot(struct mm_struct *mm)
+{
+ struct mm_slot *mm_slot;
+ struct hlist_head *bucket;
+ struct hlist_node *node;
+
+ bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+ % MM_SLOTS_HASH_HEADS];
+ hlist_for_each_entry(mm_slot, node, bucket, link) {
+ if (mm == mm_slot->mm)
+ return mm_slot;
+ }
+ return NULL;
+}
+
+static void insert_to_mm_slots_hash(struct mm_struct *mm,
+ struct mm_slot *mm_slot)
+{
+ struct hlist_head *bucket;
+
+ bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+ % MM_SLOTS_HASH_HEADS];
+ mm_slot->mm = mm;
+ INIT_LIST_HEAD(&mm_slot->rmap_list);
+ hlist_add_head(&mm_slot->link, bucket);
+}
+
+static inline int in_stable_tree(struct rmap_item *rmap_item)
+{
+ return rmap_item->address & STABLE_FLAG;
+}
+
+/*
+ * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
+ * page tables after it has passed through ksm_exit() - which, if necessary,
+ * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
+ * a special flag: they can just back out as soon as mm_users goes to zero.
+ * ksm_test_exit() is used throughout to make this test for exit: in some
+ * places for correctness, in some places just to avoid unnecessary work.
+ */
+static inline bool ksm_test_exit(struct mm_struct *mm)
+{
+ return atomic_read(&mm->mm_users) == 0;
+}
+
+/*
+ * We use break_ksm to break COW on a ksm page: it's a stripped down
+ *
+ * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
+ * put_page(page);
+ *
+ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
+ * in case the application has unmapped and remapped mm,addr meanwhile.
+ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
+ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
+ */
+static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct page *page;
+ int ret = 0;
+
+ do {
+ cond_resched();
+ page = follow_page(vma, addr, FOLL_GET);
+ if (!page)
+ break;
+ if (PageKsm(page))
+ ret = handle_mm_fault(vma->vm_mm, vma, addr,
+ FAULT_FLAG_WRITE);
+ else
+ ret = VM_FAULT_WRITE;
+ put_page(page);
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+ /*
+ * We must loop because handle_mm_fault() may back out if there's
+ * any difficulty e.g. if pte accessed bit gets updated concurrently.
+ *
+ * VM_FAULT_WRITE is what we have been hoping for: it indicates that
+ * COW has been broken, even if the vma does not permit VM_WRITE;
+ * but note that a concurrent fault might break PageKsm for us.
+ *
+ * VM_FAULT_SIGBUS could occur if we race with truncation of the
+ * backing file, which also invalidates anonymous pages: that's
+ * okay, that truncation will have unmapped the PageKsm for us.
+ *
+ * VM_FAULT_OOM: at the time of writing (late July 2009), setting
+ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
+ * current task has TIF_MEMDIE set, and will be OOM killed on return
+ * to user; and ksmd, having no mm, would never be chosen for that.
+ *
+ * But if the mm is in a limited mem_cgroup, then the fault may fail
+ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
+ * even ksmd can fail in this way - though it's usually breaking ksm
+ * just to undo a merge it made a moment before, so unlikely to oom.
+ *
+ * That's a pity: we might therefore have more kernel pages allocated
+ * than we're counting as nodes in the stable tree; but ksm_do_scan
+ * will retry to break_cow on each pass, so should recover the page
+ * in due course. The important thing is to not let VM_MERGEABLE
+ * be cleared while any such pages might remain in the area.
+ */
+ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
+}
+
+static void break_cow(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
+ if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ goto out;
+ break_ksm(vma, addr);
+out:
+ up_read(&mm->mmap_sem);
+}
+
+static struct page *get_mergeable_page(struct rmap_item *rmap_item)
+{
+ struct mm_struct *mm = rmap_item->mm;
+ unsigned long addr = rmap_item->address;
+ struct vm_area_struct *vma;
+ struct page *page;
+
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
+ if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ goto out;
+
+ page = follow_page(vma, addr, FOLL_GET);
+ if (!page)
+ goto out;
+ if (PageAnon(page)) {
+ flush_anon_page(vma, page, addr);
+ flush_dcache_page(page);
+ } else {
+ put_page(page);
+out: page = NULL;
+ }
+ up_read(&mm->mmap_sem);
+ return page;
+}
+
+/*
+ * get_ksm_page: checks if the page at the virtual address in rmap_item
+ * is still PageKsm, in which case we can trust the content of the page,
+ * and it returns that page, with its reference held; or NULL if the page
+ * has been zapped.
+ */
+static struct page *get_ksm_page(struct rmap_item *rmap_item)
+{
+ struct page *page;
+
+ page = get_mergeable_page(rmap_item);
+ if (page && !PageKsm(page)) {
+ put_page(page);
+ page = NULL;
+ }
+ return page;
+}
+
+/*
+ * Remove rmap_item from the stable or unstable tree, unlinking its node
+ * and updating the shared/sharing/unshared counts accordingly.
+ */
+static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
+{
+ if (in_stable_tree(rmap_item)) {
+ struct rmap_item *next_item = rmap_item->next;
+
+ if (rmap_item->address & NODE_FLAG) {
+ if (next_item) {
+ rb_replace_node(&rmap_item->node,
+ &next_item->node,
+ &root_stable_tree);
+ next_item->address |= NODE_FLAG;
+ ksm_pages_sharing--;
+ } else {
+ rb_erase(&rmap_item->node, &root_stable_tree);
+ ksm_pages_shared--;
+ }
+ } else {
+ struct rmap_item *prev_item = rmap_item->prev;
+
+ BUG_ON(prev_item->next != rmap_item);
+ prev_item->next = next_item;
+ if (next_item) {
+ BUG_ON(next_item->prev != rmap_item);
+ next_item->prev = rmap_item->prev;
+ }
+ ksm_pages_sharing--;
+ }
+
+ rmap_item->next = NULL;
+
+ } else if (rmap_item->address & NODE_FLAG) {
+ unsigned char age;
+ /*
+ * Usually ksmd can and must skip the rb_erase, because
+ * root_unstable_tree was already reset to RB_ROOT.
+ * But be careful when an mm is exiting: do the rb_erase
+ * if this rmap_item was inserted by this scan, rather
+ * than left over from before.
+ */
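+ /*
+ * (The low SEQNR_MASK bits of rmap_item->address hold the seqnr at which
+ * the item was inserted - see unstable_tree_search_insert() - so the
+ * subtraction below gives 0 if it was inserted by this scan, 1 if by the
+ * previous one.)
+ */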
+ age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
+ BUG_ON(age > 1);
+ if (!age)
+ rb_erase(&rmap_item->node, &root_unstable_tree);
+ ksm_pages_unshared--;
+ }
+
+ rmap_item->address &= PAGE_MASK;
+
+ cond_resched(); /* we're called from many long loops */
+}
+
+static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
+ struct list_head *cur)
+{
+ struct rmap_item *rmap_item;
+
+ while (cur != &mm_slot->rmap_list) {
+ rmap_item = list_entry(cur, struct rmap_item, link);
+ cur = cur->next;
+ remove_rmap_item_from_tree(rmap_item);
+ list_del(&rmap_item->link);
+ free_rmap_item(rmap_item);
+ }
+}
+
+/*
+ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
+ * than check every pte of a given vma, the locking doesn't quite work for
+ * that - an rmap_item is assigned to the stable tree after inserting ksm
+ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
+ * rmap_items from parent to child at fork time (so as not to waste time
+ * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
+ */
+static int unmerge_ksm_pages(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+ int err = 0;
+
+ for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
+ if (ksm_test_exit(vma->vm_mm))
+ break;
+ if (signal_pending(current))
+ err = -ERESTARTSYS;
+ else
+ err = break_ksm(vma, addr);
+ }
+ return err;
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+static int unmerge_and_remove_all_rmap_items(void)
+{
+ struct mm_slot *mm_slot;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ int err = 0;
+
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
+ struct mm_slot, mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ for (mm_slot = ksm_scan.mm_slot;
+ mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
+ mm = mm_slot->mm;
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (ksm_test_exit(mm))
+ break;
+ if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ continue;
+ err = unmerge_ksm_pages(vma,
+ vma->vm_start, vma->vm_end);
+ if (err)
+ goto error;
+ }
+
+ remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
+ struct mm_slot, mm_list);
+ if (ksm_test_exit(mm)) {
+ hlist_del(&mm_slot->link);
+ list_del(&mm_slot->mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ free_mm_slot(mm_slot);
+ clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ up_read(&mm->mmap_sem);
+ mmdrop(mm);
+ } else {
+ spin_unlock(&ksm_mmlist_lock);
+ up_read(&mm->mmap_sem);
+ }
+ }
+
+ ksm_scan.seqnr = 0;
+ return 0;
+
+error:
+ up_read(&mm->mmap_sem);
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = &ksm_mm_head;
+ spin_unlock(&ksm_mmlist_lock);
+ return err;
+}
+#endif /* CONFIG_SYSFS */
+
+static u32 calc_checksum(struct page *page)
+{
+ u32 checksum;
+ void *addr = kmap_atomic(page, KM_USER0);
+ checksum = jhash2(addr, PAGE_SIZE / 4, 17);
+ kunmap_atomic(addr, KM_USER0);
+ return checksum;
+}
+
+static int memcmp_pages(struct page *page1, struct page *page2)
+{
+ char *addr1, *addr2;
+ int ret;
+
+ addr1 = kmap_atomic(page1, KM_USER0);
+ addr2 = kmap_atomic(page2, KM_USER1);
+ ret = memcmp(addr1, addr2, PAGE_SIZE);
+ kunmap_atomic(addr2, KM_USER1);
+ kunmap_atomic(addr1, KM_USER0);
+ return ret;
+}
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+ return !memcmp_pages(page1, page2);
+}
+
+static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+ pte_t *orig_pte)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ int swapped;
+ int err = -EFAULT;
+
+ addr = page_address_in_vma(page, vma);
+ if (addr == -EFAULT)
+ goto out;
+
+ ptep = page_check_address(page, mm, addr, &ptl, 0);
+ if (!ptep)
+ goto out;
+
+ if (pte_write(*ptep)) {
+ pte_t entry;
+
+ swapped = PageSwapCache(page);
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ /*
+ * Ok, this is tricky: when get_user_pages_fast() runs it doesn't
+ * take any lock, so the check we are about to make, comparing
+ * page_count against page_mapcount, is racy and O_DIRECT can
+ * start right after the check.
+ * So we clear the pte and flush the TLB before the check; this
+ * assures us that no O_DIRECT can start after the check or in
+ * the middle of the check.
+ */
+ entry = ptep_clear_flush(vma, addr, ptep);
+ /*
+ * Check that no O_DIRECT or similar I/O is in progress on the
+ * page
+ */
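+ /*
+ * (A sketch of that accounting, as we read it - not from the original
+ * changelog: page_count should equal page_mapcount plus the two
+ * references KSM itself holds at this point - one from the scanner's
+ * follow_page(FOLL_GET), one from try_to_merge_one_page()'s get_page()
+ * - plus one for the swap cache if PageSwapCache. Anything beyond that
+ * means someone else has the page pinned, e.g. for O_DIRECT.)
+ */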
+ if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
+ set_pte_at_notify(mm, addr, ptep, entry);
+ goto out_unlock;
+ }
+ entry = pte_wrprotect(entry);
+ set_pte_at_notify(mm, addr, ptep, entry);
+ }
+ *orig_pte = *ptep;
+ err = 0;
+
+out_unlock:
+ pte_unmap_unlock(ptep, ptl);
+out:
+ return err;
+}
+
+/**
+ * replace_page - replace page in vma by new ksm page
+ * @vma: vma that holds the pte pointing to oldpage
+ * @oldpage: the page we are replacing by newpage
+ * @newpage: the ksm page we replace oldpage by
+ * @orig_pte: the original value of the pte
+ *
+ * Returns 0 on success, -EFAULT on failure.
+ */
+static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
+ struct page *newpage, pte_t orig_pte)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ unsigned long addr;
+ pgprot_t prot;
+ int err = -EFAULT;
+
+ prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);
+
+ addr = page_address_in_vma(oldpage, vma);
+ if (addr == -EFAULT)
+ goto out;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ goto out;
+
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ goto out;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ goto out;
+
+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (!pte_same(*ptep, orig_pte)) {
+ pte_unmap_unlock(ptep, ptl);
+ goto out;
+ }
+
+ get_page(newpage);
+ page_add_ksm_rmap(newpage);
+
+ flush_cache_page(vma, addr, pte_pfn(*ptep));
+ ptep_clear_flush(vma, addr, ptep);
+ set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));
+
+ page_remove_rmap(oldpage);
+ put_page(oldpage);
+
+ pte_unmap_unlock(ptep, ptl);
+ err = 0;
+out:
+ return err;
+}
+
+/*
+ * try_to_merge_one_page - take two pages and merge them into one
+ * @vma: the vma that holds the pte pointing into oldpage
+ * @oldpage: the page that we want to replace with newpage
+ * @newpage: the page that we want to map instead of oldpage
+ *
+ * Note:
+ * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
+ * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
+ *
+ * This function returns 0 if the pages were merged, -EFAULT otherwise.
+ */
+static int try_to_merge_one_page(struct vm_area_struct *vma,
+ struct page *oldpage,
+ struct page *newpage)
+{
+ pte_t orig_pte = __pte(0);
+ int err = -EFAULT;
+
+ if (!(vma->vm_flags & VM_MERGEABLE))
+ goto out;
+
+ if (!PageAnon(oldpage))
+ goto out;
+
+ get_page(newpage);
+ get_page(oldpage);
+
+ /*
+ * We need the page lock to read a stable PageSwapCache in
+ * write_protect_page(). We use trylock_page() instead of
+ * lock_page() because we don't want to wait here - we
+ * prefer to continue scanning and merging different pages,
+ * then come back to this page when it is unlocked.
+ */
+ if (!trylock_page(oldpage))
+ goto out_putpage;
+ /*
+ * If this anonymous page is mapped only here, its pte may need
+ * to be write-protected. If it's mapped elsewhere, all of its
+ * ptes are necessarily already write-protected. But in either
+ * case, we need to lock and check page_count is not raised.
+ */
+ if (write_protect_page(vma, oldpage, &orig_pte)) {
+ unlock_page(oldpage);
+ goto out_putpage;
+ }
+ unlock_page(oldpage);
+
+ if (pages_identical(oldpage, newpage))
+ err = replace_page(vma, oldpage, newpage, orig_pte);
+
+out_putpage:
+ put_page(oldpage);
+ put_page(newpage);
+out:
+ return err;
+}
+
+/*
+ * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
+ * but no new kernel page is allocated: kpage must already be a ksm page.
+ */
+static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
+ unsigned long addr1,
+ struct page *page1,
+ struct page *kpage)
+{
+ struct vm_area_struct *vma;
+ int err = -EFAULT;
+
+ down_read(&mm1->mmap_sem);
+ if (ksm_test_exit(mm1))
+ goto out;
+
+ vma = find_vma(mm1, addr1);
+ if (!vma || vma->vm_start > addr1)
+ goto out;
+
+ err = try_to_merge_one_page(vma, page1, kpage);
+out:
+ up_read(&mm1->mmap_sem);
+ return err;
+}
+
+/*
+ * try_to_merge_two_pages - take two identical pages and prepare them
+ * to be merged into one page.
+ *
+ * This function returns 0 if we successfully mapped two identical pages
+ * into one page, -EFAULT otherwise.
+ *
+ * Note that this function allocates a new kernel page: if one of the pages
+ * is already a ksm page, try_to_merge_with_ksm_page should be used.
+ */
+static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
+ struct page *page1, struct mm_struct *mm2,
+ unsigned long addr2, struct page *page2)
+{
+ struct vm_area_struct *vma;
+ struct page *kpage;
+ int err = -EFAULT;
+
+ /*
+ * The number of nodes in the stable tree
+ * is the number of kernel pages that we hold.
+ */
+ if (ksm_max_kernel_pages &&
+ ksm_max_kernel_pages <= ksm_pages_shared)
+ return err;
+
+ kpage = alloc_page(GFP_HIGHUSER);
+ if (!kpage)
+ return err;
+
+ down_read(&mm1->mmap_sem);
+ if (ksm_test_exit(mm1)) {
+ up_read(&mm1->mmap_sem);
+ goto out;
+ }
+ vma = find_vma(mm1, addr1);
+ if (!vma || vma->vm_start > addr1) {
+ up_read(&mm1->mmap_sem);
+ goto out;
+ }
+
+ copy_user_highpage(kpage, page1, addr1, vma);
+ err = try_to_merge_one_page(vma, page1, kpage);
+ up_read(&mm1->mmap_sem);
+
+ if (!err) {
+ err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
+ /*
+ * If that fails, we have a ksm page with only one pte
+ * pointing to it: so break it.
+ */
+ if (err)
+ break_cow(mm1, addr1);
+ }
+out:
+ put_page(kpage);
+ return err;
+}
+
+/*
+ * stable_tree_search - search page inside the stable tree
+ * @page: the page for which we are searching an identical page.
+ * @page2: on success, *page2 holds the identical page that we found
+ * inside the stable tree, with a reference taken.
+ * @rmap_item: the reverse mapping item
+ *
+ * This function checks if there is a page inside the stable tree
+ * with identical content to the page that we are scanning right now.
+ *
+ * This function returns a pointer to the rmap_item of the identical page
+ * if found, NULL otherwise.
+ */
+static struct rmap_item *stable_tree_search(struct page *page,
+ struct page **page2,
+ struct rmap_item *rmap_item)
+{
+ struct rb_node *node = root_stable_tree.rb_node;
+
+ while (node) {
+ struct rmap_item *tree_rmap_item, *next_rmap_item;
+ int ret;
+
+ tree_rmap_item = rb_entry(node, struct rmap_item, node);
+ while (tree_rmap_item) {
+ BUG_ON(!in_stable_tree(tree_rmap_item));
+ cond_resched();
+ page2[0] = get_ksm_page(tree_rmap_item);
+ if (page2[0])
+ break;
+ next_rmap_item = tree_rmap_item->next;
+ remove_rmap_item_from_tree(tree_rmap_item);
+ tree_rmap_item = next_rmap_item;
+ }
+ if (!tree_rmap_item)
+ return NULL;
+
+ ret = memcmp_pages(page, page2[0]);
+
+ if (ret < 0) {
+ put_page(page2[0]);
+ node = node->rb_left;
+ } else if (ret > 0) {
+ put_page(page2[0]);
+ node = node->rb_right;
+ } else {
+ return tree_rmap_item;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * stable_tree_insert - insert rmap_item pointing to new ksm page
+ * into the stable tree.
+ *
+ * @page: the page for which we are searching an identical page inside the
+ * stable tree.
+ * @rmap_item: pointer to the reverse mapping item.
+ *
+ * This function returns the rmap_item on success, NULL otherwise.
+ */
+static struct rmap_item *stable_tree_insert(struct page *page,
+ struct rmap_item *rmap_item)
+{
+ struct rb_node **new = &root_stable_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+ struct rmap_item *tree_rmap_item, *next_rmap_item;
+ struct page *tree_page;
+ int ret;
+
+ tree_rmap_item = rb_entry(*new, struct rmap_item, node);
+ while (tree_rmap_item) {
+ BUG_ON(!in_stable_tree(tree_rmap_item));
+ cond_resched();
+ tree_page = get_ksm_page(tree_rmap_item);
+ if (tree_page)
+ break;
+ next_rmap_item = tree_rmap_item->next;
+ remove_rmap_item_from_tree(tree_rmap_item);
+ tree_rmap_item = next_rmap_item;
+ }
+ if (!tree_rmap_item)
+ return NULL;
+
+ ret = memcmp_pages(page, tree_page);
+ put_page(tree_page);
+
+ parent = *new;
+ if (ret < 0)
+ new = &parent->rb_left;
+ else if (ret > 0)
+ new = &parent->rb_right;
+ else {
+ /*
+ * It is not a bug that stable_tree_search() didn't
+ * find this node: because at that time our page was
+ * not yet write-protected, so may have changed since.
+ */
+ return NULL;
+ }
+ }
+
+ rmap_item->address |= NODE_FLAG | STABLE_FLAG;
+ rmap_item->next = NULL;
+ rb_link_node(&rmap_item->node, parent, new);
+ rb_insert_color(&rmap_item->node, &root_stable_tree);
+
+ ksm_pages_shared++;
+ return rmap_item;
+}
+
+/*
+ * unstable_tree_search_insert - search and insert items into the unstable tree.
+ *
+ * @page: the page that we are going to search for identical page or to insert
+ * into the unstable tree
+ * @page2: pointer into identical page that was found inside the unstable tree
+ * @rmap_item: the reverse mapping item of page
+ *
+ * This function searches for a page in the unstable tree identical to the
+ * page currently being scanned; and if no identical page is found in the
+ * tree, we insert rmap_item as a new object into the unstable tree.
+ *
+ * This function returns a pointer to the rmap_item found to be identical
+ * to the currently scanned page, NULL otherwise.
+ *
+ * This function does both searching and inserting, because they share
+ * the same walking algorithm in an rbtree.
+ */
+static struct rmap_item *unstable_tree_search_insert(struct page *page,
+ struct page **page2,
+ struct rmap_item *rmap_item)
+{
+ struct rb_node **new = &root_unstable_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+ struct rmap_item *tree_rmap_item;
+ int ret;
+
+ tree_rmap_item = rb_entry(*new, struct rmap_item, node);
+ page2[0] = get_mergeable_page(tree_rmap_item);
+ if (!page2[0])
+ return NULL;
+
+ /*
+ * Don't substitute an unswappable ksm page
+ * just for one good swappable forked page.
+ */
+ if (page == page2[0]) {
+ put_page(page2[0]);
+ return NULL;
+ }
+
+ ret = memcmp_pages(page, page2[0]);
+
+ parent = *new;
+ if (ret < 0) {
+ put_page(page2[0]);
+ new = &parent->rb_left;
+ } else if (ret > 0) {
+ put_page(page2[0]);
+ new = &parent->rb_right;
+ } else {
+ return tree_rmap_item;
+ }
+ }
+
+ rmap_item->address |= NODE_FLAG;
+ rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
+ rb_link_node(&rmap_item->node, parent, new);
+ rb_insert_color(&rmap_item->node, &root_unstable_tree);
+
+ ksm_pages_unshared++;
+ return NULL;
+}
+
+/*
+ * stable_tree_append - add another rmap_item to the linked list of
+ * rmap_items hanging off a given node of the stable tree, all sharing
+ * the same ksm page.
+ */
+static void stable_tree_append(struct rmap_item *rmap_item,
+ struct rmap_item *tree_rmap_item)
+{
+ rmap_item->next = tree_rmap_item->next;
+ rmap_item->prev = tree_rmap_item;
+
+ if (tree_rmap_item->next)
+ tree_rmap_item->next->prev = rmap_item;
+
+ tree_rmap_item->next = rmap_item;
+ rmap_item->address |= STABLE_FLAG;
+
+ ksm_pages_sharing++;
+}
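+
+/*
+ * Illustration (not from the original changelog): a ksm page mapped at
+ * three addresses is represented by one rb_node rmap_item (counted in
+ * pages_shared) plus two further rmap_items chained through next/prev
+ * (counted in pages_sharing).
+ */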
+
+/*
+ * cmp_and_merge_page - first see if page can be merged into the stable tree;
+ * if not, compare checksum to previous and if it's the same, see if page can
+ * be inserted into the unstable tree, or merged with a page already there and
+ * both transferred to the stable tree.
+ *
+ * @page: the page for which we are searching an identical page.
+ * @rmap_item: the reverse mapping into the virtual address of this page
+ */
+static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+{
+ struct page *page2[1];
+ struct rmap_item *tree_rmap_item;
+ unsigned int checksum;
+ int err;
+
+ if (in_stable_tree(rmap_item))
+ remove_rmap_item_from_tree(rmap_item);
+
+ /* We first start with searching the page inside the stable tree */
+ tree_rmap_item = stable_tree_search(page, page2, rmap_item);
+ if (tree_rmap_item) {
+ if (page == page2[0]) /* forked */
+ err = 0;
+ else
+ err = try_to_merge_with_ksm_page(rmap_item->mm,
+ rmap_item->address,
+ page, page2[0]);
+ put_page(page2[0]);
+
+ if (!err) {
+ /*
+ * The page was successfully merged:
+ * add its rmap_item to the stable tree.
+ */
+ stable_tree_append(rmap_item, tree_rmap_item);
+ }
+ return;
+ }
+
+ /*
+ * A ksm page might have got here by fork, but its other
+ * references have already been removed from the stable tree.
+ * Or it might be left over from a break_ksm which failed
+ * when the mem_cgroup had reached its limit: try again now.
+ */
+ if (PageKsm(page))
+ break_cow(rmap_item->mm, rmap_item->address);
+
+ /*
+ * If the checksum of the page has changed since the last time we
+ * calculated it, this page is likely to change frequently: therefore we
+ * don't want to insert it into the unstable tree, and we don't want to
+ * waste our time searching for something identical to it there.
+ */
+ checksum = calc_checksum(page);
+ if (rmap_item->oldchecksum != checksum) {
+ rmap_item->oldchecksum = checksum;
+ return;
+ }
+
+ tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item);
+ if (tree_rmap_item) {
+ err = try_to_merge_two_pages(rmap_item->mm,
+ rmap_item->address, page,
+ tree_rmap_item->mm,
+ tree_rmap_item->address, page2[0]);
+ /*
+ * As soon as we merge this page, we want to remove the
+ * rmap_item of the page we have merged with from the unstable
+ * tree, and insert it instead as a new node in the stable tree.
+ */
+ if (!err) {
+ rb_erase(&tree_rmap_item->node, &root_unstable_tree);
+ tree_rmap_item->address &= ~NODE_FLAG;
+ ksm_pages_unshared--;
+
+ /*
+ * If we fail to insert the page into the stable tree,
+ * we will have 2 virtual addresses that are pointing
+ * to a ksm page left outside the stable tree,
+ * in which case we need to break_cow on both.
+ */
+ if (stable_tree_insert(page2[0], tree_rmap_item))
+ stable_tree_append(rmap_item, tree_rmap_item);
+ else {
+ break_cow(tree_rmap_item->mm,
+ tree_rmap_item->address);
+ break_cow(rmap_item->mm, rmap_item->address);
+ }
+ }
+
+ put_page(page2[0]);
+ }
+}
+
+static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
+ struct list_head *cur,
+ unsigned long addr)
+{
+ struct rmap_item *rmap_item;
+
+ while (cur != &mm_slot->rmap_list) {
+ rmap_item = list_entry(cur, struct rmap_item, link);
+ if ((rmap_item->address & PAGE_MASK) == addr) {
+ if (!in_stable_tree(rmap_item))
+ remove_rmap_item_from_tree(rmap_item);
+ return rmap_item;
+ }
+ if (rmap_item->address > addr)
+ break;
+ cur = cur->next;
+ remove_rmap_item_from_tree(rmap_item);
+ list_del(&rmap_item->link);
+ free_rmap_item(rmap_item);
+ }
+
+ rmap_item = alloc_rmap_item();
+ if (rmap_item) {
+ /* It has already been zeroed */
+ rmap_item->mm = mm_slot->mm;
+ rmap_item->address = addr;
+ list_add_tail(&rmap_item->link, cur);
+ }
+ return rmap_item;
+}
+
+static struct rmap_item *scan_get_next_rmap_item(struct page **page)
+{
+ struct mm_struct *mm;
+ struct mm_slot *slot;
+ struct vm_area_struct *vma;
+ struct rmap_item *rmap_item;
+
+ if (list_empty(&ksm_mm_head.mm_list))
+ return NULL;
+
+ slot = ksm_scan.mm_slot;
+ if (slot == &ksm_mm_head) {
+ root_unstable_tree = RB_ROOT;
+
+ spin_lock(&ksm_mmlist_lock);
+ slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
+ ksm_scan.mm_slot = slot;
+ spin_unlock(&ksm_mmlist_lock);
+next_mm:
+ ksm_scan.address = 0;
+ ksm_scan.rmap_item = list_entry(&slot->rmap_list,
+ struct rmap_item, link);
+ }
+
+ mm = slot->mm;
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ vma = NULL;
+ else
+ vma = find_vma(mm, ksm_scan.address);
+
+ for (; vma; vma = vma->vm_next) {
+ if (!(vma->vm_flags & VM_MERGEABLE))
+ continue;
+ if (ksm_scan.address < vma->vm_start)
+ ksm_scan.address = vma->vm_start;
+ if (!vma->anon_vma)
+ ksm_scan.address = vma->vm_end;
+
+ while (ksm_scan.address < vma->vm_end) {
+ if (ksm_test_exit(mm))
+ break;
+ *page = follow_page(vma, ksm_scan.address, FOLL_GET);
+ if (*page && PageAnon(*page)) {
+ flush_anon_page(vma, *page, ksm_scan.address);
+ flush_dcache_page(*page);
+ rmap_item = get_next_rmap_item(slot,
+ ksm_scan.rmap_item->link.next,
+ ksm_scan.address);
+ if (rmap_item) {
+ ksm_scan.rmap_item = rmap_item;
+ ksm_scan.address += PAGE_SIZE;
+ } else
+ put_page(*page);
+ up_read(&mm->mmap_sem);
+ return rmap_item;
+ }
+ if (*page)
+ put_page(*page);
+ ksm_scan.address += PAGE_SIZE;
+ cond_resched();
+ }
+ }
+
+ if (ksm_test_exit(mm)) {
+ ksm_scan.address = 0;
+ ksm_scan.rmap_item = list_entry(&slot->rmap_list,
+ struct rmap_item, link);
+ }
+ /*
+ * Nuke all the rmap_items that are above this current rmap:
+ * because there were no VM_MERGEABLE vmas with such addresses.
+ */
+ remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
+
+ spin_lock(&ksm_mmlist_lock);
+ ksm_scan.mm_slot = list_entry(slot->mm_list.next,
+ struct mm_slot, mm_list);
+ if (ksm_scan.address == 0) {
+ /*
+ * We've completed a full scan of all vmas, holding mmap_sem
+ * throughout, and found no VM_MERGEABLE: so do the same as
+ * __ksm_exit does to remove this mm from all our lists now.
+ * This applies either when cleaning up after __ksm_exit
+ * (but beware: we can reach here even before __ksm_exit),
+ * or when all VM_MERGEABLE areas have been unmapped (and
+ * mmap_sem then protects against race with MADV_MERGEABLE).
+ */
+ hlist_del(&slot->link);
+ list_del(&slot->mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ free_mm_slot(slot);
+ clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ up_read(&mm->mmap_sem);
+ mmdrop(mm);
+ } else {
+ spin_unlock(&ksm_mmlist_lock);
+ up_read(&mm->mmap_sem);
+ }
+
+ /* Repeat until we've completed scanning the whole list */
+ slot = ksm_scan.mm_slot;
+ if (slot != &ksm_mm_head)
+ goto next_mm;
+
+ ksm_scan.seqnr++;
+ return NULL;
+}
+
+/**
+ * ksm_do_scan - the ksm scanner main worker function.
+ * @scan_npages: number of pages we want to scan before we return.
+ */
+static void ksm_do_scan(unsigned int scan_npages)
+{
+ struct rmap_item *rmap_item;
+ struct page *page;
+
+ while (scan_npages--) {
+ cond_resched();
+ rmap_item = scan_get_next_rmap_item(&page);
+ if (!rmap_item)
+ return;
+ if (!PageKsm(page) || !in_stable_tree(rmap_item))
+ cmp_and_merge_page(page, rmap_item);
+ else if (page_mapcount(page) == 1) {
+ /*
+ * Replace now-unshared ksm page by ordinary page.
+ */
+ break_cow(rmap_item->mm, rmap_item->address);
+ remove_rmap_item_from_tree(rmap_item);
+ rmap_item->oldchecksum = calc_checksum(page);
+ }
+ put_page(page);
+ }
+}
+
+static int ksmd_should_run(void)
+{
+ return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
+}
+
+static int ksm_scan_thread(void *nothing)
+{
+ set_user_nice(current, 5);
+
+ while (!kthread_should_stop()) {
+ mutex_lock(&ksm_thread_mutex);
+ if (ksmd_should_run())
+ ksm_do_scan(ksm_thread_pages_to_scan);
+ mutex_unlock(&ksm_thread_mutex);
+
+ if (ksmd_should_run()) {
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(ksm_thread_sleep_millisecs));
+ } else {
+ wait_event_interruptible(ksm_thread_wait,
+ ksmd_should_run() || kthread_should_stop());
+ }
+ }
+ return 0;
+}
+
+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int err;
+
+ switch (advice) {
+ case MADV_MERGEABLE:
+ /*
+ * Be somewhat over-protective for now!
+ */
+ if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
+ VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+ VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+ VM_MIXEDMAP | VM_SAO))
+ return 0; /* just ignore the advice */
+
+ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+ err = __ksm_enter(mm);
+ if (err)
+ return err;
+ }
+
+ *vm_flags |= VM_MERGEABLE;
+ break;
+
+ case MADV_UNMERGEABLE:
+ if (!(*vm_flags & VM_MERGEABLE))
+ return 0; /* just ignore the advice */
+
+ if (vma->anon_vma) {
+ err = unmerge_ksm_pages(vma, start, end);
+ if (err)
+ return err;
+ }
+
+ *vm_flags &= ~VM_MERGEABLE;
+ break;
+ }
+
+ return 0;
+}
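+
+/*
+ * Usage sketch (userspace side, not part of this file): an application
+ * opts an anonymous range in with
+ * madvise(addr, length, MADV_MERGEABLE);
+ * and opts it out again with MADV_UNMERGEABLE; both advices reach
+ * ksm_madvise() via madvise_behavior() in mm/madvise.c.
+ */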
+
+int __ksm_enter(struct mm_struct *mm)
+{
+ struct mm_slot *mm_slot;
+ int needs_wakeup;
+
+ mm_slot = alloc_mm_slot();
+ if (!mm_slot)
+ return -ENOMEM;
+
+ /* Check ksm_run too? Would need tighter locking */
+ needs_wakeup = list_empty(&ksm_mm_head.mm_list);
+
+ spin_lock(&ksm_mmlist_lock);
+ insert_to_mm_slots_hash(mm, mm_slot);
+ /*
+ * Insert just behind the scanning cursor, to let the area settle
+ * down a little; when fork is followed by immediate exec, we don't
+ * want ksmd to waste time setting up and tearing down an rmap_list.
+ */
+ list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
+ spin_unlock(&ksm_mmlist_lock);
+
+ set_bit(MMF_VM_MERGEABLE, &mm->flags);
+ atomic_inc(&mm->mm_count);
+
+ if (needs_wakeup)
+ wake_up_interruptible(&ksm_thread_wait);
+
+ return 0;
+}
+
+void __ksm_exit(struct mm_struct *mm)
+{
+ struct mm_slot *mm_slot;
+ int easy_to_free = 0;
+
+ /*
+ * This process is exiting: if it's straightforward (as is the
+ * case when ksmd was never running), free mm_slot immediately.
+ * But if it's at the cursor or has rmap_items linked to it, use
+ * mmap_sem to synchronize with any break_cows before pagetables
+ * are freed, and leave the mm_slot on the list for ksmd to free.
+ * Beware: ksm may already have noticed it exiting and freed the slot.
+ */
+
+ spin_lock(&ksm_mmlist_lock);
+ mm_slot = get_mm_slot(mm);
+ if (mm_slot && ksm_scan.mm_slot != mm_slot) {
+ if (list_empty(&mm_slot->rmap_list)) {
+ hlist_del(&mm_slot->link);
+ list_del(&mm_slot->mm_list);
+ easy_to_free = 1;
+ } else {
+ list_move(&mm_slot->mm_list,
+ &ksm_scan.mm_slot->mm_list);
+ }
+ }
+ spin_unlock(&ksm_mmlist_lock);
+
+ if (easy_to_free) {
+ free_mm_slot(mm_slot);
+ clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ mmdrop(mm);
+ } else if (mm_slot) {
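+ /*
+ * Taking and dropping mmap_sem for write here simply waits out any
+ * break_cow or ksmd scan still holding it for read, as described in
+ * the comment at the top of this function.
+ */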
+ down_write(&mm->mmap_sem);
+ up_write(&mm->mmap_sem);
+ }
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * This all compiles without CONFIG_SYSFS, but is a waste of space.
+ */
+
+#define KSM_ATTR_RO(_name) \
+ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+#define KSM_ATTR(_name) \
+ static struct kobj_attribute _name##_attr = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+static ssize_t sleep_millisecs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
+}
+
+static ssize_t sleep_millisecs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long msecs;
+ int err;
+
+ err = strict_strtoul(buf, 10, &msecs);
+ if (err || msecs > UINT_MAX)
+ return -EINVAL;
+
+ ksm_thread_sleep_millisecs = msecs;
+
+ return count;
+}
+KSM_ATTR(sleep_millisecs);
+
+static ssize_t pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
+}
+
+static ssize_t pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long nr_pages;
+
+ err = strict_strtoul(buf, 10, &nr_pages);
+ if (err || nr_pages > UINT_MAX)
+ return -EINVAL;
+
+ ksm_thread_pages_to_scan = nr_pages;
+
+ return count;
+}
+KSM_ATTR(pages_to_scan);
+
+static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_run);
+}
+
+static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long flags;
+
+ err = strict_strtoul(buf, 10, &flags);
+ if (err || flags > UINT_MAX)
+ return -EINVAL;
+ if (flags > KSM_RUN_UNMERGE)
+ return -EINVAL;
+
+ /*
+ * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
+ * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
+ * breaking COW to free the unswappable pages_shared (but leaves
+ * mm_slots on the list for when ksmd may be set running again).
+ */
+
+ mutex_lock(&ksm_thread_mutex);
+ if (ksm_run != flags) {
+ ksm_run = flags;
+ if (flags & KSM_RUN_UNMERGE) {
+ current->flags |= PF_OOM_ORIGIN;
+ err = unmerge_and_remove_all_rmap_items();
+ current->flags &= ~PF_OOM_ORIGIN;
+ if (err) {
+ ksm_run = KSM_RUN_STOP;
+ count = err;
+ }
+ }
+ }
+ mutex_unlock(&ksm_thread_mutex);
+
+ if (flags & KSM_RUN_MERGE)
+ wake_up_interruptible(&ksm_thread_wait);
+
+ return count;
+}
+KSM_ATTR(run);
+
+static ssize_t max_kernel_pages_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long nr_pages;
+
+ err = strict_strtoul(buf, 10, &nr_pages);
+ if (err)
+ return -EINVAL;
+
+ ksm_max_kernel_pages = nr_pages;
+
+ return count;
+}
+
+static ssize_t max_kernel_pages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
+}
+KSM_ATTR(max_kernel_pages);
+
+static ssize_t pages_shared_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_pages_shared);
+}
+KSM_ATTR_RO(pages_shared);
+
+static ssize_t pages_sharing_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_pages_sharing);
+}
+KSM_ATTR_RO(pages_sharing);
+
+static ssize_t pages_unshared_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_pages_unshared);
+}
+KSM_ATTR_RO(pages_unshared);
+
+static ssize_t pages_volatile_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ long ksm_pages_volatile;
+
+ ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
+ - ksm_pages_sharing - ksm_pages_unshared;
+ /*
+ * It was not worth any locking to calculate that statistic,
+ * but it might therefore sometimes be negative: conceal that.
+ */
+ if (ksm_pages_volatile < 0)
+ ksm_pages_volatile = 0;
+ return sprintf(buf, "%ld\n", ksm_pages_volatile);
+}
+KSM_ATTR_RO(pages_volatile);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_scan.seqnr);
+}
+KSM_ATTR_RO(full_scans);
+
+static struct attribute *ksm_attrs[] = {
+ &sleep_millisecs_attr.attr,
+ &pages_to_scan_attr.attr,
+ &run_attr.attr,
+ &max_kernel_pages_attr.attr,
+ &pages_shared_attr.attr,
+ &pages_sharing_attr.attr,
+ &pages_unshared_attr.attr,
+ &pages_volatile_attr.attr,
+ &full_scans_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ksm_attr_group = {
+ .attrs = ksm_attrs,
+ .name = "ksm",
+};
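+
+/*
+ * Control sketch (assuming sysfs is mounted at /sys): the group above
+ * appears as /sys/kernel/mm/ksm/, so for example
+ * echo 100 > /sys/kernel/mm/ksm/pages_to_scan
+ * echo 1 > /sys/kernel/mm/ksm/run
+ * sets the batch size and starts ksmd; writing 2 to run stops it and
+ * unmerges all rmap_items.
+ */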
+#endif /* CONFIG_SYSFS */
+
+static int __init ksm_init(void)
+{
+ struct task_struct *ksm_thread;
+ int err;
+
+ ksm_init_max_kernel_pages();
+
+ err = ksm_slab_init();
+ if (err)
+ goto out;
+
+ err = mm_slots_hash_init();
+ if (err)
+ goto out_free1;
+
+ ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
+ if (IS_ERR(ksm_thread)) {
+ printk(KERN_ERR "ksm: creating kthread failed\n");
+ err = PTR_ERR(ksm_thread);
+ goto out_free2;
+ }
+
+#ifdef CONFIG_SYSFS
+ err = sysfs_create_group(mm_kobj, &ksm_attr_group);
+ if (err) {
+ printk(KERN_ERR "ksm: register sysfs failed\n");
+ kthread_stop(ksm_thread);
+ goto out_free2;
+ }
+#endif /* CONFIG_SYSFS */
+
+ return 0;
+
+out_free2:
+ mm_slots_hash_free();
+out_free1:
+ ksm_slab_free();
+out:
+ return err;
+}
+module_init(ksm_init)
diff --git a/mm/madvise.c b/mm/madvise.c
index 76eb4193acd..35b1479b7c9 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,6 +11,7 @@
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
+#include <linux/ksm.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
@@ -41,7 +42,7 @@ static long madvise_behavior(struct vm_area_struct * vma,
struct mm_struct * mm = vma->vm_mm;
int error = 0;
pgoff_t pgoff;
- int new_flags = vma->vm_flags;
+ unsigned long new_flags = vma->vm_flags;
switch (behavior) {
case MADV_NORMAL:
@@ -57,8 +58,18 @@ static long madvise_behavior(struct vm_area_struct * vma,
new_flags |= VM_DONTCOPY;
break;
case MADV_DOFORK:
+ if (vma->vm_flags & VM_IO) {
+ error = -EINVAL;
+ goto out;
+ }
new_flags &= ~VM_DONTCOPY;
break;
+ case MADV_MERGEABLE:
+ case MADV_UNMERGEABLE:
+ error = ksm_madvise(vma, start, end, behavior, &new_flags);
+ if (error)
+ goto out;
+ break;
}
if (new_flags == vma->vm_flags) {
@@ -207,41 +218,46 @@ static long madvise_remove(struct vm_area_struct *vma,
return error;
}
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Error injection support for memory error handling.
+ */
+static int madvise_hwpoison(unsigned long start, unsigned long end)
+{
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ for (; start < end; start += PAGE_SIZE) {
+ struct page *p;
+ int ret = get_user_pages(current, current->mm, start, 1,
+ 0, 0, &p, NULL);
+ if (ret != 1)
+ return ret;
+ printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
+ page_to_pfn(p), start);
+ /* Ignore return value for now */
+ __memory_failure(page_to_pfn(p), 0, 1);
+ put_page(p);
+ }
+ return ret;
+}
+#endif
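+/*
+ * (Illustrative only: a test program would typically mmap an anonymous
+ * page, touch it, then call madvise(addr, PAGE_SIZE, MADV_HWPOISON) with
+ * CAP_SYS_ADMIN to inject a failure on that page.)
+ */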
+
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
unsigned long start, unsigned long end, int behavior)
{
- long error;
-
switch (behavior) {
- case MADV_DOFORK:
- if (vma->vm_flags & VM_IO) {
- error = -EINVAL;
- break;
- }
- case MADV_DONTFORK:
- case MADV_NORMAL:
- case MADV_SEQUENTIAL:
- case MADV_RANDOM:
- error = madvise_behavior(vma, prev, start, end, behavior);
- break;
case MADV_REMOVE:
- error = madvise_remove(vma, prev, start, end);
- break;
-
+ return madvise_remove(vma, prev, start, end);
case MADV_WILLNEED:
- error = madvise_willneed(vma, prev, start, end);
- break;
-
+ return madvise_willneed(vma, prev, start, end);
case MADV_DONTNEED:
- error = madvise_dontneed(vma, prev, start, end);
- break;
-
+ return madvise_dontneed(vma, prev, start, end);
default:
- BUG();
- break;
+ return madvise_behavior(vma, prev, start, end, behavior);
}
- return error;
}
static int
@@ -256,12 +272,17 @@ madvise_behavior_valid(int behavior)
case MADV_REMOVE:
case MADV_WILLNEED:
case MADV_DONTNEED:
+#ifdef CONFIG_KSM
+ case MADV_MERGEABLE:
+ case MADV_UNMERGEABLE:
+#endif
return 1;
default:
return 0;
}
}
+
/*
* The madvise(2) system call.
*
@@ -286,6 +307,12 @@ madvise_behavior_valid(int behavior)
* so the kernel can free resources associated with it.
* MADV_REMOVE - the application wants to free up the given range of
* pages and associated backing store.
+ * MADV_DONTFORK - omit this area from child's address space when forking:
+ * typically, to avoid COWing pages pinned by get_user_pages().
+ * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
+ * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
+ * this area with pages of identical content from other such areas.
+ * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
*
* return values:
* zero - success
@@ -307,6 +334,10 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
int write;
size_t len;
+#ifdef CONFIG_MEMORY_FAILURE
+ if (behavior == MADV_HWPOISON)
+ return madvise_hwpoison(start, start+len_in);
+#endif
if (!madvise_behavior_valid(behavior))
return error;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fd4529d86de..e2b98a6875c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -29,6 +29,7 @@
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
+#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
@@ -43,6 +44,7 @@
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
+struct mem_cgroup *root_mem_cgroup __read_mostly;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -53,6 +55,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#endif
static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */
+#define SOFTLIMIT_EVENTS_THRESH (1000)
/*
* Statistics for memory cgroup.
@@ -66,6 +69,8 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
+ MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
+ MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
};
@@ -78,6 +83,20 @@ struct mem_cgroup_stat {
struct mem_cgroup_stat_cpu cpustat[0];
};
+static inline void
+__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat,
+ enum mem_cgroup_stat_index idx)
+{
+ stat->count[idx] = 0;
+}
+
+static inline s64
+__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
+ enum mem_cgroup_stat_index idx)
+{
+ return stat->count[idx];
+}
+
/*
* For accounting under irq disable, no need for increment preempt count.
*/
@@ -117,6 +136,12 @@ struct mem_cgroup_per_zone {
unsigned long count[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
+ struct rb_node tree_node; /* RB tree node */
+ unsigned long long usage_in_excess;/* Set to the value by which */
+ /* the soft limit is exceeded*/
+ bool on_tree;
+ struct mem_cgroup *mem; /* Back pointer, we cannot */
+ /* use container_of */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -130,6 +155,26 @@ struct mem_cgroup_lru_info {
};
/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+ struct rb_root rb_root;
+ spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+ struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+ struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
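+/*
+ * So soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid] is the
+ * RB tree, ordered by usage_in_excess, of the mem_cgroup_per_zones of that
+ * node and zone which are currently over their soft limit (see
+ * __mem_cgroup_insert_exceeded() below).
+ */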
+
+/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
* statistics based on the statistics developed by Rik Van Riel for clock-pro,
@@ -186,6 +231,13 @@ struct mem_cgroup {
struct mem_cgroup_stat stat;
};
+/*
+ * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
+ * limit reclaim to prevent infinite loops, if they ever occur.
+ */
+#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
+#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
MEM_CGROUP_CHARGE_TYPE_MAPPED,
@@ -200,13 +252,8 @@ enum charge_type {
#define PCGF_CACHE (1UL << PCG_CACHE)
#define PCGF_USED (1UL << PCG_USED)
#define PCGF_LOCK (1UL << PCG_LOCK)
-static const unsigned long
-pcg_default_flags[NR_CHARGE_TYPE] = {
- PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
- PCGF_USED | PCGF_LOCK, /* Anon */
- PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
- 0, /* FORCE */
-};
+/* Not used, but added here for completeness */
+#define PCGF_ACCT (1UL << PCG_ACCT)
/* for encoding cft->private value on file */
#define _MEM (0)
@@ -215,15 +262,241 @@ pcg_default_flags[NR_CHARGE_TYPE] = {
#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
+/*
+ * Reclaim flags for mem_cgroup_hierarchical_reclaim
+ */
+#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
+#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
+#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
+#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
+#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
+#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
+
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
+static struct mem_cgroup_per_zone *
+mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+{
+ return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+}
+
+static struct mem_cgroup_per_zone *
+page_cgroup_zoneinfo(struct page_cgroup *pc)
+{
+ struct mem_cgroup *mem = pc->mem_cgroup;
+ int nid = page_cgroup_nid(pc);
+ int zid = page_cgroup_zid(pc);
+
+ if (!mem)
+ return NULL;
+
+ return mem_cgroup_zoneinfo(mem, nid, zid);
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+ return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
+
+ return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct rb_node **p = &mctz->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct mem_cgroup_per_zone *mz_node;
+
+ if (mz->on_tree)
+ return;
+
+ mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+ while (*p) {
+ parent = *p;
+ mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+ tree_node);
+ if (mz->usage_in_excess < mz_node->usage_in_excess)
+ p = &(*p)->rb_left;
+ /*
+ * We can't avoid mem cgroups that are over their soft
+ * limit by the same amount
+ */
+ else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mz->tree_node, parent, p);
+ rb_insert_color(&mz->tree_node, &mctz->rb_root);
+ mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ if (!mz->on_tree)
+ return;
+ rb_erase(&mz->tree_node, &mctz->rb_root);
+ mz->on_tree = false;
+}
+
+static void
+mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ spin_lock(&mctz->lock);
+ __mem_cgroup_insert_exceeded(mem, mz, mctz);
+ spin_unlock(&mctz->lock);
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ spin_lock(&mctz->lock);
+ __mem_cgroup_remove_exceeded(mem, mz, mctz);
+ spin_unlock(&mctz->lock);
+}
+
+static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
+{
+ bool ret = false;
+ int cpu;
+ s64 val;
+ struct mem_cgroup_stat_cpu *cpustat;
+
+ cpu = get_cpu();
+ cpustat = &mem->stat.cpustat[cpu];
+ val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
+ if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
+ __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
+ ret = true;
+ }
+ put_cpu();
+ return ret;
+}
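+
+/*
+ * (So the soft limit tree is only reconsidered roughly once per
+ * SOFTLIMIT_EVENTS_THRESH pagein/pageout events on each cpu, rather
+ * than on every single charge.)
+ */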
+
+static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
+{
+ unsigned long long prev_usage_in_excess, new_usage_in_excess;
+ bool updated_tree = false;
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_tree_per_zone *mctz;
+
+ mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+ mctz = soft_limit_tree_from_page(page);
+
+ /*
+ * We do updates in lazy mode: mem_cgroups are removed
+ * lazily from the per-zone, per-node rb tree
+ */
+ prev_usage_in_excess = mz->usage_in_excess;
+
+ new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+ if (prev_usage_in_excess) {
+ mem_cgroup_remove_exceeded(mem, mz, mctz);
+ updated_tree = true;
+ }
+ if (!new_usage_in_excess)
+ goto done;
+ mem_cgroup_insert_exceeded(mem, mz, mctz);
+
+done:
+ if (updated_tree) {
+ spin_lock(&mctz->lock);
+ mz->usage_in_excess = new_usage_in_excess;
+ spin_unlock(&mctz->lock);
+ }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
+{
+ int node, zone;
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_tree_per_zone *mctz;
+
+ for_each_node_state(node, N_POSSIBLE) {
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ mz = mem_cgroup_zoneinfo(mem, node, zone);
+ mctz = soft_limit_tree_node_zone(node, zone);
+ mem_cgroup_remove_exceeded(mem, mz, mctz);
+ }
+ }
+}
+
+static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
+{
+ return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct rb_node *rightmost = NULL;
+ struct mem_cgroup_per_zone *mz = NULL;
+
+retry:
+ rightmost = rb_last(&mctz->rb_root);
+ if (!rightmost)
+ goto done; /* Nothing to reclaim from */
+
+ mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+ /*
+ * Remove the node now but someone else can add it back,
+ * we will add it back at the end of reclaim to its correct
+ * position in the tree.
+ */
+ __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+ if (!res_counter_soft_limit_excess(&mz->mem->res) ||
+ !css_tryget(&mz->mem->css))
+ goto retry;
+done:
+ return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ spin_lock(&mctz->lock);
+ mz = __mem_cgroup_largest_soft_limit_node(mctz);
+ spin_unlock(&mctz->lock);
+ return mz;
+}
+
+static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
+ bool charge)
+{
+ int val = (charge) ? 1 : -1;
+ struct mem_cgroup_stat *stat = &mem->stat;
+ struct mem_cgroup_stat_cpu *cpustat;
+ int cpu = get_cpu();
+
+ cpustat = &stat->cpustat[cpu];
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
+ put_cpu();
+}
+
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
struct page_cgroup *pc,
bool charge)
{
- int val = (charge)? 1 : -1;
+ int val = (charge) ? 1 : -1;
struct mem_cgroup_stat *stat = &mem->stat;
struct mem_cgroup_stat_cpu *cpustat;
int cpu = get_cpu();
@@ -240,28 +513,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
else
__mem_cgroup_stat_add_safe(cpustat,
MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1);
put_cpu();
}
-static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
-{
- return &mem->info.nodeinfo[nid]->zoneinfo[zid];
-}
-
-static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
-{
- struct mem_cgroup *mem = pc->mem_cgroup;
- int nid = page_cgroup_nid(pc);
- int zid = page_cgroup_zid(pc);
-
- if (!mem)
- return NULL;
-
- return mem_cgroup_zoneinfo(mem, nid, zid);
-}
-
static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
enum lru_list idx)
{
@@ -354,6 +609,11 @@ static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
return ret;
}
+static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
+{
+ return (mem == root_mem_cgroup);
+}
+
/*
* Following LRU functions are allowed to be used without PCG_LOCK.
* Operations are called by routine of global LRU independently from memcg.
@@ -371,22 +631,24 @@ static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
struct page_cgroup *pc;
- struct mem_cgroup *mem;
struct mem_cgroup_per_zone *mz;
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
/* can happen while we handle swapcache. */
- if (list_empty(&pc->lru) || !pc->mem_cgroup)
+ if (!TestClearPageCgroupAcctLRU(pc))
return;
+ VM_BUG_ON(!pc->mem_cgroup);
/*
* We don't check PCG_USED bit. It's cleared when the "page" is finally
* removed from global LRU.
*/
mz = page_cgroup_zoneinfo(pc);
- mem = pc->mem_cgroup;
MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+ if (mem_cgroup_is_root(pc->mem_cgroup))
+ return;
+ VM_BUG_ON(list_empty(&pc->lru));
list_del_init(&pc->lru);
return;
}
@@ -410,8 +672,8 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
* For making pc->mem_cgroup visible, insert smp_rmb() here.
*/
smp_rmb();
- /* unused page is not rotated. */
- if (!PageCgroupUsed(pc))
+ /* unused or root page is not rotated. */
+ if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
return;
mz = page_cgroup_zoneinfo(pc);
list_move(&pc->lru, &mz->lists[lru]);
@@ -425,6 +687,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
+ VM_BUG_ON(PageCgroupAcctLRU(pc));
/*
* Used bit is set without atomic ops but after smp_wmb().
* For making pc->mem_cgroup visible, insert smp_rmb() here.
@@ -435,6 +698,9 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
mz = page_cgroup_zoneinfo(pc);
MEM_CGROUP_ZSTAT(mz, lru) += 1;
+ SetPageCgroupAcctLRU(pc);
+ if (mem_cgroup_is_root(pc->mem_cgroup))
+ return;
list_add(&pc->lru, &mz->lists[lru]);
}
@@ -469,7 +735,7 @@ static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
spin_lock_irqsave(&zone->lru_lock, flags);
/* link when the page is linked to LRU but page_cgroup isn't */
- if (PageLRU(page) && list_empty(&pc->lru))
+ if (PageLRU(page) && !PageCgroupAcctLRU(pc))
mem_cgroup_add_lru_list(page, page_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
@@ -648,7 +914,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
int nid = z->zone_pgdat->node_id;
int zid = zone_idx(z);
struct mem_cgroup_per_zone *mz;
- int lru = LRU_FILE * !!file + !!active;
+ int lru = LRU_FILE * file + active;
int ret;
BUG_ON(!mem_cont);
@@ -855,28 +1121,62 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
* If shrink==true, for avoiding to free too much, this returns immedieately.
*/
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
- gfp_t gfp_mask, bool noswap, bool shrink)
+ struct zone *zone,
+ gfp_t gfp_mask,
+ unsigned long reclaim_options)
{
struct mem_cgroup *victim;
int ret, total = 0;
int loop = 0;
+ bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
+ bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
+ bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
+ unsigned long excess = mem_cgroup_get_excess(root_mem);
/* If memsw_is_minimum==1, swap-out is of-no-use. */
if (root_mem->memsw_is_minimum)
noswap = true;
- while (loop < 2) {
+ while (1) {
victim = mem_cgroup_select_victim(root_mem);
- if (victim == root_mem)
+ if (victim == root_mem) {
loop++;
+ if (loop >= 2) {
+ /*
+ * If we have not been able to reclaim
+ * anything, it might be because there are
+ * no reclaimable pages under this hierarchy
+ */
+ if (!check_soft || !total) {
+ css_put(&victim->css);
+ break;
+ }
+ /*
+ * We want to do more targeted reclaim.
+ * excess >> 2 is not too excessive, so as not to
+ * reclaim too much, nor too little, so that we don't
+ * keep coming back to reclaim from this cgroup
+ */
+ if (total >= (excess >> 2) ||
+ (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
+ css_put(&victim->css);
+ break;
+ }
+ }
+ }
if (!mem_cgroup_local_usage(&victim->stat)) {
/* this cgroup's local usage == 0 */
css_put(&victim->css);
continue;
}
/* we use swappiness of local cgroup */
- ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
- get_swappiness(victim));
+ if (check_soft)
+ ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
+ noswap, get_swappiness(victim), zone,
+ zone->zone_pgdat->node_id);
+ else
+ ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
+ noswap, get_swappiness(victim));
css_put(&victim->css);
/*
* At shrinking usage, we can't check we should stop here or
@@ -886,7 +1186,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
if (shrink)
return ret;
total += ret;
- if (mem_cgroup_check_under_limit(root_mem))
+ if (check_soft) {
+ if (res_counter_check_under_soft_limit(&root_mem->res))
+ return total;
+ } else if (mem_cgroup_check_under_limit(root_mem))
return 1 + total;
}
return total;
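For readers following the new signature, here is a minimal sketch (not part of the patch) of how the single reclaim_options bitmask now carries what used to be the separate noswap/shrink booleans plus the new soft-limit mode; the flag values below are stand-ins for the real MEM_CGROUP_RECLAIM_* definitions:

/* Illustrative only: stand-in values for the MEM_CGROUP_RECLAIM_* flags. */
#include <stdbool.h>
#include <stdio.h>

#define RECLAIM_NOSWAP	(1UL << 0)
#define RECLAIM_SHRINK	(1UL << 1)
#define RECLAIM_SOFT	(1UL << 2)

static void show_mode(unsigned long reclaim_options)
{
	bool noswap = reclaim_options & RECLAIM_NOSWAP;
	bool shrink = reclaim_options & RECLAIM_SHRINK;
	bool check_soft = reclaim_options & RECLAIM_SOFT;

	printf("noswap=%d shrink=%d check_soft=%d\n",
	       noswap, shrink, check_soft);
}

int main(void)
{
	show_mode(RECLAIM_SHRINK);			/* limit resize path */
	show_mode(RECLAIM_NOSWAP | RECLAIM_SHRINK);	/* memsw resize path */
	show_mode(RECLAIM_SOFT);			/* soft limit reclaim */
	return 0;
}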
@@ -965,11 +1268,11 @@ done:
*/
static int __mem_cgroup_try_charge(struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcg,
- bool oom)
+ bool oom, struct page *page)
{
- struct mem_cgroup *mem, *mem_over_limit;
+ struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
- struct res_counter *fail_res;
+ struct res_counter *fail_res, *soft_fail_res = NULL;
if (unlikely(test_thread_flag(TIF_MEMDIE))) {
/* Don't account this! */
@@ -996,20 +1299,23 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
VM_BUG_ON(css_is_removed(&mem->css));
while (1) {
- int ret;
- bool noswap = false;
+ int ret = 0;
+ unsigned long flags = 0;
- ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+ if (mem_cgroup_is_root(mem))
+ goto done;
+ ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
+ &soft_fail_res);
if (likely(!ret)) {
if (!do_swap_account)
break;
ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
- &fail_res);
+ &fail_res, NULL);
if (likely(!ret))
break;
/* mem+swap counter fails */
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- noswap = true;
+ res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+ flags |= MEM_CGROUP_RECLAIM_NOSWAP;
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
memsw);
} else
@@ -1020,8 +1326,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
if (!(gfp_mask & __GFP_WAIT))
goto nomem;
- ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
- noswap, false);
+ ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
+ gfp_mask, flags);
if (ret)
continue;
@@ -1046,13 +1352,24 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
goto nomem;
}
}
+ /*
+ * Insert just the ancestor; we should trickle down to the correct
+ * cgroup for reclaim, since the other nodes will be below their
+ * soft limit
+ */
+ if (soft_fail_res) {
+ mem_over_soft_limit =
+ mem_cgroup_from_res_counter(soft_fail_res, res);
+ if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
+ mem_cgroup_update_tree(mem_over_soft_limit, page);
+ }
+done:
return 0;
nomem:
css_put(&mem->css);
return -ENOMEM;
}
-
/*
* A helper function to get mem_cgroup from ID. must be called under
* rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -1119,15 +1436,38 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(mem)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE,
+ NULL);
+ }
css_put(&mem->css);
return;
}
+
pc->mem_cgroup = mem;
+ /*
+ * We access a page_cgroup asynchronously without lock_page_cgroup().
+ * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
+ * is accessed after testing the USED bit. To make pc->mem_cgroup visible
+ * before the USED bit is set, we need a memory barrier here.
+ * See mem_cgroup_add_lru_list(), etc.
+ */
smp_wmb();
- pc->flags = pcg_default_flags[ctype];
+ switch (ctype) {
+ case MEM_CGROUP_CHARGE_TYPE_CACHE:
+ case MEM_CGROUP_CHARGE_TYPE_SHMEM:
+ SetPageCgroupCache(pc);
+ SetPageCgroupUsed(pc);
+ break;
+ case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ ClearPageCgroupCache(pc);
+ SetPageCgroupUsed(pc);
+ break;
+ default:
+ break;
+ }
mem_cgroup_charge_statistics(mem, pc, true);
@@ -1178,7 +1518,8 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
if (pc->mem_cgroup != from)
goto out;
- res_counter_uncharge(&from->res, PAGE_SIZE);
+ if (!mem_cgroup_is_root(from))
+ res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
mem_cgroup_charge_statistics(from, pc, false);
page = pc->page;
@@ -1197,8 +1538,8 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1);
}
- if (do_swap_account)
- res_counter_uncharge(&from->memsw, PAGE_SIZE);
+ if (do_swap_account && !mem_cgroup_is_root(from))
+ res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
css_put(&from->css);
css_get(&to->css);
@@ -1238,7 +1579,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
if (ret || !parent)
return ret;
@@ -1268,9 +1609,11 @@ uncharge:
/* drop extra refcnt by try_charge() */
css_put(&parent->css);
/* uncharge if move fails */
- res_counter_uncharge(&parent->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(parent)) {
+ res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
+ if (do_swap_account)
+ res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
+ }
return ret;
}
@@ -1295,7 +1638,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
prefetchw(pc);
mem = memcg;
- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
if (ret || !mem)
return ret;
@@ -1414,14 +1757,14 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
if (!mem)
goto charge_cur_mm;
*ptr = mem;
- ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+ ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
/* drop extra refcnt from tryget */
css_put(&mem->css);
return ret;
charge_cur_mm:
if (unlikely(!mm))
mm = &init_mm;
- return __mem_cgroup_try_charge(mm, mask, ptr, true);
+ return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
}
static void
@@ -1459,7 +1802,10 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
* This recorded memcg can be obsolete one. So, avoid
* calling css_tryget
*/
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(memcg))
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE,
+ NULL);
+ mem_cgroup_swap_statistics(memcg, false);
mem_cgroup_put(memcg);
}
rcu_read_unlock();
@@ -1484,9 +1830,11 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
return;
if (!mem)
return;
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(mem)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+ }
css_put(&mem->css);
}
@@ -1500,6 +1848,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
struct mem_cgroup_per_zone *mz;
+ bool soft_limit_excess = false;
if (mem_cgroup_disabled())
return NULL;
@@ -1538,9 +1887,14 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
break;
}
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(mem)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
+ if (do_swap_account &&
+ (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+ }
+ if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+ mem_cgroup_swap_statistics(mem, true);
mem_cgroup_charge_statistics(mem, pc, false);
ClearPageCgroupUsed(pc);
@@ -1554,6 +1908,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mz = page_cgroup_zoneinfo(pc);
unlock_page_cgroup(pc);
+ if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+ mem_cgroup_update_tree(mem, page);
/* at swapout, this memcg will be accessed to record to swap */
if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
css_put(&mem->css);
@@ -1629,7 +1985,9 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* We uncharge this because swap is freed.
* This memcg can be obsolete one. We avoid calling css_tryget
*/
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ if (!mem_cgroup_is_root(memcg))
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
+ mem_cgroup_swap_statistics(memcg, false);
mem_cgroup_put(memcg);
}
rcu_read_unlock();
@@ -1658,7 +2016,8 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
unlock_page_cgroup(pc);
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+ page);
css_put(&mem->css);
}
*ptr = mem;
@@ -1798,8 +2157,9 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
- false, true);
+ progress = mem_cgroup_hierarchical_reclaim(memcg, NULL,
+ GFP_KERNEL,
+ MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
@@ -1851,7 +2211,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
+ mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
+ MEM_CGROUP_RECLAIM_NOSWAP |
+ MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
/* Usage is reduced ? */
if (curusage >= oldusage)
@@ -1862,6 +2224,97 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
return ret;
}
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask, int nid,
+ int zid)
+{
+ unsigned long nr_reclaimed = 0;
+ struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+ unsigned long reclaimed;
+ int loop = 0;
+ struct mem_cgroup_tree_per_zone *mctz;
+
+ if (order > 0)
+ return 0;
+
+ mctz = soft_limit_tree_node_zone(nid, zid);
+ /*
+ * This loop can run for a while, especially if mem_cgroups continuously
+ * keep exceeding their soft limit and putting the system under
+ * pressure
+ */
+ do {
+ if (next_mz)
+ mz = next_mz;
+ else
+ mz = mem_cgroup_largest_soft_limit_node(mctz);
+ if (!mz)
+ break;
+
+ reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
+ gfp_mask,
+ MEM_CGROUP_RECLAIM_SOFT);
+ nr_reclaimed += reclaimed;
+ spin_lock(&mctz->lock);
+
+ /*
+ * If we failed to reclaim anything from this memory cgroup
+ * it is time to move on to the next cgroup
+ */
+ next_mz = NULL;
+ if (!reclaimed) {
+ do {
+ /*
+ * Loop until we find yet another one.
+ *
+ * By the time we get the soft_limit lock
+ * again, someone might have added the
+ * group back on the RB tree. Iterate to
+ * make sure we get a different mem.
+ * mem_cgroup_largest_soft_limit_node returns
+ * NULL if no other cgroup is present on
+ * the tree
+ */
+ next_mz =
+ __mem_cgroup_largest_soft_limit_node(mctz);
+ if (next_mz == mz) {
+ css_put(&next_mz->mem->css);
+ next_mz = NULL;
+ } else /* next_mz == NULL or other memcg */
+ break;
+ } while (1);
+ }
+ mz->usage_in_excess =
+ res_counter_soft_limit_excess(&mz->mem->res);
+ __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+ /*
+ * One school of thought says that we should not add
+ * back the node to the tree if reclaim returns 0.
+ * But our reclaim could return 0 simply because, due
+ * to priority, we are exposing a smaller subset of
+ * memory to reclaim from. Consider this as a longer
+ * term TODO.
+ */
+ if (mz->usage_in_excess)
+ __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
+ spin_unlock(&mctz->lock);
+ css_put(&mz->mem->css);
+ loop++;
+ /*
+ * Could not reclaim anything and there are no more
+ * mem cgroups to try or we seem to be looping without
+ * reclaiming anything.
+ */
+ if (!nr_reclaimed &&
+ (next_mz == NULL ||
+ loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+ break;
+ } while (!nr_reclaimed);
+ if (next_mz)
+ css_put(&next_mz->mem->css);
+ return nr_reclaimed;
+}
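A small userspace model of the selection and termination logic above, with a plain array standing in for the per-zone RB-tree and a made-up reclaim step; it is a sketch of the idea only, not the kernel algorithm itself:

/* Hedged sketch: pick the group with the largest soft-limit excess,
 * "reclaim" from it, and stop once something was reclaimed or a loop
 * cap is hit.  All numbers are invented for illustration. */
#include <stdio.h>

#define NGROUPS 3
#define MAX_LOOPS 6

static long excess[NGROUPS] = { 300, 120, 0 };

static int largest_excess_group(void)
{
	int i, best = -1;

	for (i = 0; i < NGROUPS; i++)
		if (excess[i] > 0 && (best < 0 || excess[i] > excess[best]))
			best = i;
	return best;	/* -1 when no group exceeds its soft limit */
}

int main(void)
{
	long nr_reclaimed = 0;
	int loop = 0;

	do {
		int victim = largest_excess_group();
		long reclaimed;

		if (victim < 0)
			break;
		reclaimed = excess[victim] / 2;	/* pretend reclaim */
		excess[victim] -= reclaimed;
		nr_reclaimed += reclaimed;
		loop++;
		printf("loop %d: group %d reclaimed %ld (excess now %ld)\n",
		       loop, victim, reclaimed, excess[victim]);
		/* mirror the kernel's cap so we never spin forever */
		if (!nr_reclaimed && loop > MAX_LOOPS)
			break;
	} while (!nr_reclaimed);

	printf("total reclaimed %ld\n", nr_reclaimed);
	return 0;
}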
+
/*
* This routine traverse page_cgroup in given list and drop them all.
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
@@ -2046,20 +2499,64 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
return retval;
}
+struct mem_cgroup_idx_data {
+ s64 val;
+ enum mem_cgroup_stat_index idx;
+};
+
+static int
+mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+{
+ struct mem_cgroup_idx_data *d = data;
+ d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
+ return 0;
+}
+
+static void
+mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+ enum mem_cgroup_stat_index idx, s64 *val)
+{
+ struct mem_cgroup_idx_data d;
+ d.idx = idx;
+ d.val = 0;
+ mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
+ *val = d.val;
+}
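The helper above lets the root cgroup's usage be derived by summing per-group statistics over the hierarchy instead of reading a res_counter. A hedged standalone sketch of that aggregation, with a tiny tree standing in for mem_cgroup_walk_tree():

/* Sketch only: recursive cache+rss aggregation over a two-level hierarchy. */
#include <stdio.h>

struct group {
	long cache, rss;
	int nr_children;
	struct group *children[4];
};

static long sum_pages(struct group *g)
{
	long val = g->cache + g->rss;
	int i;

	for (i = 0; i < g->nr_children; i++)
		val += sum_pages(g->children[i]);
	return val;
}

int main(void)
{
	struct group leaf = { .cache = 10, .rss = 5 };
	struct group root = { .cache = 100, .rss = 40,
			      .nr_children = 1, .children = { &leaf } };

	/* pages << PAGE_SHIFT gives bytes, as in mem_cgroup_read() */
	printf("root usage: %ld pages\n", sum_pages(&root));
	return 0;
}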
+
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- u64 val = 0;
+ u64 idx_val, val;
int type, name;
type = MEMFILE_TYPE(cft->private);
name = MEMFILE_ATTR(cft->private);
switch (type) {
case _MEM:
- val = res_counter_read_u64(&mem->res, name);
+ if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_CACHE, &idx_val);
+ val = idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_RSS, &idx_val);
+ val += idx_val;
+ val <<= PAGE_SHIFT;
+ } else
+ val = res_counter_read_u64(&mem->res, name);
break;
case _MEMSWAP:
- val = res_counter_read_u64(&mem->memsw, name);
+ if (name == RES_USAGE && mem_cgroup_is_root(mem)) {
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_CACHE, &idx_val);
+ val = idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_RSS, &idx_val);
+ val += idx_val;
+ mem_cgroup_get_recursive_idx_stat(mem,
+ MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+ val <<= PAGE_SHIFT;
+ } else
+ val = res_counter_read_u64(&mem->memsw, name);
break;
default:
BUG();
@@ -2083,6 +2580,10 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
name = MEMFILE_ATTR(cft->private);
switch (name) {
case RES_LIMIT:
+ if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
+ ret = -EINVAL;
+ break;
+ }
/* This function does all necessary parse...reuse it */
ret = res_counter_memparse_write_strategy(buffer, &val);
if (ret)
@@ -2092,6 +2593,20 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
else
ret = mem_cgroup_resize_memsw_limit(memcg, val);
break;
+ case RES_SOFT_LIMIT:
+ ret = res_counter_memparse_write_strategy(buffer, &val);
+ if (ret)
+ break;
+ /*
+ * For memsw, soft limits are hard to implement in terms
+ * of semantics; for now, we support soft limits for
+ * memory control without swap
+ */
+ if (type == _MEM)
+ ret = res_counter_set_soft_limit(&memcg->res, val);
+ else
+ ret = -EINVAL;
+ break;
default:
ret = -EINVAL; /* should be BUG() ? */
break;
@@ -2149,6 +2664,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
res_counter_reset_failcnt(&mem->memsw);
break;
}
+
return 0;
}
@@ -2160,6 +2676,7 @@ enum {
MCS_MAPPED_FILE,
MCS_PGPGIN,
MCS_PGPGOUT,
+ MCS_SWAP,
MCS_INACTIVE_ANON,
MCS_ACTIVE_ANON,
MCS_INACTIVE_FILE,
@@ -2181,6 +2698,7 @@ struct {
{"mapped_file", "total_mapped_file"},
{"pgpgin", "total_pgpgin"},
{"pgpgout", "total_pgpgout"},
+ {"swap", "total_swap"},
{"inactive_anon", "total_inactive_anon"},
{"active_anon", "total_active_anon"},
{"inactive_file", "total_inactive_file"},
@@ -2205,6 +2723,10 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s->stat[MCS_PGPGIN] += val;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
s->stat[MCS_PGPGOUT] += val;
+ if (do_swap_account) {
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
+ s->stat[MCS_SWAP] += val * PAGE_SIZE;
+ }
/* per zone stat */
val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -2236,8 +2758,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
memset(&mystat, 0, sizeof(mystat));
mem_cgroup_get_local_stat(mem_cont, &mystat);
- for (i = 0; i < NR_MCS_STAT; i++)
+ for (i = 0; i < NR_MCS_STAT; i++) {
+ if (i == MCS_SWAP && !do_swap_account)
+ continue;
cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+ }
/* Hierarchical information */
{
@@ -2250,9 +2775,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
memset(&mystat, 0, sizeof(mystat));
mem_cgroup_get_total_stat(mem_cont, &mystat);
- for (i = 0; i < NR_MCS_STAT; i++)
+ for (i = 0; i < NR_MCS_STAT; i++) {
+ if (i == MCS_SWAP && !do_swap_account)
+ continue;
cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
-
+ }
#ifdef CONFIG_DEBUG_VM
cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
@@ -2345,6 +2872,12 @@ static struct cftype mem_cgroup_files[] = {
.read_u64 = mem_cgroup_read,
},
{
+ .name = "soft_limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
+ .write_string = mem_cgroup_write,
+ .read_u64 = mem_cgroup_read,
+ },
+ {
.name = "failcnt",
.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
.trigger = mem_cgroup_reset,
@@ -2438,6 +2971,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
mz = &pn->zoneinfo[zone];
for_each_lru(l)
INIT_LIST_HEAD(&mz->lists[l]);
+ mz->usage_in_excess = 0;
+ mz->on_tree = false;
+ mz->mem = mem;
}
return 0;
}
@@ -2483,6 +3019,7 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
{
int node;
+ mem_cgroup_remove_from_trees(mem);
free_css_id(&mem_cgroup_subsys, &mem->css);
for_each_node_state(node, N_POSSIBLE)
@@ -2531,6 +3068,31 @@ static void __init enable_swap_cgroup(void)
}
#endif
+static int mem_cgroup_soft_limit_tree_init(void)
+{
+ struct mem_cgroup_tree_per_node *rtpn;
+ struct mem_cgroup_tree_per_zone *rtpz;
+ int tmp, node, zone;
+
+ for_each_node_state(node, N_POSSIBLE) {
+ tmp = node;
+ if (!node_state(node, N_NORMAL_MEMORY))
+ tmp = -1;
+ rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+ if (!rtpn)
+ return 1;
+
+ soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ rtpz = &rtpn->rb_tree_per_zone[zone];
+ rtpz->rb_root = RB_ROOT;
+ spin_lock_init(&rtpz->lock);
+ }
+ }
+ return 0;
+}
+
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
@@ -2545,10 +3107,15 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
goto free_out;
+
/* root ? */
if (cont->parent == NULL) {
enable_swap_cgroup();
parent = NULL;
+ root_mem_cgroup = mem;
+ if (mem_cgroup_soft_limit_tree_init())
+ goto free_out;
+
} else {
parent = mem_cgroup_from_cont(cont->parent);
mem->use_hierarchy = parent->use_hierarchy;
@@ -2577,6 +3144,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &mem->css;
free_out:
__mem_cgroup_free(mem);
+ root_mem_cgroup = NULL;
return ERR_PTR(error);
}
@@ -2612,7 +3180,8 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
struct cgroup *old_cont,
- struct task_struct *p)
+ struct task_struct *p,
+ bool threadgroup)
{
mutex_lock(&memcg_tasklist);
/*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
new file mode 100644
index 00000000000..729d4b15b64
--- /dev/null
+++ b/mm/memory-failure.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Authors: Andi Kleen, Fengguang Wu
+ *
+ * This software may be redistributed and/or modified under the terms of
+ * the GNU General Public License ("GPL") version 2 only as published by the
+ * Free Software Foundation.
+ *
+ * High level machine check handler. Handles pages reported by the
+ * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
+ * failure.
+ *
+ * Handles page cache pages in various states. The tricky part
+ * here is that we can access any page asynchronously to other VM
+ * users, because memory failures could happen anytime and anywhere,
+ * possibly violating some of their assumptions. This is why this code
+ * has to be extremely careful. Generally it tries to use normal locking
+ * rules, as in get the standard locks, even if that means the
+ * error handling takes potentially a long time.
+ *
+ * The operation to map back from RMAP chains to processes has to walk
+ * the complete process list and has non-linear complexity in the number of
+ * mappings. In short it can be quite slow. But since memory corruptions
+ * are rare we hope to get away with this.
+ */
+
+/*
+ * Notebook:
+ * - hugetlb needs more code
+ * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
+ * - pass bad pages to kdump next kernel
+ */
+#define DEBUG 1 /* remove me in 2.6.34 */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/rmap.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/backing-dev.h>
+#include "internal.h"
+
+int sysctl_memory_failure_early_kill __read_mostly = 0;
+
+int sysctl_memory_failure_recovery __read_mostly = 1;
+
+atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
+
+/*
+ * Send all the processes who have the page mapped an ``action optional''
+ * signal.
+ */
+static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
+ unsigned long pfn)
+{
+ struct siginfo si;
+ int ret;
+
+ printk(KERN_ERR
+ "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
+ pfn, t->comm, t->pid);
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_MCEERR_AO;
+ si.si_addr = (void *)addr;
+#ifdef __ARCH_SI_TRAPNO
+ si.si_trapno = trapno;
+#endif
+ si.si_addr_lsb = PAGE_SHIFT;
+ /*
+ * Don't use force here, it's convenient if the signal
+ * can be temporarily blocked.
+ * This could cause a loop when the user sets SIGBUS
+ * to SIG_IGN, but hopefully no one will do that?
+ */
+ ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
+ if (ret < 0)
+ printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
+ t->comm, t->pid, ret);
+ return ret;
+}
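For context, a minimal userspace sketch of what a process might install to receive the action-optional SIGBUS sent by kill_proc_ao(); the siginfo fields match what the kernel fills in above, everything else is illustrative:

/* Minimal userspace demo: catch BUS_MCEERR_AO and report the poisoned range. */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void mce_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	if (si->si_code == BUS_MCEERR_AO)
		/* fprintf is not async-signal-safe; acceptable for a demo only */
		fprintf(stderr, "memory corruption at %p (lsb %d)\n",
			si->si_addr, si->si_addr_lsb);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = mce_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);
	pause();	/* wait for a kernel-delivered SIGBUS */
	return 0;
}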
+
+/*
+ * Kill all processes that have a poisoned page mapped and then isolate
+ * the page.
+ *
+ * General strategy:
+ * Find all processes having the page mapped and kill them.
+ * But we keep a page reference around so that the page is not
+ * actually freed yet.
+ * Then stash the page away
+ *
+ * There's no convenient way to get back to mapped processes
+ * from the VMAs. So do a brute-force search over all
+ * running processes.
+ *
+ * Remember that machine checks are not common (or rather
+ * if they are common you have other problems), so this shouldn't
+ * be a performance issue.
+ *
+ * Also there are some races possible while we get from the
+ * error detection to actually handle it.
+ */
+
+struct to_kill {
+ struct list_head nd;
+ struct task_struct *tsk;
+ unsigned long addr;
+ unsigned addr_valid:1;
+};
+
+/*
+ * Failure handling: if we can't find or can't kill a process there's
+ * not much we can do. We just print a message and ignore otherwise.
+ */
+
+/*
+ * Schedule a process for later kill.
+ * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
+ * TBD would GFP_NOIO be enough?
+ */
+static void add_to_kill(struct task_struct *tsk, struct page *p,
+ struct vm_area_struct *vma,
+ struct list_head *to_kill,
+ struct to_kill **tkc)
+{
+ struct to_kill *tk;
+
+ if (*tkc) {
+ tk = *tkc;
+ *tkc = NULL;
+ } else {
+ tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
+ if (!tk) {
+ printk(KERN_ERR
+ "MCE: Out of memory while machine check handling\n");
+ return;
+ }
+ }
+ tk->addr = page_address_in_vma(p, vma);
+ tk->addr_valid = 1;
+
+ /*
+ * In theory we don't have to kill when the page was
+ * munmapped. But it could also be a mremap. Since that's
+ * likely very rare, kill anyway just out of paranoia, but use
+ * a SIGKILL because the error is not contained anymore.
+ */
+ if (tk->addr == -EFAULT) {
+ pr_debug("MCE: Unable to find user space address %lx in %s\n",
+ page_to_pfn(p), tsk->comm);
+ tk->addr_valid = 0;
+ }
+ get_task_struct(tsk);
+ tk->tsk = tsk;
+ list_add_tail(&tk->nd, to_kill);
+}
+
+/*
+ * Kill the processes that have been collected earlier.
+ *
+ * Only do anything when DOIT is set, otherwise just free the list
+ * (this is used for clean pages which do not need killing)
+ * Also when FAIL is set do a force kill because something went
+ * wrong earlier.
+ */
+static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
+ int fail, unsigned long pfn)
+{
+ struct to_kill *tk, *next;
+
+ list_for_each_entry_safe (tk, next, to_kill, nd) {
+ if (doit) {
+ /*
+ * In case something went wrong with munmapping,
+ * make sure the process doesn't catch the
+ * signal and then access the memory. Just kill it.
+ */
+ if (fail || tk->addr_valid == 0) {
+ printk(KERN_ERR
+ "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
+ pfn, tk->tsk->comm, tk->tsk->pid);
+ force_sig(SIGKILL, tk->tsk);
+ }
+
+ /*
+ * In theory the process could have mapped
+ * something else on the address in-between. We could
+ * check for that, but we need to tell the
+ * process anyways.
+ */
+ else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
+ pfn) < 0)
+ printk(KERN_ERR
+ "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
+ pfn, tk->tsk->comm, tk->tsk->pid);
+ }
+ put_task_struct(tk->tsk);
+ kfree(tk);
+ }
+}
+
+static int task_early_kill(struct task_struct *tsk)
+{
+ if (!tsk->mm)
+ return 0;
+ if (tsk->flags & PF_MCE_PROCESS)
+ return !!(tsk->flags & PF_MCE_EARLY);
+ return sysctl_memory_failure_early_kill;
+}
+
+/*
+ * Collect processes when the error hits an anonymous page.
+ */
+static void collect_procs_anon(struct page *page, struct list_head *to_kill,
+ struct to_kill **tkc)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct anon_vma *av;
+
+ read_lock(&tasklist_lock);
+ av = page_lock_anon_vma(page);
+ if (av == NULL) /* Not actually mapped anymore */
+ goto out;
+ for_each_process (tsk) {
+ if (!task_early_kill(tsk))
+ continue;
+ list_for_each_entry (vma, &av->head, anon_vma_node) {
+ if (!page_mapped_in_vma(page, vma))
+ continue;
+ if (vma->vm_mm == tsk->mm)
+ add_to_kill(tsk, page, vma, to_kill, tkc);
+ }
+ }
+ page_unlock_anon_vma(av);
+out:
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Collect processes when the error hits a file-mapped page.
+ */
+static void collect_procs_file(struct page *page, struct list_head *to_kill,
+ struct to_kill **tkc)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct prio_tree_iter iter;
+ struct address_space *mapping = page->mapping;
+
+ /*
+ * A note on the locking order between the two locks.
+ * We don't rely on this particular order.
+ * If you have some other code that needs a different order
+ * feel free to switch them around. Or add a reverse link
+ * from mm_struct to task_struct, then this could be all
+ * done without taking tasklist_lock and looping over all tasks.
+ */
+
+ read_lock(&tasklist_lock);
+ spin_lock(&mapping->i_mmap_lock);
+ for_each_process(tsk) {
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ if (!task_early_kill(tsk))
+ continue;
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
+ pgoff) {
+ /*
+ * Send early kill signal to tasks where a vma covers
+ * the page but the corrupted page is not necessarily
+ * mapped in its pte.
+ * Assume applications that requested early kill want
+ * to be informed of all such data corruptions.
+ */
+ if (vma->vm_mm == tsk->mm)
+ add_to_kill(tsk, page, vma, to_kill, tkc);
+ }
+ }
+ spin_unlock(&mapping->i_mmap_lock);
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Collect the processes who have the corrupted page mapped to kill.
+ * This is done in two steps for locking reasons.
+ * First preallocate one tokill structure outside the spin locks,
+ * so that we can kill at least one process reasonably reliably.
+ */
+static void collect_procs(struct page *page, struct list_head *tokill)
+{
+ struct to_kill *tk;
+
+ if (!page->mapping)
+ return;
+
+ tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
+ if (!tk)
+ return;
+ if (PageAnon(page))
+ collect_procs_anon(page, tokill, &tk);
+ else
+ collect_procs_file(page, tokill, &tk);
+ kfree(tk);
+}
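The comment above describes a "preallocate one element before taking the locks" pattern. A plain userspace sketch of the same idea, with a mutex standing in for tasklist_lock and illustrative names throughout:

/* Sketch: allocate the first list entry while sleeping allocations are
 * still allowed, then consume it under the lock. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct victim { int pid; struct victim *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void collect(struct victim **head)
{
	/* no locks held yet, so a blocking allocation is fine here */
	struct victim *spare = malloc(sizeof(*spare));

	pthread_mutex_lock(&list_lock);
	/* under the lock, the spare guarantees at least one entry can be
	 * queued even if further (atomic) allocations were to fail */
	if (spare) {
		spare->pid = 42;
		spare->next = *head;
		*head = spare;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct victim *head = NULL;

	collect(&head);
	printf("collected %s\n", head ? "one victim" : "nothing");
	free(head);
	return 0;
}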
+
+/*
+ * Error handlers for various types of pages.
+ */
+
+enum outcome {
+ FAILED, /* Error handling failed */
+ DELAYED, /* Will be handled later */
+ IGNORED, /* Error safely ignored */
+ RECOVERED, /* Successfully recovered */
+};
+
+static const char *action_name[] = {
+ [FAILED] = "Failed",
+ [DELAYED] = "Delayed",
+ [IGNORED] = "Ignored",
+ [RECOVERED] = "Recovered",
+};
+
+/*
+ * Error hit kernel page.
+ * Do nothing; try to be lucky and not touch the page. For a few cases we
+ * could be more sophisticated.
+ */
+static int me_kernel(struct page *p, unsigned long pfn)
+{
+ return DELAYED;
+}
+
+/*
+ * Already poisoned page.
+ */
+static int me_ignore(struct page *p, unsigned long pfn)
+{
+ return IGNORED;
+}
+
+/*
+ * Page in unknown state. Do nothing.
+ */
+static int me_unknown(struct page *p, unsigned long pfn)
+{
+ printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
+ return FAILED;
+}
+
+/*
+ * Free memory
+ */
+static int me_free(struct page *p, unsigned long pfn)
+{
+ return DELAYED;
+}
+
+/*
+ * Clean (or cleaned) page cache page.
+ */
+static int me_pagecache_clean(struct page *p, unsigned long pfn)
+{
+ int err;
+ int ret = FAILED;
+ struct address_space *mapping;
+
+ if (!isolate_lru_page(p))
+ page_cache_release(p);
+
+ /*
+ * For anonymous pages we're done; the only reference left
+ * should be the one m_f() holds.
+ */
+ if (PageAnon(p))
+ return RECOVERED;
+
+ /*
+ * Now truncate the page in the page cache. This is really
+ * more like a "temporary hole punch"
+ * Don't do this for block devices when someone else
+ * has a reference, because it could be file system metadata
+ * and that's not safe to truncate.
+ */
+ mapping = page_mapping(p);
+ if (!mapping) {
+ /*
+ * Page has been torn down in the meantime
+ */
+ return FAILED;
+ }
+
+ /*
+ * Truncation is a bit tricky. Enable it per file system for now.
+ *
+ * Open: to take i_mutex or not for this? Right now we don't.
+ */
+ if (mapping->a_ops->error_remove_page) {
+ err = mapping->a_ops->error_remove_page(mapping, p);
+ if (err != 0) {
+ printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
+ pfn, err);
+ } else if (page_has_private(p) &&
+ !try_to_release_page(p, GFP_NOIO)) {
+ pr_debug("MCE %#lx: failed to release buffers\n", pfn);
+ } else {
+ ret = RECOVERED;
+ }
+ } else {
+ /*
+ * If the file system doesn't support it, just invalidate.
+ * This fails on dirty pages or anything with private pages
+ */
+ if (invalidate_inode_page(p))
+ ret = RECOVERED;
+ else
+ printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
+ pfn);
+ }
+ return ret;
+}
+
+/*
+ * Dirty pagecache page
+ * Issues: when the error hits a hole page the error is not properly
+ * propagated.
+ */
+static int me_pagecache_dirty(struct page *p, unsigned long pfn)
+{
+ struct address_space *mapping = page_mapping(p);
+
+ SetPageError(p);
+ /* TBD: print more information about the file. */
+ if (mapping) {
+ /*
+ * IO error will be reported by write(), fsync(), etc.
+ * who check the mapping.
+ * This way the application knows that something went
+ * wrong with its dirty file data.
+ *
+ * There's one open issue:
+ *
+ * The EIO will be only reported on the next IO
+ * operation and then cleared through the IO map.
+ * Normally Linux has two mechanisms to pass IO error
+ * first through the AS_EIO flag in the address space
+ * and then through the PageError flag in the page.
+ * Since we drop pages on memory failure handling the
+ * only mechanism open to use is through AS_EIO.
+ *
+ * This has the disadvantage that it gets cleared on
+ * the first operation that returns an error, while
+ * the PageError bit is more sticky and only cleared
+ * when the page is reread or dropped. If an
+ * application assumes it will always get error on
+ * fsync, but does other operations on the fd before
+ * and the page is dropped in between, then the error
+ * will not be properly reported.
+ *
+ * This can already happen even without hwpoisoned
+ * pages: first on metadata IO errors (which only
+ * report through AS_EIO) or when the page is dropped
+ * at the wrong time.
+ *
+ * So right now we assume that the application DTRT on
+ * the first EIO, but we're not worse than other parts
+ * of the kernel.
+ */
+ mapping_set_error(mapping, EIO);
+ }
+
+ return me_pagecache_clean(p, pfn);
+}
+
+/*
+ * Clean and dirty swap cache.
+ *
+ * Dirty swap cache page is tricky to handle. The page could live both in page
+ * cache and swap cache (i.e. page is freshly swapped in). So it could be
+ * referenced concurrently by 2 types of PTEs:
+ * normal PTEs and swap PTEs. We try to handle them consistently by calling
+ * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
+ * and then
+ * - clear dirty bit to prevent IO
+ * - remove from LRU
+ * - but keep in the swap cache, so that when we return to it on
+ * a later page fault, we know the application is accessing
+ * corrupted data and shall be killed (we installed simple
+ * interception code in do_swap_page to catch it).
+ *
+ * Clean swap cache pages can be directly isolated. A later page fault will
+ * bring in the known good data from disk.
+ */
+static int me_swapcache_dirty(struct page *p, unsigned long pfn)
+{
+ int ret = FAILED;
+
+ ClearPageDirty(p);
+ /* Trigger EIO in shmem: */
+ ClearPageUptodate(p);
+
+ if (!isolate_lru_page(p)) {
+ page_cache_release(p);
+ ret = DELAYED;
+ }
+
+ return ret;
+}
+
+static int me_swapcache_clean(struct page *p, unsigned long pfn)
+{
+ int ret = FAILED;
+
+ if (!isolate_lru_page(p)) {
+ page_cache_release(p);
+ ret = RECOVERED;
+ }
+ delete_from_swap_cache(p);
+ return ret;
+}
+
+/*
+ * Huge pages. Needs work.
+ * Issues:
+ * No rmap support so we cannot find the original mapper. In theory could walk
+ * all MMs and look for the mappings, but that would be non atomic and racy.
+ * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
+ * like just walking the current process and hoping it has it mapped (that
+ * should usually be true for the common "shared database cache" case).
+ * Should handle free huge pages and dequeue them too, but this needs to
+ * handle huge page accounting correctly.
+ */
+static int me_huge_page(struct page *p, unsigned long pfn)
+{
+ return FAILED;
+}
+
+/*
+ * Various page states we can handle.
+ *
+ * A page state is defined by its current page->flags bits.
+ * The table matches them in order and calls the right handler.
+ *
+ * This is quite tricky because we can access a page at any time
+ * in its life cycle, so all accesses have to be extremely careful.
+ *
+ * This is not complete. More states could be added.
+ * For any missing state don't attempt recovery.
+ */
+
+#define dirty (1UL << PG_dirty)
+#define sc (1UL << PG_swapcache)
+#define unevict (1UL << PG_unevictable)
+#define mlock (1UL << PG_mlocked)
+#define writeback (1UL << PG_writeback)
+#define lru (1UL << PG_lru)
+#define swapbacked (1UL << PG_swapbacked)
+#define head (1UL << PG_head)
+#define tail (1UL << PG_tail)
+#define compound (1UL << PG_compound)
+#define slab (1UL << PG_slab)
+#define buddy (1UL << PG_buddy)
+#define reserved (1UL << PG_reserved)
+
+static struct page_state {
+ unsigned long mask;
+ unsigned long res;
+ char *msg;
+ int (*action)(struct page *p, unsigned long pfn);
+} error_states[] = {
+ { reserved, reserved, "reserved kernel", me_ignore },
+ { buddy, buddy, "free kernel", me_free },
+
+ /*
+ * Could in theory check if slab page is free or if we can drop
+ * currently unused objects without touching them. But just
+ * treat it as standard kernel for now.
+ */
+ { slab, slab, "kernel slab", me_kernel },
+
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+ { head, head, "huge", me_huge_page },
+ { tail, tail, "huge", me_huge_page },
+#else
+ { compound, compound, "huge", me_huge_page },
+#endif
+
+ { sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty },
+ { sc|dirty, sc, "swapcache", me_swapcache_clean },
+
+ { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
+ { unevict, unevict, "unevictable LRU", me_pagecache_clean},
+
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+ { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
+ { mlock, mlock, "mlocked LRU", me_pagecache_clean },
+#endif
+
+ { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
+ { lru|dirty, lru, "clean LRU", me_pagecache_clean },
+ { swapbacked, swapbacked, "anonymous", me_pagecache_clean },
+
+ /*
+ * Catchall entry: must be at end.
+ */
+ { 0, 0, "unknown page state", me_unknown },
+};
+
+#undef lru
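A standalone sketch of the first-match walk that __memory_failure() later performs over error_states[]: page flags are matched against mask/res in table order, and the catch-all entry always terminates the scan. The flag bits below are made up for the example:

/* Illustrative only: first-match state table walk with a catch-all entry. */
#include <stdio.h>

#define F_LRU	(1UL << 0)
#define F_DIRTY	(1UL << 1)
#define F_SLAB	(1UL << 2)

struct state {
	unsigned long mask, res;
	const char *msg;
};

static const struct state states[] = {
	{ F_SLAB,		F_SLAB,			"kernel slab" },
	{ F_LRU | F_DIRTY,	F_LRU | F_DIRTY,	"dirty LRU" },
	{ F_LRU | F_DIRTY,	F_LRU,			"clean LRU" },
	{ 0,			0,			"unknown page state" },
};

static const char *classify(unsigned long flags)
{
	const struct state *ps;

	/* the { 0, 0 } catch-all always matches, so the loop terminates */
	for (ps = states; ; ps++)
		if ((flags & ps->mask) == ps->res)
			return ps->msg;
}

int main(void)
{
	printf("%s\n", classify(F_LRU | F_DIRTY));	/* dirty LRU */
	printf("%s\n", classify(F_LRU));		/* clean LRU */
	printf("%s\n", classify(0));			/* unknown page state */
	return 0;
}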
+
+static void action_result(unsigned long pfn, char *msg, int result)
+{
+ struct page *page = NULL;
+ if (pfn_valid(pfn))
+ page = pfn_to_page(pfn);
+
+ printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
+ pfn,
+ page && PageDirty(page) ? "dirty " : "",
+ msg, action_name[result]);
+}
+
+static int page_action(struct page_state *ps, struct page *p,
+ unsigned long pfn, int ref)
+{
+ int result;
+
+ result = ps->action(p, pfn);
+ action_result(pfn, ps->msg, result);
+ if (page_count(p) != 1 + ref)
+ printk(KERN_ERR
+ "MCE %#lx: %s page still referenced by %d users\n",
+ pfn, ps->msg, page_count(p) - 1);
+
+ /* Could do more checks here if page looks ok */
+ /*
+ * Could adjust zone counters here to correct for the missing page.
+ */
+
+ return result == RECOVERED ? 0 : -EBUSY;
+}
+
+#define N_UNMAP_TRIES 5
+
+/*
+ * Do all that is necessary to remove user space mappings. Unmap
+ * the pages and send SIGBUS to the processes if the data was dirty.
+ */
+static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ int trapno)
+{
+ enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
+ struct address_space *mapping;
+ LIST_HEAD(tokill);
+ int ret;
+ int i;
+ int kill = 1;
+
+ if (PageReserved(p) || PageCompound(p) || PageSlab(p))
+ return;
+
+ if (!PageLRU(p))
+ lru_add_drain_all();
+
+ /*
+ * This check implies we don't kill processes if their pages
+ * are in the swap cache early. Those are always late kills.
+ */
+ if (!page_mapped(p))
+ return;
+
+ if (PageSwapCache(p)) {
+ printk(KERN_ERR
+ "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
+ ttu |= TTU_IGNORE_HWPOISON;
+ }
+
+ /*
+ * Propagate the dirty bit from PTEs to struct page first, because we
+ * need this to decide if we should kill or just drop the page.
+ */
+ mapping = page_mapping(p);
+ if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
+ if (page_mkclean(p)) {
+ SetPageDirty(p);
+ } else {
+ kill = 0;
+ ttu |= TTU_IGNORE_HWPOISON;
+ printk(KERN_INFO
+ "MCE %#lx: corrupted page was clean: dropped without side effects\n",
+ pfn);
+ }
+ }
+
+ /*
+ * First collect all the processes that have the page
+ * mapped in dirty form. This has to be done before try_to_unmap,
+ * because ttu takes the rmap data structures down.
+ *
+ * Error handling: We ignore errors here because
+ * there's nothing that can be done.
+ */
+ if (kill)
+ collect_procs(p, &tokill);
+
+ /*
+ * try_to_unmap can fail temporarily due to races.
+ * Try a few times (RED-PEN better strategy?)
+ */
+ for (i = 0; i < N_UNMAP_TRIES; i++) {
+ ret = try_to_unmap(p, ttu);
+ if (ret == SWAP_SUCCESS)
+ break;
+ pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
+ }
+
+ if (ret != SWAP_SUCCESS)
+ printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
+ pfn, page_mapcount(p));
+
+ /*
+ * Now that the dirty bit has been propagated to the
+ * struct page and all unmaps done we can decide if
+ * killing is needed or not. Only kill when the page
+ * was dirty, otherwise the tokill list is merely
+ * freed. When there was a problem unmapping earlier,
+ * use a more forceful, uncatchable kill to prevent
+ * any accesses to the poisoned memory.
+ */
+ kill_procs_ao(&tokill, !!PageDirty(p), trapno,
+ ret != SWAP_SUCCESS, pfn);
+}
+
+int __memory_failure(unsigned long pfn, int trapno, int ref)
+{
+ struct page_state *ps;
+ struct page *p;
+ int res;
+
+ if (!sysctl_memory_failure_recovery)
+ panic("Memory failure from trap %d on page %lx", trapno, pfn);
+
+ if (!pfn_valid(pfn)) {
+ action_result(pfn, "memory outside kernel control", IGNORED);
+ return -EIO;
+ }
+
+ p = pfn_to_page(pfn);
+ if (TestSetPageHWPoison(p)) {
+ action_result(pfn, "already hardware poisoned", IGNORED);
+ return 0;
+ }
+
+ atomic_long_add(1, &mce_bad_pages);
+
+ /*
+ * We need/can do nothing about count=0 pages.
+ * 1) it's a free page, and therefore in safe hand:
+ * prep_new_page() will be the gate keeper.
+ * 2) it's part of a non-compound high order page.
+ * Implies some kernel user: cannot stop them from
+ * R/W the page; let's pray that the page has been
+ * used and will be freed some time later.
+ * In fact it's dangerous to directly bump up page count from 0,
+ * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
+ */
+ if (!get_page_unless_zero(compound_head(p))) {
+ action_result(pfn, "free or high order kernel", IGNORED);
+ return PageBuddy(compound_head(p)) ? 0 : -EBUSY;
+ }
+
+ /*
+ * Lock the page and wait for writeback to finish.
+ * It's very difficult to mess with pages currently under IO
+ * and in many cases impossible, so we just avoid it here.
+ */
+ lock_page_nosync(p);
+ wait_on_page_writeback(p);
+
+ /*
+ * Now take care of user space mappings.
+ */
+ hwpoison_user_mappings(p, pfn, trapno);
+
+ /*
+ * Torn down by someone else?
+ */
+ if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+ action_result(pfn, "already truncated LRU", IGNORED);
+ res = 0;
+ goto out;
+ }
+
+ res = -EBUSY;
+ for (ps = error_states;; ps++) {
+ if ((p->flags & ps->mask) == ps->res) {
+ res = page_action(ps, p, pfn, ref);
+ break;
+ }
+ }
+out:
+ unlock_page(p);
+ return res;
+}
+EXPORT_SYMBOL_GPL(__memory_failure);
+
+/**
+ * memory_failure - Handle memory failure of a page.
+ * @pfn: Page Number of the corrupted page
+ * @trapno: Trap number reported in the signal to user space.
+ *
+ * This function is called by the low level machine check code
+ * of an architecture when it detects hardware memory corruption
+ * of a page. It tries its best to recover, which includes
+ * dropping pages, killing processes etc.
+ *
+ * The function is primarily of use for corruptions that
+ * happen outside the current execution context (e.g. when
+ * detected by a background scrubber)
+ *
+ * Must run in process context (e.g. a work queue) with interrupts
+ * enabled and no spinlocks hold.
+ */
+void memory_failure(unsigned long pfn, int trapno)
+{
+ __memory_failure(pfn, trapno, 0);
+}
diff --git a/mm/memory.c b/mm/memory.c
index e8f63d9961e..7e91b5f9f69 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -45,6 +45,7 @@
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
@@ -107,6 +108,18 @@ static int __init disable_randmaps(char *s)
}
__setup("norandmaps", disable_randmaps);
+unsigned long zero_pfn __read_mostly;
+unsigned long highest_memmap_pfn __read_mostly;
+
+/*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+static int __init init_zero_pfn(void)
+{
+ zero_pfn = page_to_pfn(ZERO_PAGE(0));
+ return 0;
+}
+core_initcall(init_zero_pfn);
/*
* If a p?d_bad entry is found while walking page tables, report
@@ -284,7 +297,8 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long addr = vma->vm_start;
/*
- * Hide vma from rmap and vmtruncate before freeing pgtables
+ * Hide vma from rmap and truncate_pagecache before freeing
+ * pgtables
*/
anon_vma_unlink(vma);
unlink_file_vma(vma);
@@ -443,6 +457,20 @@ static inline int is_cow_mapping(unsigned int flags)
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
+#ifndef is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ return pfn == zero_pfn;
+}
+#endif
+
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ return zero_pfn;
+}
+#endif
+
/*
* vm_normal_page -- This function gets the "struct page" associated with a pte.
*
@@ -498,7 +526,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
if (HAVE_PTE_SPECIAL) {
if (likely(!pte_special(pte)))
goto check_pfn;
- if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+ if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return NULL;
+ if (!is_zero_pfn(pfn))
print_bad_pte(vma, addr, pte, NULL);
return NULL;
}
@@ -520,6 +550,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
}
}
+ if (is_zero_pfn(pfn))
+ return NULL;
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
@@ -597,8 +629,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
- page_dup_rmap(page, vma, addr);
- rss[!!PageAnon(page)]++;
+ page_dup_rmap(page);
+ rss[PageAnon(page)]++;
}
out_set_pte:
@@ -1143,9 +1175,14 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
goto no_page;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
+
page = vm_normal_page(vma, address, pte);
- if (unlikely(!page))
- goto bad_page;
+ if (unlikely(!page)) {
+ if ((flags & FOLL_DUMP) ||
+ !is_zero_pfn(pte_pfn(pte)))
+ goto bad_page;
+ page = pte_page(pte);
+ }
if (flags & FOLL_GET)
get_page(page);
@@ -1173,65 +1210,46 @@ no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
return page;
- /* Fall through to ZERO_PAGE handling */
+
no_page_table:
/*
* When core dumping an enormous anonymous area that nobody
- * has touched so far, we don't want to allocate page tables.
+ * has touched so far, we don't want to allocate unnecessary pages or
+ * page tables. Return error instead of NULL to skip handle_mm_fault,
+ * then get_dump_page() will return NULL to leave a hole in the dump.
+ * But we can only make this optimization where a hole would surely
+ * be zero-filled if handle_mm_fault() actually did handle it.
*/
- if (flags & FOLL_ANON) {
- page = ZERO_PAGE(0);
- if (flags & FOLL_GET)
- get_page(page);
- BUG_ON(flags & FOLL_WRITE);
- }
+ if ((flags & FOLL_DUMP) &&
+ (!vma->vm_ops || !vma->vm_ops->fault))
+ return ERR_PTR(-EFAULT);
return page;
}
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
-{
- /*
- * We don't want to optimize FOLL_ANON for make_pages_present()
- * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
- * we want to get the page from the page tables to make sure
- * that we serialize and update with any other user of that
- * mapping.
- */
- if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
- return 0;
- /*
- * And if we have a fault routine, it's not an anonymous region.
- */
- return !vma->vm_ops || !vma->vm_ops->fault;
-}
-
-
-
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int nr_pages, int flags,
+ unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
- unsigned int vm_flags = 0;
- int write = !!(flags & GUP_FLAGS_WRITE);
- int force = !!(flags & GUP_FLAGS_FORCE);
- int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
- int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
+ unsigned long vm_flags;
if (nr_pages <= 0)
return 0;
+
+ VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
/*
* Require read or write permissions.
- * If 'force' is set, we only require the "MAY" flags.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
*/
- vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ vm_flags = (gup_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (gup_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
do {
struct vm_area_struct *vma;
- unsigned int foll_flags;
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(tsk, start)) {
@@ -1243,7 +1261,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
pte_t *pte;
/* user gate pages are read-only */
- if (!ignore && write)
+ if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
@@ -1277,38 +1295,26 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- (!ignore && !(vm_flags & vma->vm_flags)))
+ !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &nr_pages, i, write);
+ &start, &nr_pages, i, gup_flags);
continue;
}
- foll_flags = FOLL_TOUCH;
- if (pages)
- foll_flags |= FOLL_GET;
- if (!write && use_zero_page(vma))
- foll_flags |= FOLL_ANON;
-
do {
struct page *page;
+ unsigned int foll_flags = gup_flags;
/*
* If we have a pending SIGKILL, don't keep faulting
- * pages and potentially allocating memory, unless
- * current is handling munlock--e.g., on exit. In
- * that case, we are not allocating memory. Rather,
- * we're only unlocking already resident/mapped pages.
+ * pages and potentially allocating memory.
*/
- if (unlikely(!ignore_sigkill &&
- fatal_signal_pending(current)))
+ if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
- if (write)
- foll_flags |= FOLL_WRITE;
-
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
@@ -1320,7 +1326,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
- else if (ret & VM_FAULT_SIGBUS)
+ if (ret &
+ (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
return i ? i : -EFAULT;
BUG();
}
@@ -1419,18 +1426,47 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
- int flags = 0;
+ int flags = FOLL_TOUCH;
+ if (pages)
+ flags |= FOLL_GET;
if (write)
- flags |= GUP_FLAGS_WRITE;
+ flags |= FOLL_WRITE;
if (force)
- flags |= GUP_FLAGS_FORCE;
+ flags |= FOLL_FORCE;
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
-
EXPORT_SYMBOL(get_user_pages);
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by page_cache_release() or put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+ struct vm_area_struct *vma;
+ struct page *page;
+
+ if (__get_user_pages(current, current->mm, addr, 1,
+ FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
+ return NULL;
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ return page;
+}
+#endif /* CONFIG_ELF_CORE */
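To illustrate the contract documented above (a NULL return means "emit a hole", not an error), here is a small userspace model of a dump writer; get_dump_page() is replaced by a stand-in that sometimes returns NULL, and all names and sizes are illustrative:

/* Userspace model of the "NULL means hole" contract -- not kernel code.
 * A real dumper would typically seek over holes rather than write zeroes. */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* stand-in for get_dump_page(): pretend odd-numbered pages are holes */
static const char *fake_dump_page(unsigned long idx, char *buf)
{
	if (idx & 1)
		return NULL;
	memset(buf, 'A' + (int)(idx % 26), PAGE_SZ);
	return buf;
}

int main(void)
{
	char buf[PAGE_SZ], zeroes[PAGE_SZ] = { 0 };
	unsigned long idx;
	FILE *core = fopen("core.demo", "wb");

	if (!core)
		return 1;
	for (idx = 0; idx < 4; idx++) {
		const char *page = fake_dump_page(idx, buf);

		/* NULL: keep alignment by emitting a zero-filled hole */
		fwrite(page ? page : zeroes, 1, PAGE_SZ, core);
	}
	fclose(core);
	return 0;
}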
+
pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{
@@ -1608,7 +1644,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
* If we don't have pte special, then we have to use the pfn_valid()
* based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
* refcount the page if pfn_valid is true (hence insert_page rather
- * than insert_pfn).
+ * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
+ * without pte special, it would then be refcounted as a normal page.
*/
if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
struct page *page;
@@ -1974,7 +2011,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
- if (PageAnon(old_page)) {
+ if (PageAnon(old_page) && !PageKsm(old_page)) {
if (!trylock_page(old_page)) {
page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl);
@@ -2075,10 +2112,19 @@ gotten:
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- VM_BUG_ON(old_page == ZERO_PAGE(0));
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (!new_page)
- goto oom;
+
+ if (is_zero_pfn(pte_pfn(orig_pte))) {
+ new_page = alloc_zeroed_user_highpage_movable(vma, address);
+ if (!new_page)
+ goto oom;
+ } else {
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (!new_page)
+ goto oom;
+ cow_user_page(new_page, old_page, address, vma);
+ }
+ __SetPageUptodate(new_page);
+
/*
* Don't let another task, with possibly unlocked vma,
* keep the mlocked page.
@@ -2088,8 +2134,6 @@ gotten:
clear_page_mlock(old_page);
unlock_page(old_page);
}
- cow_user_page(new_page, old_page, address, vma);
- __SetPageUptodate(new_page);
if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;
@@ -2115,9 +2159,14 @@ gotten:
* seen in the presence of one thread doing SMC and another
* thread doing COW.
*/
- ptep_clear_flush_notify(vma, address, page_table);
+ ptep_clear_flush(vma, address, page_table);
page_add_new_anon_rmap(new_page, vma, address);
- set_pte_at(mm, address, page_table, entry);
+ /*
+ * We call the notify macro here because, when using secondary
+ * mmu page tables (such as kvm shadow page tables), we want the
+ * new page to be mapped directly into the secondary page table.
+ */
+ set_pte_at_notify(mm, address, page_table, entry);
update_mmu_cache(vma, address, entry);
if (old_page) {
/*
@@ -2360,7 +2409,7 @@ restart:
* @mapping: the address space containing mmaps to be unmapped.
* @holebegin: byte in first page to unmap, relative to the start of
* the underlying file. This will be rounded down to a PAGE_SIZE
- * boundary. Note that this is different from vmtruncate(), which
+ * boundary. Note that this is different from truncate_pagecache(), which
* must keep the partial page. In contrast, we must get rid of
* partial pages.
* @holelen: size of prospective hole in bytes. This will be rounded
@@ -2411,63 +2460,6 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);
-/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @offset: file offset to start truncating
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
- if (inode->i_size < offset) {
- unsigned long limit;
-
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out_big;
- i_size_write(inode, offset);
- } else {
- struct address_space *mapping = inode->i_mapping;
-
- /*
- * truncation of in-use swapfiles is disallowed - it would
- * cause subsequent swapout to scribble on the now-freed
- * blocks.
- */
- if (IS_SWAPFILE(inode))
- return -ETXTBSY;
- i_size_write(inode, offset);
-
- /*
- * unmap_mapping_range is called twice, first simply for
- * efficiency so that truncate_inode_pages does fewer
- * single-page unmaps. However after this first call, and
- * before truncate_inode_pages finishes, it is possible for
- * private pages to be COWed, which remain after
- * truncate_inode_pages finishes, hence the second
- * unmap_mapping_range call must be made for correctness.
- */
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- }
-
- if (inode->i_op->truncate)
- inode->i_op->truncate(inode);
- return 0;
-
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out_big:
- return -EFBIG;
-}
-EXPORT_SYMBOL(vmtruncate);
-
int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
{
struct address_space *mapping = inode->i_mapping;
@@ -2512,8 +2504,15 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
entry = pte_to_swp_entry(orig_pte);
- if (is_migration_entry(entry)) {
- migration_entry_wait(mm, pmd, address);
+ if (unlikely(non_swap_entry(entry))) {
+ if (is_migration_entry(entry)) {
+ migration_entry_wait(mm, pmd, address);
+ } else if (is_hwpoison_entry(entry)) {
+ ret = VM_FAULT_HWPOISON;
+ } else {
+ print_bad_pte(vma, address, orig_pte, NULL);
+ ret = VM_FAULT_OOM;
+ }
goto out;
}
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
@@ -2537,6 +2536,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
+ } else if (PageHWPoison(page)) {
+ ret = VM_FAULT_HWPOISON;
+ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ goto out;
}
lock_page(page);
@@ -2625,6 +2628,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl;
pte_t entry;
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+ ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
/* Allocate our own private page. */
pte_unmap(page_table);
@@ -2639,13 +2652,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (vma->vm_flags & VM_WRITE)
+ entry = pte_mkwrite(pte_mkdirty(entry));
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!pte_none(*page_table))
goto release;
+
inc_mm_counter(mm, anon_rss);
page_add_new_anon_rmap(page, vma, address);
+setpte:
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
@@ -2700,6 +2716,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
return ret;
+ if (unlikely(PageHWPoison(vmf.page))) {
+ if (ret & VM_FAULT_LOCKED)
+ unlock_page(vmf.page);
+ return VM_FAULT_HWPOISON;
+ }
+
/*
* For consistency in subsequent calls, make the faulted page always
* locked.
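The mm/memory.c hunks above reinstate zero-page handling for anonymous read faults: a read fault maps the shared zero page with a special pte, and only a later write allocates a private zeroed page (or copies the old one). As an illustration only, here is a minimal userspace sketch of that fault pattern; it assumes a Linux system where /proc/self/statm reports resident pages, and the RSS figures are merely indicative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Print resident-set size in pages, as reported by /proc/self/statm. */
    static long rss_pages(void)
    {
        long size, rss = -1;
        FILE *f = fopen("/proc/self/statm", "r");
        if (f && fscanf(f, "%ld %ld", &size, &rss) != 2)
            rss = -1;
        if (f)
            fclose(f);
        return rss;
    }

    int main(void)
    {
        size_t len = 64 * 1024 * 1024;
        volatile char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        printf("after mmap:   rss=%ld pages\n", rss_pages());

        /* Read faults: each page can be satisfied by the shared zero page. */
        long sum = 0;
        for (size_t i = 0; i < len; i += 4096)
            sum += p[i];
        printf("after reads:  rss=%ld pages (sum=%ld)\n", rss_pages(), sum);

        /* Write faults: now each page needs its own zeroed or copied page. */
        for (size_t i = 0; i < len; i += 4096)
            p[i] = 1;
        printf("after writes: rss=%ld pages\n", rss_pages());

        munmap((void *)p, len);
        return 0;
    }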
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e4412a676c8..821dee59637 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -339,8 +339,11 @@ EXPORT_SYMBOL_GPL(__remove_pages);
void online_page(struct page *page)
{
+ unsigned long pfn = page_to_pfn(page);
+
totalram_pages++;
- num_physpages++;
+ if (pfn >= num_physpages)
+ num_physpages = pfn + 1;
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page))
@@ -410,7 +413,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
if (!populated_zone(zone))
need_zonelists_rebuild = 1;
- ret = walk_memory_resource(pfn, nr_pages, &onlined_pages,
+ ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
online_pages_range);
if (ret) {
printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
@@ -422,6 +425,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
+ zone_pcp_update(zone);
setup_per_zone_wmarks();
calculate_zone_inactive_ratio(zone);
if (onlined_pages) {
@@ -701,7 +705,7 @@ offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
- walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
+ walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
offline_isolated_pages_cb);
}
@@ -727,7 +731,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
long offlined = 0;
int ret;
- ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
+ ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
check_pages_isolated_cb);
if (ret < 0)
offlined = (long)ret;
@@ -831,7 +835,6 @@ repeat:
zone->present_pages -= offlined_pages;
zone->zone_pgdat->node_present_pages -= offlined_pages;
totalram_pages -= offlined_pages;
- num_physpages -= offlined_pages;
setup_per_zone_wmarks();
calculate_zone_inactive_ratio(zone);
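online_pages() above is what eventually runs when a memory block is onlined from userspace; walk_system_ram_range() restricts the walk to ranges that are actually System RAM. A hedged sketch of the userspace side follows, assuming the conventional /sys/devices/system/memory/memoryN/state interface; "memory8" is a placeholder, real block numbers come from listing that directory, and root privileges are needed.

    #include <stdio.h>

    /* Ask the kernel to online one memory block; the kernel path ends up
     * in online_pages().  The block number here is purely illustrative. */
    int main(void)
    {
        const char *path = "/sys/devices/system/memory/memory8/state";
        FILE *f = fopen(path, "w");
        if (!f) {
            perror(path);
            return 1;
        }
        if (fputs("online\n", f) == EOF)
            perror("write");
        fclose(f);
        return 0;
    }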
diff --git a/mm/mempool.c b/mm/mempool.c
index 32e75d40050..1a3bc3d4d55 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -308,13 +308,6 @@ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
}
EXPORT_SYMBOL(mempool_kmalloc);
-void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
-{
- size_t size = (size_t)pool_data;
- return kzalloc(size, gfp_mask);
-}
-EXPORT_SYMBOL(mempool_kzalloc);
-
void mempool_kfree(void *element, void *pool_data)
{
kfree(element);
diff --git a/mm/migrate.c b/mm/migrate.c
index 939888f9dda..1a4bf481378 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -67,6 +67,8 @@ int putback_lru_pages(struct list_head *l)
list_for_each_entry_safe(page, page2, l, lru) {
list_del(&page->lru);
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
putback_lru_page(page);
count++;
}
@@ -147,7 +149,7 @@ out:
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
struct vm_area_struct *vma;
- struct address_space *mapping = page_mapping(new);
+ struct address_space *mapping = new->mapping;
struct prio_tree_iter iter;
pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -270,7 +272,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
- expected_count = 2 + !!page_has_private(page);
+ expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
(struct page *)radix_tree_deref_slot(pslot) != page) {
spin_unlock_irq(&mapping->tree_lock);
@@ -312,7 +314,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
*/
__dec_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(newpage, NR_FILE_PAGES);
-
+ if (PageSwapBacked(page)) {
+ __dec_zone_page_state(page, NR_SHMEM);
+ __inc_zone_page_state(newpage, NR_SHMEM);
+ }
spin_unlock_irq(&mapping->tree_lock);
return 0;
@@ -664,13 +669,15 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* needs to be effective.
*/
try_to_free_buffers(page);
+ goto rcu_unlock;
}
- goto rcu_unlock;
+ goto skip_unmap;
}
/* Establish migration ptes or remove ptes */
- try_to_unmap(page, 1);
+ try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+skip_unmap:
if (!page_mapped(page))
rc = move_to_new_page(newpage, page);
@@ -693,6 +700,8 @@ unlock:
* restored.
*/
list_del(&page->lru);
+ dec_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
putback_lru_page(page);
}
@@ -737,6 +746,13 @@ int migrate_pages(struct list_head *from,
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
int rc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ list_for_each_entry(page, from, lru)
+ __inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ local_irq_restore(flags);
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
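The mm/migrate.c hunks above account pages as NR_ISOLATED_ANON/NR_ISOLATED_FILE while they are off the LRU during migration. One common way such migrations are triggered from userspace is the move_pages(2) syscall; the sketch below assumes libnuma's <numaif.h> wrapper, linking with -lnuma, and a machine where node 1 exists.

    #include <numaif.h>   /* move_pages(); link with -lnuma */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesize = sysconf(_SC_PAGESIZE);
        void *page;

        if (posix_memalign(&page, pagesize, pagesize))
            return 1;
        *(char *)page = 1;                 /* fault the page in */

        void *pages[1] = { page };
        int nodes[1]  = { 1 };             /* target node (assumed present) */
        int status[1] = { -1 };

        /* pid 0 means the calling process; MPOL_MF_MOVE migrates only
         * pages mapped exclusively by this process. */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) != 0)
            perror("move_pages");
        printf("page now on node %d\n", status[0]);

        free(page);
        return 0;
    }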
diff --git a/mm/mlock.c b/mm/mlock.c
index 45eb650b965..bd6f0e466f6 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -139,49 +139,36 @@ static void munlock_vma_page(struct page *page)
}
/**
- * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
* @start: start address
* @end: end address
- * @mlock: 0 indicate munlock, otherwise mlock.
*
- * If @mlock == 0, unlock an mlocked range;
- * else mlock the range of pages. This takes care of making the pages present ,
- * too.
+ * This takes care of making the pages present too.
*
* return 0 on success, negative error code on error.
*
* vma->vm_mm->mmap_sem must be held for at least read.
*/
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- int mlock)
+ unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start;
struct page *pages[16]; /* 16 gives a reasonable batch */
int nr_pages = (end - start) / PAGE_SIZE;
int ret = 0;
- int gup_flags = 0;
+ int gup_flags;
VM_BUG_ON(start & ~PAGE_MASK);
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON(start < vma->vm_start);
VM_BUG_ON(end > vma->vm_end);
- VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
- (atomic_read(&mm->mm_users) != 0));
-
- /*
- * mlock: don't page populate if vma has PROT_NONE permission.
- * munlock: always do munlock although the vma has PROT_NONE
- * permission, or SIGKILL is pending.
- */
- if (!mlock)
- gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
- GUP_FLAGS_IGNORE_SIGKILL;
+ VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ gup_flags = FOLL_TOUCH | FOLL_GET;
if (vma->vm_flags & VM_WRITE)
- gup_flags |= GUP_FLAGS_WRITE;
+ gup_flags |= FOLL_WRITE;
while (nr_pages > 0) {
int i;
@@ -201,51 +188,45 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
* This can happen for, e.g., VM_NONLINEAR regions before
* a page has been allocated and mapped at a given offset,
* or for addresses that map beyond end of a file.
- * We'll mlock the the pages if/when they get faulted in.
+ * We'll mlock the pages if/when they get faulted in.
*/
if (ret < 0)
break;
- if (ret == 0) {
- /*
- * We know the vma is there, so the only time
- * we cannot get a single page should be an
- * error (ret < 0) case.
- */
- WARN_ON(1);
- break;
- }
lru_add_drain(); /* push cached pages to LRU */
for (i = 0; i < ret; i++) {
struct page *page = pages[i];
- lock_page(page);
- /*
- * Because we lock page here and migration is blocked
- * by the elevated reference, we need only check for
- * page truncation (file-cache only).
- */
if (page->mapping) {
- if (mlock)
+ /*
+ * That preliminary check is mainly to avoid
+ * the pointless overhead of lock_page on the
+ * ZERO_PAGE: which might bounce very badly if
+ * there is contention. However, we're still
+ * dirtying its cacheline with get/put_page:
+ * we'll add another __get_user_pages flag to
+ * avoid it if that case turns out to matter.
+ */
+ lock_page(page);
+ /*
+ * Because we lock page here and migration is
+ * blocked by the elevated reference, we need
+ * only check for file-cache page truncation.
+ */
+ if (page->mapping)
mlock_vma_page(page);
- else
- munlock_vma_page(page);
+ unlock_page(page);
}
- unlock_page(page);
- put_page(page); /* ref from get_user_pages() */
-
- /*
- * here we assume that get_user_pages() has given us
- * a list of virtually contiguous pages.
- */
- addr += PAGE_SIZE; /* for next get_user_pages() */
- nr_pages--;
+ put_page(page); /* ref from get_user_pages() */
}
+
+ addr += ret * PAGE_SIZE;
+ nr_pages -= ret;
ret = 0;
}
- return ret; /* count entire vma as locked_vm */
+ return ret; /* 0 or negative error code */
}
/*
@@ -289,7 +270,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {
- __mlock_vma_pages_range(vma, start, end, 1);
+ __mlock_vma_pages_range(vma, start, end);
/* Hide errors from mmap() and other callers */
return 0;
@@ -310,7 +291,6 @@ no_mlock:
return nr_pages; /* error or pages NOT mlocked */
}
-
/*
* munlock_vma_pages_range() - munlock all pages in the vma range.'
* @vma - vma containing range to be munlock()ed.
@@ -330,10 +310,38 @@ no_mlock:
* free them. This will result in freeing mlocked pages.
*/
void munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end)
{
+ unsigned long addr;
+
+ lru_add_drain();
vma->vm_flags &= ~VM_LOCKED;
- __mlock_vma_pages_range(vma, start, end, 0);
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+ /*
+ * Although FOLL_DUMP is intended for get_dump_page(),
+ * it just so happens that its special treatment of the
+ * ZERO_PAGE (returning an error instead of doing get_page)
+ * suits munlock very well (and if somehow an abnormal page
+ * has sneaked into the range, we won't oops here: great).
+ */
+ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+ if (page && !IS_ERR(page)) {
+ lock_page(page);
+ /*
+ * Like in __mlock_vma_pages_range(),
+ * because we lock page here and migration is
+ * blocked by the elevated reference, we need
+ * only check for file-cache page truncation.
+ */
+ if (page->mapping)
+ munlock_vma_page(page);
+ unlock_page(page);
+ put_page(page);
+ }
+ cond_resched();
+ }
}
/*
@@ -400,18 +408,14 @@ success:
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED, __mlock_vma_pages_range will bring it back.
*/
- vma->vm_flags = newflags;
if (lock) {
- ret = __mlock_vma_pages_range(vma, start, end, 1);
-
- if (ret > 0) {
- mm->locked_vm -= ret;
- ret = 0;
- } else
- ret = __mlock_posix_error_return(ret); /* translate if needed */
+ vma->vm_flags = newflags;
+ ret = __mlock_vma_pages_range(vma, start, end);
+ if (ret < 0)
+ ret = __mlock_posix_error_return(ret);
} else {
- __mlock_vma_pages_range(vma, start, end, 0);
+ munlock_vma_pages_range(vma, start, end);
}
out:
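The reworked __mlock_vma_pages_range()/munlock_vma_pages_range() above are the kernel side of mlock(2) and munlock(2). A small runnable check of the user-visible effect, assuming only that /proc/self/status exposes a VmLck line (as Linux does):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Print the VmLck line from /proc/self/status. */
    static void show_vmlck(const char *when)
    {
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");
        if (!f)
            return;
        while (fgets(line, sizeof(line), f))
            if (!strncmp(line, "VmLck:", 6))
                printf("%-15s %s", when, line);
        fclose(f);
    }

    int main(void)
    {
        size_t len = 4 * 1024 * 1024;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        show_vmlck("before mlock:");
        if (mlock(p, len))       /* faults pages in and mlocks them */
            perror("mlock");
        show_vmlck("after mlock:");
        munlock(p, len);         /* clears VM_LOCKED, munlocks each page */
        show_vmlck("after munlock:");

        munmap(p, len);
        return 0;
    }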
diff --git a/mm/mmap.c b/mm/mmap.c
index 26892e346d8..21d4029a07b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -28,7 +28,7 @@
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -570,9 +570,9 @@ again: remove_next = 1 + (end > next->vm_end);
/*
* When changing only vma->vm_end, we don't really need
- * anon_vma lock: but is that case worth optimizing out?
+ * anon_vma lock.
*/
- if (vma->anon_vma)
+ if (vma->anon_vma && (insert || importer || start != vma->vm_start))
anon_vma = vma->anon_vma;
if (anon_vma) {
spin_lock(&anon_vma->lock);
@@ -656,9 +656,6 @@ again: remove_next = 1 + (end > next->vm_end);
validate_mm(mm);
}
-/* Flags that can be inherited from an existing mapping when merging */
-#define VM_MERGEABLE_FLAGS (VM_CAN_NONLINEAR)
-
/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
@@ -666,7 +663,8 @@ again: remove_next = 1 + (end > next->vm_end);
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
{
- if ((vma->vm_flags ^ vm_flags) & ~VM_MERGEABLE_FLAGS)
+ /* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
+ if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
return 0;
if (vma->vm_file != file)
return 0;
@@ -951,6 +949,24 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
+ if (flags & MAP_HUGETLB) {
+ struct user_struct *user = NULL;
+ if (file)
+ return -EINVAL;
+
+ /*
+ * VM_NORESERVE is used because the reservations will be
+ * taken when vm_ops->mmap() is called
+ * A dummy user value is used because we are not locking
+ * memory so no accounting is necessary
+ */
+ len = ALIGN(len, huge_page_size(&default_hstate));
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+ &user, HUGETLB_ANONHUGE_INODE);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ }
+
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -965,11 +981,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
- if (flags & MAP_LOCKED) {
+ if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
- vm_flags |= VM_LOCKED;
- }
/* mlock MCL_FUTURE? */
if (vm_flags & VM_LOCKED) {
@@ -1195,21 +1209,21 @@ munmap_back:
goto unmap_and_free_vma;
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
+
+ /* Can addr have changed??
+ *
+ * Answer: Yes, several device drivers can do it in their
+ * f_op->mmap method. -DaveM
+ */
+ addr = vma->vm_start;
+ pgoff = vma->vm_pgoff;
+ vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
}
- /* Can addr have changed??
- *
- * Answer: Yes, several device drivers can do it in their
- * f_op->mmap method. -DaveM
- */
- addr = vma->vm_start;
- pgoff = vma->vm_pgoff;
- vm_flags = vma->vm_flags;
-
if (vma_wants_writenotify(vma))
vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
@@ -1220,7 +1234,7 @@ munmap_back:
if (correct_wcount)
atomic_inc(&inode->i_writecount);
out:
- perf_counter_mmap(vma);
+ perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -2111,6 +2125,7 @@ void exit_mmap(struct mm_struct *mm)
/* Use -1 here to ensure all VMAs in the mm are unmapped */
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
+
free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(tlb, 0, end);
@@ -2308,7 +2323,7 @@ int install_special_mapping(struct mm_struct *mm,
mm->total_vm += len >> PAGE_SHIFT;
- perf_counter_mmap(vma);
+ perf_event_mmap(vma);
return 0;
}
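The do_mmap_pgoff() hunk above wires anonymous MAP_HUGETLB mappings up to an internal hugetlbfs file. A hedged userspace sketch, assuming headers that define MAP_HUGETLB and a system with free huge pages (check /proc/sys/vm/nr_hugepages); the fallback constant below is the x86 value and older headers may differ.

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB 0x40000   /* x86 value; older headers may lack it */
    #endif

    int main(void)
    {
        size_t len = 2 * 1024 * 1024;     /* one 2 MB huge page on x86 */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap(MAP_HUGETLB)");  /* e.g. ENOMEM if none are free */
            return 1;
        }
        p[0] = 1;                         /* touch it: backed by a huge page */
        printf("huge-page mapping at %p\n", (void *)p);
        munmap(p, len);
        return 0;
    }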
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
new file mode 100644
index 00000000000..ded9081f402
--- /dev/null
+++ b/mm/mmu_context.c
@@ -0,0 +1,58 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ *
+ * See ../COPYING for licensing terms.
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * use_mm
+ * Makes the calling kernel thread take on the specified
+ * mm context.
+ * Called by the retry thread to execute retries within the
+ * iocb issuer's mm context, so that copy_from/to_user
+ * operations work seamlessly for aio.
+ * (Note: this routine is intended to be called only
+ * from a kernel thread context)
+ */
+void use_mm(struct mm_struct *mm)
+{
+ struct mm_struct *active_mm;
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
+ task_unlock(tsk);
+
+ if (active_mm != mm)
+ mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ * Reverses the effect of use_mm, i.e. releases the
+ * specified mm context which was earlier taken on
+ * by the calling kernel thread
+ * (Note: this routine is intended to be called only
+ * from a kernel thread context)
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
+ task_unlock(tsk);
+}
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 5f4ef0250be..7e33f2cb3c7 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -99,6 +99,26 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
return young;
}
+void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ pte_t pte)
+{
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->change_pte)
+ mn->ops->change_pte(mn, mm, address, pte);
+ /*
+ * Some drivers don't have change_pte,
+ * so we must call invalidate_page in that case.
+ */
+ else if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+ rcu_read_unlock();
+}
+
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
unsigned long address)
{
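__mmu_notifier_change_pte() above prefers a driver's ->change_pte() callback and falls back to ->invalidate_page() when the driver does not implement it. The following is a tiny standalone simulation of that optional-hook dispatch pattern; the struct and names are illustrative, not the kernel's.

    #include <stdio.h>

    /* Illustrative stand-in for a notifier ops table with an optional hook. */
    struct notifier_ops {
        void (*change_pte)(unsigned long address);      /* may be NULL */
        void (*invalidate_page)(unsigned long address);
    };

    static void dispatch(const struct notifier_ops *ops, unsigned long address)
    {
        if (ops->change_pte)
            ops->change_pte(address);        /* preferred: update in place */
        else if (ops->invalidate_page)
            ops->invalidate_page(address);   /* fallback: drop the mapping */
    }

    static void my_change(unsigned long a)     { printf("change_pte  %#lx\n", a); }
    static void my_invalidate(unsigned long a) { printf("invalidate  %#lx\n", a); }

    int main(void)
    {
        struct notifier_ops newer = { my_change, my_invalidate };
        struct notifier_ops older = { NULL,      my_invalidate };

        dispatch(&newer, 0x1000);   /* takes the change_pte path */
        dispatch(&older, 0x2000);   /* falls back to invalidate_page */
        return 0;
    }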
diff --git a/mm/mprotect.c b/mm/mprotect.c
index d80311baeb2..8bc969d8112 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,7 +23,7 @@
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
@@ -300,7 +300,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
if (error)
goto out;
- perf_counter_mmap(vma);
+ perf_event_mmap(vma);
nstart = tmp;
if (nstart < prev->vm_end)
diff --git a/mm/mremap.c b/mm/mremap.c
index a39b7b91be4..97bff254771 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -11,6 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
+#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
@@ -85,8 +86,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
if (vma->vm_file) {
/*
* Subtle point from Rajesh Venkatasubramanian: before
- * moving file-based ptes, we must lock vmtruncate out,
- * since it might clean the dst vma before the src vma,
+ * moving file-based ptes, we must lock truncate_pagecache
+ * out, since it might clean the dst vma before the src vma,
* and we propagate stale pages into the dst afterward.
*/
mapping = vma->vm_file->f_mapping;
@@ -174,6 +175,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long excess = 0;
unsigned long hiwater_vm;
int split = 0;
+ int err;
/*
* We'd prefer to avoid failure later on in do_munmap:
@@ -182,6 +184,18 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (mm->map_count >= sysctl_max_map_count - 3)
return -ENOMEM;
+ /*
+ * Advise KSM to break any KSM pages in the area to be moved:
+ * it would be confusing if they were to turn up at the new
+ * location, where they happen to coincide with different KSM
+ * pages recently unmapped. But leave vma->vm_flags as it was,
+ * so KSM can come around to merge on vma and new_vma afterwards.
+ */
+ err = ksm_madvise(vma, old_addr, old_addr + old_len,
+ MADV_UNMERGEABLE, &vm_flags);
+ if (err)
+ return err;
+
new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
if (!new_vma)
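move_vma() above asks KSM to break any merged pages in the source range before the move, via ksm_madvise(..., MADV_UNMERGEABLE, ...). From userspace the equivalent advice is given with madvise(2); the sketch assumes headers that define MADV_MERGEABLE/MADV_UNMERGEABLE (introduced alongside KSM) and a kernel built with CONFIG_KSM.

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MADV_MERGEABLE
    #define MADV_MERGEABLE   12   /* values used by KSM-era kernels */
    #define MADV_UNMERGEABLE 13
    #endif

    int main(void)
    {
        size_t len = 4 * 1024 * 1024;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* Offer the range to KSM, then withdraw it again - roughly the
         * advice move_vma() forces before relocating the mapping. */
        if (madvise(p, len, MADV_MERGEABLE))
            perror("madvise(MADV_MERGEABLE)");    /* EINVAL without CONFIG_KSM */
        if (madvise(p, len, MADV_UNMERGEABLE))
            perror("madvise(MADV_UNMERGEABLE)");

        printf("range %p..%p advised\n", (void *)p, (void *)(p + len));
        munmap(p, len);
        return 0;
    }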
diff --git a/mm/nommu.c b/mm/nommu.c
index 66e81e7e9fe..c73aa4753d7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -33,6 +33,7 @@
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
#include "internal.h"
static inline __attribute__((format(printf, 1, 2)))
@@ -56,12 +57,11 @@ void no_printk(const char *fmt, ...)
no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
-#include "internal.h"
-
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
+unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
@@ -83,46 +83,6 @@ struct vm_operations_struct generic_file_vm_ops = {
};
/*
- * Handle all mappings that got truncated by a "truncate()"
- * system call.
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page. Ugly, but necessary.
- */
-int vmtruncate(struct inode *inode, loff_t offset)
-{
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
-
- if (inode->i_size < offset)
- goto do_expand;
- i_size_write(inode, offset);
-
- truncate_inode_pages(mapping, offset);
- goto out_truncate;
-
-do_expand:
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out;
- i_size_write(inode, offset);
-
-out_truncate:
- if (inode->i_op->truncate)
- inode->i_op->truncate(inode);
- return 0;
-out_sig:
- send_sig(SIGXFSZ, current, 0);
-out:
- return -EFBIG;
-}
-
-EXPORT_SYMBOL(vmtruncate);
-
-/*
* Return the total memory allocated for this pointer, not
* just what the caller asked for.
*
@@ -170,21 +130,20 @@ unsigned int kobjsize(const void *objp)
}
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int nr_pages, int flags,
+ unsigned long start, int nr_pages, unsigned int foll_flags,
struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
int i;
- int write = !!(flags & GUP_FLAGS_WRITE);
- int force = !!(flags & GUP_FLAGS_FORCE);
- int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
/* calculate required read or write permissions.
- * - if 'force' is set, we only require the "MAY" flags.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
*/
- vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ vm_flags = (foll_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (foll_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
@@ -192,8 +151,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto finish_or_fault;
/* protect what we can, including chardevs */
- if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
- (!ignore && !(vm_flags & vma->vm_flags)))
+ if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
goto finish_or_fault;
if (pages) {
@@ -212,7 +171,6 @@ finish_or_fault:
return i ? : -EFAULT;
}
-
/*
* get a list of pages in an address range belonging to the specified process
* and indicate the VMA that covers each page
@@ -227,9 +185,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int flags = 0;
if (write)
- flags |= GUP_FLAGS_WRITE;
+ flags |= FOLL_WRITE;
if (force)
- flags |= GUP_FLAGS_FORCE;
+ flags |= FOLL_FORCE;
return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
@@ -627,6 +585,22 @@ static void put_nommu_region(struct vm_region *region)
}
/*
+ * update protection on a vma
+ */
+static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+{
+#ifdef CONFIG_MPU
+ struct mm_struct *mm = vma->vm_mm;
+ long start = vma->vm_start & PAGE_MASK;
+ while (start < vma->vm_end) {
+ protect_page(mm, start, flags);
+ start += PAGE_SIZE;
+ }
+ update_protections(mm);
+#endif
+}
+
+/*
* add a VMA into a process's mm_struct in the appropriate place in the list
* and tree and add to the address space's page tree also if not an anonymous
* page
@@ -645,6 +619,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
mm->map_count++;
vma->vm_mm = mm;
+ protect_vma(vma, vma->vm_flags);
+
/* add the VMA to the mapping */
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
@@ -707,6 +683,8 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
kenter("%p", vma);
+ protect_vma(vma, 0);
+
mm->map_count--;
if (mm->mmap_cache == vma)
mm->mmap_cache = NULL;
@@ -848,7 +826,7 @@ static int validate_mmap_request(struct file *file,
int ret;
/* do the simple checks first */
- if (flags & MAP_FIXED || addr) {
+ if (flags & MAP_FIXED) {
printk(KERN_DEBUG
"%d: Can't do fixed-address/overlay mmap of RAM\n",
current->pid);
@@ -1056,7 +1034,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
if (ret == 0) {
vma->vm_region->vm_top = vma->vm_region->vm_end;
- return ret;
+ return 0;
}
if (ret != -ENOSYS)
return ret;
@@ -1073,7 +1051,8 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
*/
static int do_mmap_private(struct vm_area_struct *vma,
struct vm_region *region,
- unsigned long len)
+ unsigned long len,
+ unsigned long capabilities)
{
struct page *pages;
unsigned long total, point, n, rlen;
@@ -1084,13 +1063,13 @@ static int do_mmap_private(struct vm_area_struct *vma,
* shared mappings on devices or memory
* - VM_MAYSHARE will be set if it may attempt to share
*/
- if (vma->vm_file) {
+ if (capabilities & BDI_CAP_MAP_DIRECT) {
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
if (ret == 0) {
/* shouldn't return success if we're not sharing */
BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
vma->vm_region->vm_top = vma->vm_region->vm_end;
- return ret;
+ return 0;
}
if (ret != -ENOSYS)
return ret;
@@ -1203,9 +1182,6 @@ unsigned long do_mmap_pgoff(struct file *file,
kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
- if (!(flags & MAP_FIXED))
- addr = round_hint_to_min(addr);
-
/* decide whether we should attempt the mapping, and if so what sort of
* mapping */
ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
@@ -1215,6 +1191,9 @@ unsigned long do_mmap_pgoff(struct file *file,
return ret;
}
+ /* we ignore the address hint */
+ addr = 0;
+
/* we've determined that we can make the mapping, now translate what we
* now know into VMA flags */
vm_flags = determine_vm_flags(file, prot, flags, capabilities);
@@ -1328,7 +1307,7 @@ unsigned long do_mmap_pgoff(struct file *file,
* - this is the hook for quasi-memory character devices to
* tell us the location of a shared mapping
*/
- if (file && file->f_op->get_unmapped_area) {
+ if (capabilities & BDI_CAP_MAP_DIRECT) {
addr = file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
if (IS_ERR((void *) addr)) {
@@ -1352,15 +1331,17 @@ unsigned long do_mmap_pgoff(struct file *file,
}
vma->vm_region = region;
- add_nommu_region(region);
- /* set up the mapping */
+ /* set up the mapping
+ * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
+ */
if (file && vma->vm_flags & VM_SHARED)
ret = do_mmap_shared_file(vma);
else
- ret = do_mmap_private(vma, region, len);
+ ret = do_mmap_private(vma, region, len, capabilities);
if (ret < 0)
- goto error_put_region;
+ goto error_just_free;
+ add_nommu_region(region);
/* okay... we have a mapping; now we have to register it */
result = vma->vm_start;
@@ -1378,19 +1359,6 @@ share:
kleave(" = %lx", result);
return result;
-error_put_region:
- __put_nommu_region(region);
- if (vma) {
- if (vma->vm_file) {
- fput(vma->vm_file);
- if (vma->vm_flags & VM_EXECUTABLE)
- removed_exe_file_vma(vma->vm_mm);
- }
- kmem_cache_free(vm_area_cachep, vma);
- }
- kleave(" = %d [pr]", ret);
- return ret;
-
error_just_free:
up_write(&nommu_region_sem);
error:
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7b2460e922..ea2147dabba 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -34,6 +34,23 @@ int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */
+/*
+ * Do the allowed memory nodes of any thread in the target process overlap ours?
+ */
+static int has_intersects_mems_allowed(struct task_struct *tsk)
+{
+ struct task_struct *t;
+
+ t = tsk;
+ do {
+ if (cpuset_mems_allowed_intersects(current, t))
+ return 1;
+ t = next_thread(t);
+ } while (t != tsk);
+
+ return 0;
+}
+
/**
* badness - calculate a numeric value for how bad this task has been
* @p: task struct of which task we should calculate
@@ -58,6 +75,13 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
unsigned long points, cpu_time, run_time;
struct mm_struct *mm;
struct task_struct *child;
+ int oom_adj = p->signal->oom_adj;
+ struct task_cputime task_time;
+ unsigned long utime;
+ unsigned long stime;
+
+ if (oom_adj == OOM_DISABLE)
+ return 0;
task_lock(p);
mm = p->mm;
@@ -79,7 +103,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
/*
* swapoff can easily use up all memory, so kill those first.
*/
- if (p->flags & PF_SWAPOFF)
+ if (p->flags & PF_OOM_ORIGIN)
return ULONG_MAX;
/*
@@ -102,8 +126,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* of seconds. There is no particular reason for this other than
* that it turned out to work very well in practice.
*/
- cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
- >> (SHIFT_HZ + 3);
+ thread_group_cputime(p, &task_time);
+ utime = cputime_to_jiffies(task_time.utime);
+ stime = cputime_to_jiffies(task_time.stime);
+ cpu_time = (utime + stime) >> (SHIFT_HZ + 3);
+
if (uptime >= p->start_time.tv_sec)
run_time = (uptime - p->start_time.tv_sec) >> 10;
@@ -144,19 +171,19 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* because p may have allocated or otherwise mapped memory on
* this node before. However it will be less likely.
*/
- if (!cpuset_mems_allowed_intersects(current, p))
+ if (!has_intersects_mems_allowed(p))
points /= 8;
/*
- * Adjust the score by oomkilladj.
+ * Adjust the score by oom_adj.
*/
- if (p->oomkilladj) {
- if (p->oomkilladj > 0) {
+ if (oom_adj) {
+ if (oom_adj > 0) {
if (!points)
points = 1;
- points <<= p->oomkilladj;
+ points <<= oom_adj;
} else
- points >>= -(p->oomkilladj);
+ points >>= -(oom_adj);
}
#ifdef DEBUG
@@ -200,13 +227,13 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
static struct task_struct *select_bad_process(unsigned long *ppoints,
struct mem_cgroup *mem)
{
- struct task_struct *g, *p;
+ struct task_struct *p;
struct task_struct *chosen = NULL;
struct timespec uptime;
*ppoints = 0;
do_posix_clock_monotonic_gettime(&uptime);
- do_each_thread(g, p) {
+ for_each_process(p) {
unsigned long points;
/*
@@ -251,7 +278,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
*ppoints = ULONG_MAX;
}
- if (p->oomkilladj == OOM_DISABLE)
+ if (p->signal->oom_adj == OOM_DISABLE)
continue;
points = badness(p, uptime.tv_sec);
@@ -259,7 +286,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
chosen = p;
*ppoints = points;
}
- } while_each_thread(g, p);
+ }
return chosen;
}
@@ -304,7 +331,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
}
printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
- get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
+ get_mm_rss(mm), (int)task_cpu(p), p->signal->oom_adj,
p->comm);
task_unlock(p);
} while_each_thread(g, p);
@@ -346,11 +373,6 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
static int oom_kill_task(struct task_struct *p)
{
- struct mm_struct *mm;
- struct task_struct *g, *q;
-
- mm = p->mm;
-
/* WARNING: mm may not be dereferenced since we did not obtain its
* value from get_task_mm(p). This is OK since all we need to do is
* compare mm to q->mm below.
@@ -359,30 +381,11 @@ static int oom_kill_task(struct task_struct *p)
* change to NULL at any time since we do not hold task_lock(p).
* However, this is of no concern to us.
*/
-
- if (mm == NULL)
+ if (!p->mm || p->signal->oom_adj == OOM_DISABLE)
return 1;
- /*
- * Don't kill the process if any threads are set to OOM_DISABLE
- */
- do_each_thread(g, q) {
- if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
- return 1;
- } while_each_thread(g, q);
-
__oom_kill_task(p, 1);
- /*
- * kill all processes that share the ->mm (i.e. all threads),
- * but are in a different thread group. Don't let them have access
- * to memory reserves though, otherwise we might deplete all memory.
- */
- do_each_thread(g, q) {
- if (q->mm == mm && !same_thread_group(q, p))
- force_sig(SIGKILL, q);
- } while_each_thread(g, q);
-
return 0;
}
@@ -394,8 +397,9 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
if (printk_ratelimit()) {
printk(KERN_WARNING "%s invoked oom-killer: "
- "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
- current->comm, gfp_mask, order, current->oomkilladj);
+ "gfp_mask=0x%x, order=%d, oom_adj=%d\n",
+ current->comm, gfp_mask, order,
+ current->signal->oom_adj);
task_lock(current);
cpuset_print_task_mems_allowed(current);
task_unlock(current);
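badness() above now reads the adjustment from p->signal->oom_adj, making it a per-process rather than per-thread property. Userspace still sets it through procfs; the sketch below assumes the /proc/<pid>/oom_adj interface of kernels from this era (later superseded by oom_score_adj) and that /proc/<pid>/oom_score is readable.

    #include <stdio.h>

    /* Raise this process's OOM priority: positive oom_adj values make it a
     * more likely victim; -17 (OOM_DISABLE) would exempt it entirely. */
    int main(void)
    {
        FILE *f = fopen("/proc/self/oom_adj", "w");
        if (!f) {
            perror("/proc/self/oom_adj");
            return 1;
        }
        fputs("10\n", f);          /* bias the badness score upward */
        fclose(f);

        f = fopen("/proc/self/oom_score", "r");
        if (f) {
            long score;
            if (fscanf(f, "%ld", &score) == 1)
                printf("oom_score is now %ld\n", score);
            fclose(f);
        }
        return 0;
    }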
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d1ba4644105..a3b14090b1f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -44,18 +44,21 @@ static long ratelimit_pages = 32;
/*
* When balance_dirty_pages decides that the caller needs to perform some
* non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
* large amounts of I/O are submitted.
*/
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
{
- return ratelimit_pages + ratelimit_pages / 2;
+ if (dirtied < ratelimit_pages)
+ dirtied = ratelimit_pages;
+
+ return dirtied + dirtied / 2;
}
/* The following parameters are exported via /proc/sys/vm */
/*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
*/
int dirty_background_ratio = 10;
@@ -155,37 +158,37 @@ static void update_completion_period(void)
}
int dirty_background_ratio_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
- ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
dirty_background_bytes = 0;
return ret;
}
int dirty_background_bytes_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write)
dirty_background_ratio = 0;
return ret;
}
int dirty_ratio_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int old_ratio = vm_dirty_ratio;
int ret;
- ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
update_completion_period();
vm_dirty_bytes = 0;
@@ -195,13 +198,13 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
int dirty_bytes_handler(struct ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
unsigned long old_bytes = vm_dirty_bytes;
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
update_completion_period();
vm_dirty_ratio = 0;
@@ -380,7 +383,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
struct zone *z =
&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
- x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+ x += zone_page_state(z, NR_FREE_PAGES) +
+ zone_reclaimable_pages(z);
}
/*
* Make sure that the number of highmem pages is never larger
@@ -404,7 +408,7 @@ unsigned long determine_dirtyable_memory(void)
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
@@ -473,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
* balance_dirty_pages() must be called by processes which are generating dirty
* data. It looks at the number of dirty pages in the machine and will force
* the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
*/
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+ unsigned long write_chunk)
{
long nr_reclaimable, bdi_nr_reclaimable;
long nr_writeback, bdi_nr_writeback;
@@ -484,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping)
unsigned long dirty_thresh;
unsigned long bdi_thresh;
unsigned long pages_written = 0;
- unsigned long write_chunk = sync_writeback_pages();
unsigned long pause = 1;
struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -578,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping)
bdi->dirty_exceeded = 0;
if (writeback_in_progress(bdi))
- return; /* pdflush is already working this queue */
+ return;
/*
* In laptop mode, we wait until hitting the higher threshold before
@@ -589,10 +593,10 @@ static void balance_dirty_pages(struct address_space *mapping)
* background_thresh, to keep the amount of dirty memory low.
*/
if ((laptop_mode && pages_written) ||
- (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
- + global_page_state(NR_UNSTABLE_NFS))
+ (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
+ + global_page_state(NR_UNSTABLE_NFS))
> background_thresh)))
- bdi_start_writeback(bdi, nr_writeback);
+ bdi_start_writeback(bdi, NULL, 0);
}
void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -639,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
p = &__get_cpu_var(bdp_ratelimits);
*p += nr_pages_dirtied;
if (unlikely(*p >= ratelimit)) {
+ ratelimit = sync_writeback_pages(*p);
*p = 0;
preempt_enable();
- balance_dirty_pages(mapping);
+ balance_dirty_pages(mapping, ratelimit);
return;
}
preempt_enable();
@@ -685,9 +690,9 @@ static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, buffer, length, ppos);
return 0;
}
@@ -1148,6 +1153,13 @@ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
EXPORT_SYMBOL(redirty_page_for_writepage);
/*
+ * Dirty a page.
+ *
+ * for the benefit of asynchronous memory error handling, which prefers a
+ * consistent dirty state. This rule can be broken in some special cases,
+ * but it is better not to.
+ * but should be better not to.
+ *
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
*/
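sync_writeback_pages() above now scales the write chunk with the number of pages the caller just dirtied instead of using a fixed multiple of ratelimit_pages. A one-function userspace mirror of that arithmetic, for clarity; the constant 32 matches the static default in the hunk above.

    #include <stdio.h>

    static const unsigned long ratelimit_pages = 32;  /* page-writeback.c default */

    /* Mirror of sync_writeback_pages(): write back 1.5x the pages dirtied,
     * but never less than 1.5x the ratelimit. */
    static unsigned long sync_writeback_pages(unsigned long dirtied)
    {
        if (dirtied < ratelimit_pages)
            dirtied = ratelimit_pages;
        return dirtied + dirtied / 2;
    }

    int main(void)
    {
        unsigned long samples[] = { 1, 32, 100, 4096 };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("dirtied %5lu -> write chunk %5lu pages\n",
                   samples[i], sync_writeback_pages(samples[i]));
        return 0;
    }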
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a0de15f4698..bf720550b44 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,6 +48,7 @@
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
+#include <trace/events/kmem.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(node_states);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -123,8 +123,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
int min_free_kbytes = 1024;
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -234,6 +234,12 @@ static void bad_page(struct page *page)
static unsigned long nr_shown;
static unsigned long nr_unshown;
+ /* Don't complain about poisoned pages */
+ if (PageHWPoison(page)) {
+ __ClearPageBuddy(page);
+ return;
+ }
+
/*
* Allow a burst of 60 reports, then keep quiet for that minute;
* or allow a steady drip of one report per second.
@@ -510,7 +516,7 @@ static inline int free_pages_check(struct page *page)
}
/*
- * Frees a list of pages.
+ * Frees a number of pages from the PCP lists
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
@@ -520,22 +526,42 @@ static inline int free_pages_check(struct page *page)
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
-static void free_pages_bulk(struct zone *zone, int count,
- struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+ struct per_cpu_pages *pcp)
{
+ int migratetype = 0;
+ int batch_free = 0;
+
spin_lock(&zone->lock);
zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
- while (count--) {
+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+ while (count) {
struct page *page;
+ struct list_head *list;
- VM_BUG_ON(list_empty(list));
- page = list_entry(list->prev, struct page, lru);
- /* have to delete it as __free_one_page list manipulates */
- list_del(&page->lru);
- __free_one_page(page, zone, order, page_private(page));
+ /*
+ * Remove pages from lists in a round-robin fashion. A
+ * batch_free count is maintained that is incremented when an
+ * empty list is encountered. This is so more pages are freed
+ * off fuller lists instead of spinning excessively around empty
+ * lists
+ */
+ do {
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+ list = &pcp->lists[migratetype];
+ } while (list_empty(list));
+
+ do {
+ page = list_entry(list->prev, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+ __free_one_page(page, zone, 0, migratetype);
+ trace_mm_page_pcpu_drain(page, 0, migratetype);
+ } while (--count && --batch_free && !list_empty(list));
}
spin_unlock(&zone->lock);
}
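The free_pcppages_bulk() rewrite above drains the per-cpu lists round-robin, incrementing batch_free for every empty list it skips so that fuller lists give up proportionally more pages. Below is a small standalone simulation of that selection loop, using plain counters instead of struct page lists; the initial counts are made up.

    #include <stdio.h>

    #define MIGRATE_PCPTYPES 3   /* unmovable, reclaimable, movable */

    int main(void)
    {
        int lists[MIGRATE_PCPTYPES] = { 5, 0, 20 };  /* pages on each pcp list */
        int count = 16;                              /* pages we want to free */
        int migratetype = 0, batch_free = 0;

        while (count) {
            /* Pick the next non-empty list, growing batch_free for each
             * empty one skipped - mirrors the do/while pair in the hunk. */
            do {
                batch_free++;
                if (++migratetype == MIGRATE_PCPTYPES)
                    migratetype = 0;
            } while (lists[migratetype] == 0);

            do {
                lists[migratetype]--;
                printf("freed one page from list %d\n", migratetype);
            } while (--count && --batch_free && lists[migratetype]);
        }
        return 0;
    }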
@@ -557,7 +583,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
unsigned long flags;
int i;
int bad = 0;
- int wasMlocked = TestClearPageMlocked(page);
+ int wasMlocked = __TestClearPageMlocked(page);
kmemcheck_free_shadow(page, order);
@@ -646,7 +672,7 @@ static inline void expand(struct zone *zone, struct page *page,
/*
* This page is about to be returned from the page allocator
*/
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static inline int check_new_page(struct page *page)
{
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
@@ -655,6 +681,18 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
bad_page(page);
return 1;
}
+ return 0;
+}
+
+static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+{
+ int i;
+
+ for (i = 0; i < (1 << order); i++) {
+ struct page *p = page + i;
+ if (unlikely(check_new_page(p)))
+ return 1;
+ }
set_page_private(page, 0);
set_page_refcounted(page);
@@ -783,6 +821,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
return move_freepages(zone, start_page, end_page, migratetype);
}
+static void change_pageblock_range(struct page *pageblock_page,
+ int start_order, int migratetype)
+{
+ int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+ while (nr_pageblocks--) {
+ set_pageblock_migratetype(pageblock_page, migratetype);
+ pageblock_page += pageblock_nr_pages;
+ }
+}
+
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -836,11 +885,16 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
list_del(&page->lru);
rmv_page_order(page);
- if (current_order == pageblock_order)
- set_pageblock_migratetype(page,
+ /* Take ownership for orders >= pageblock_order */
+ if (current_order >= pageblock_order)
+ change_pageblock_range(page, current_order,
start_migratetype);
expand(zone, page, order, current_order, area, migratetype);
+
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+ start_migratetype, migratetype);
+
return page;
}
}
@@ -874,6 +928,7 @@ retry_reserve:
}
}
+ trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
@@ -934,7 +989,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
to_drain = pcp->batch;
else
to_drain = pcp->count;
- free_pages_bulk(zone, to_drain, &pcp->list, 0);
+ free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
local_irq_restore(flags);
}
@@ -960,7 +1015,7 @@ static void drain_pages(unsigned int cpu)
pcp = &pset->pcp;
local_irq_save(flags);
- free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+ free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
local_irq_restore(flags);
}
@@ -1026,7 +1081,8 @@ static void free_hot_cold_page(struct page *page, int cold)
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
unsigned long flags;
- int wasMlocked = TestClearPageMlocked(page);
+ int migratetype;
+ int wasMlocked = __TestClearPageMlocked(page);
kmemcheck_free_shadow(page, 0);
@@ -1043,35 +1099,49 @@ static void free_hot_cold_page(struct page *page, int cold)
kernel_map_pages(page, 1, 0);
pcp = &zone_pcp(zone, get_cpu())->pcp;
- set_page_private(page, get_pageblock_migratetype(page));
+ migratetype = get_pageblock_migratetype(page);
+ set_page_private(page, migratetype);
local_irq_save(flags);
if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_event(PGFREE);
+ /*
+ * We only track unmovable, reclaimable and movable on pcp lists.
+ * Free ISOLATE pages back to the allocator because they are being
+ * offlined but treat RESERVE as movable pages so we can get those
+ * areas back if necessary. Otherwise, we may have to free
+ * excessively into the page allocator
+ */
+ if (migratetype >= MIGRATE_PCPTYPES) {
+ if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+ free_one_page(zone, page, 0, migratetype);
+ goto out;
+ }
+ migratetype = MIGRATE_MOVABLE;
+ }
+
if (cold)
- list_add_tail(&page->lru, &pcp->list);
+ list_add_tail(&page->lru, &pcp->lists[migratetype]);
else
- list_add(&page->lru, &pcp->list);
+ list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
- free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+ free_pcppages_bulk(zone, pcp->batch, pcp);
pcp->count -= pcp->batch;
}
+
+out:
local_irq_restore(flags);
put_cpu();
}
void free_hot_page(struct page *page)
{
+ trace_mm_page_free_direct(page, 0);
free_hot_cold_page(page, 0);
}
-void free_cold_page(struct page *page)
-{
- free_hot_cold_page(page, 1);
-}
-
/*
* split_page takes a non-compound higher-order page, and splits it into
* n (1<<order) sub-pages: page[0..n]
@@ -1119,35 +1189,23 @@ again:
cpu = get_cpu();
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
+ struct list_head *list;
pcp = &zone_pcp(zone, cpu)->pcp;
+ list = &pcp->lists[migratetype];
local_irq_save(flags);
- if (!pcp->count) {
- pcp->count = rmqueue_bulk(zone, 0,
- pcp->batch, &pcp->list,
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, 0,
+ pcp->batch, list,
migratetype, cold);
- if (unlikely(!pcp->count))
+ if (unlikely(list_empty(list)))
goto failed;
}
- /* Find a page of the appropriate migrate type */
- if (cold) {
- list_for_each_entry_reverse(page, &pcp->list, lru)
- if (page_private(page) == migratetype)
- break;
- } else {
- list_for_each_entry(page, &pcp->list, lru)
- if (page_private(page) == migratetype)
- break;
- }
-
- /* Allocate more to the pcp list if necessary */
- if (unlikely(&page->lru == &pcp->list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, &pcp->list,
- migratetype, cold);
- page = list_entry(pcp->list.next, struct page, lru);
- }
+ if (cold)
+ page = list_entry(list->prev, struct page, lru);
+ else
+ page = list_entry(list->next, struct page, lru);
list_del(&page->lru);
pcp->count--;
@@ -1627,10 +1685,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
-
- /*
- * The task's cpuset might have expanded its set of allowable nodes
- */
p->flags |= PF_MEMALLOC;
lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
@@ -1765,6 +1819,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
wake_all_kswapd(order, zonelist, high_zoneidx);
+restart:
/*
* OK, we're below the kswapd watermark and have kicked background
* reclaim. Now things get more complex, so set up alloc_flags according
@@ -1772,7 +1827,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
-restart:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -1907,6 +1961,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
+ trace_mm_page_alloc(page, order, gfp_mask, migratetype);
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -1916,44 +1971,41 @@ EXPORT_SYMBOL(__alloc_pages_nodemask);
*/
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
- struct page * page;
+ struct page *page;
+
+ /*
+ * __get_free_pages() returns a 32-bit address, which cannot represent
+ * a highmem page
+ */
+ VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
page = alloc_pages(gfp_mask, order);
if (!page)
return 0;
return (unsigned long) page_address(page);
}
-
EXPORT_SYMBOL(__get_free_pages);
unsigned long get_zeroed_page(gfp_t gfp_mask)
{
- struct page * page;
-
- /*
- * get_zeroed_page() returns a 32-bit address, which cannot represent
- * a highmem page
- */
- VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
- page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
- if (page)
- return (unsigned long) page_address(page);
- return 0;
+ return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
-
EXPORT_SYMBOL(get_zeroed_page);
void __pagevec_free(struct pagevec *pvec)
{
int i = pagevec_count(pvec);
- while (--i >= 0)
+ while (--i >= 0) {
+ trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
free_hot_cold_page(pvec->pages[i], pvec->cold);
+ }
}
void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
+ trace_mm_page_free_direct(page, order);
if (order == 0)
free_hot_page(page);
else
@@ -2128,23 +2180,28 @@ void show_free_areas(void)
}
}
- printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
- " inactive_file:%lu"
+ printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+ " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
" unevictable:%lu"
- " dirty:%lu writeback:%lu unstable:%lu\n"
- " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+ " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
+ " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+ " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
global_page_state(NR_ACTIVE_ANON),
- global_page_state(NR_ACTIVE_FILE),
global_page_state(NR_INACTIVE_ANON),
+ global_page_state(NR_ISOLATED_ANON),
+ global_page_state(NR_ACTIVE_FILE),
global_page_state(NR_INACTIVE_FILE),
+ global_page_state(NR_ISOLATED_FILE),
global_page_state(NR_UNEVICTABLE),
global_page_state(NR_FILE_DIRTY),
global_page_state(NR_WRITEBACK),
global_page_state(NR_UNSTABLE_NFS),
+ nr_blockdev_pages(),
global_page_state(NR_FREE_PAGES),
- global_page_state(NR_SLAB_RECLAIMABLE) +
- global_page_state(NR_SLAB_UNRECLAIMABLE),
+ global_page_state(NR_SLAB_RECLAIMABLE),
+ global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
+ global_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE));
@@ -2162,7 +2219,21 @@ void show_free_areas(void)
" active_file:%lukB"
" inactive_file:%lukB"
" unevictable:%lukB"
+ " isolated(anon):%lukB"
+ " isolated(file):%lukB"
" present:%lukB"
+ " mlocked:%lukB"
+ " dirty:%lukB"
+ " writeback:%lukB"
+ " mapped:%lukB"
+ " shmem:%lukB"
+ " slab_reclaimable:%lukB"
+ " slab_unreclaimable:%lukB"
+ " kernel_stack:%lukB"
+ " pagetables:%lukB"
+ " unstable:%lukB"
+ " bounce:%lukB"
+ " writeback_tmp:%lukB"
" pages_scanned:%lu"
" all_unreclaimable? %s"
"\n",
@@ -2176,7 +2247,22 @@ void show_free_areas(void)
K(zone_page_state(zone, NR_ACTIVE_FILE)),
K(zone_page_state(zone, NR_INACTIVE_FILE)),
K(zone_page_state(zone, NR_UNEVICTABLE)),
+ K(zone_page_state(zone, NR_ISOLATED_ANON)),
+ K(zone_page_state(zone, NR_ISOLATED_FILE)),
K(zone->present_pages),
+ K(zone_page_state(zone, NR_MLOCK)),
+ K(zone_page_state(zone, NR_FILE_DIRTY)),
+ K(zone_page_state(zone, NR_WRITEBACK)),
+ K(zone_page_state(zone, NR_FILE_MAPPED)),
+ K(zone_page_state(zone, NR_SHMEM)),
+ K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
+ K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
+ zone_page_state(zone, NR_KERNEL_STACK) *
+ THREAD_SIZE / 1024,
+ K(zone_page_state(zone, NR_PAGETABLE)),
+ K(zone_page_state(zone, NR_UNSTABLE_NFS)),
+ K(zone_page_state(zone, NR_BOUNCE)),
+ K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
(zone_is_all_unreclaimable(zone) ? "yes" : "no")
);
@@ -2305,7 +2391,7 @@ early_param("numa_zonelist_order", setup_numa_zonelist_order);
* sysctl handler for numa_zonelist_order
*/
int numa_zonelist_order_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length,
+ void __user *buffer, size_t *length,
loff_t *ppos)
{
char saved_string[NUMA_ZONELIST_ORDER_LEN];
@@ -2314,7 +2400,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
if (write)
strncpy(saved_string, (char*)table->data,
NUMA_ZONELIST_ORDER_LEN);
- ret = proc_dostring(table, write, file, buffer, length, ppos);
+ ret = proc_dostring(table, write, buffer, length, ppos);
if (ret)
return ret;
if (write) {
@@ -2783,7 +2869,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
{
unsigned long start_pfn, pfn, end_pfn;
struct page *page;
- unsigned long reserve, block_migratetype;
+ unsigned long block_migratetype;
+ int reserve;
/* Get the start pfn, end pfn and the number of blocks to reserve */
start_pfn = zone->zone_start_pfn;
@@ -2791,6 +2878,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
+ /*
+ * Reserve blocks are generally in place to help high-order atomic
+ * allocations that are short-lived. A min_free_kbytes value that
+ * would result in more than 2 reserve blocks for atomic allocations
+ * is assumed to be in place to help anti-fragmentation for the
+ * future allocation of hugepages at runtime.
+ */
+ reserve = min(2, reserve);
+
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
if (!pfn_valid(pfn))
continue;
@@ -2961,6 +3057,7 @@ static int zone_batchsize(struct zone *zone)
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
+ int migratetype;
memset(p, 0, sizeof(*p));
@@ -2968,7 +3065,8 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
pcp->count = 0;
pcp->high = 6 * batch;
pcp->batch = max(1UL, 1 * batch);
- INIT_LIST_HEAD(&pcp->list);
+ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+ INIT_LIST_HEAD(&pcp->lists[migratetype]);
}
/*
@@ -3146,6 +3244,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
return 0;
}
+static int __zone_pcp_update(void *data)
+{
+ struct zone *zone = data;
+ int cpu;
+ unsigned long batch = zone_batchsize(zone), flags;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
+
+ pset = zone_pcp(zone, cpu);
+ pcp = &pset->pcp;
+
+ local_irq_save(flags);
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ setup_pageset(pset, batch);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+ stop_machine(__zone_pcp_update, zone, NULL);
+}
+
static __meminit void zone_pcp_init(struct zone *zone)
{
int cpu;
@@ -3720,7 +3844,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone_pcp_init(zone);
for_each_lru(l) {
INIT_LIST_HEAD(&zone->lru[l].list);
- zone->lru[l].nr_saved_scan = 0;
+ zone->reclaim_stat.nr_saved_scan[l] = 0;
}
zone->reclaim_stat.recent_rotated[0] = 0;
zone->reclaim_stat.recent_rotated[1] = 0;
@@ -4509,7 +4633,7 @@ void setup_per_zone_wmarks(void)
calculate_totalreserve_pages();
}
-/**
+/*
* The inactive anon list should be small enough that the VM never has to
* do too much work, but large enough that each inactive page has a chance
* to be referenced again before it is swapped out.
@@ -4600,9 +4724,9 @@ module_init(init_per_zone_wmark_min)
* changes.
*/
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length, ppos);
+ proc_dointvec(table, write, buffer, length, ppos);
if (write)
setup_per_zone_wmarks();
return 0;
@@ -4610,12 +4734,12 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int rc;
- rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
@@ -4626,12 +4750,12 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
}
int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
int rc;
- rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;
@@ -4652,9 +4776,9 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
* if in function of the boot time zone sizes.
*/
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ proc_dointvec_minmax(table, write, buffer, length, ppos);
setup_per_zone_lowmem_reserve();
return 0;
}
@@ -4666,13 +4790,13 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
*/
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct zone *zone;
unsigned int cpu;
int ret;
- ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (!write || (ret == -EINVAL))
return ret;
for_each_populated_zone(zone) {
@@ -4732,7 +4856,14 @@ void *__init alloc_large_system_hash(const char *tablename,
numentries <<= (PAGE_SHIFT - scale);
/* Make sure we've got at least a 0-order allocation.. */
- if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+ if (unlikely(flags & HASH_SMALL)) {
+ /* Makes no sense without HASH_EARLY */
+ WARN_ON(!(flags & HASH_EARLY));
+ if (!(numentries >> *_hash_shift)) {
+ numentries = 1UL << *_hash_shift;
+ BUG_ON(!numentries);
+ }
+ } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
numentries = PAGE_SIZE / bucketsize;
}
numentries = roundup_pow_of_two(numentries);
@@ -4874,13 +5005,16 @@ int set_migratetype_isolate(struct page *page)
struct zone *zone;
unsigned long flags;
int ret = -EBUSY;
+ int zone_idx;
zone = page_zone(page);
+ zone_idx = zone_idx(zone);
spin_lock_irqsave(&zone->lock, flags);
/*
* In future, more migrate types will be able to be isolation target.
*/
- if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+ if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
+ zone_idx != ZONE_MOVABLE)
goto out;
set_pageblock_migratetype(page, MIGRATE_ISOLATE);
move_freepages_block(zone, page, MIGRATE_ISOLATE);
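The sysctl handler hunks above all drop the struct file * argument and call the proc_do*() helpers without it. A minimal sketch of a handler written against the new convention; my_tunable_handler() and my_recompute_state() are illustrative names, not part of this patch:

#include <linux/sysctl.h>

static int my_tunable;

static void my_recompute_state(void)
{
	/* hypothetical: react to the new tunable value */
}

int my_tunable_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
{
	/* note: no struct file * argument any more */
	int rc = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (rc)
		return rc;
	if (write)
		my_recompute_state();
	return 0;
}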
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f22b4ebbd8d..3d535d59482 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -116,10 +116,16 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
VM_BUG_ON(!slab_is_available());
- base = kmalloc_node(table_size,
+ if (node_state(nid, N_HIGH_MEMORY)) {
+ base = kmalloc_node(table_size,
GFP_KERNEL | __GFP_NOWARN, nid);
- if (!base)
- base = vmalloc_node(table_size, nid);
+ if (!base)
+ base = vmalloc_node(table_size, nid);
+ } else {
+ base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
+ if (!base)
+ base = vmalloc(table_size);
+ }
} else {
/*
* We don't have to allocate page_cgroup again, but
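The init_section_page_cgroup() hunk above avoids asking a memoryless node for node-local memory. The same fallback pattern in isolation, as a hedged sketch (alloc_node_table() is a hypothetical wrapper; the flags mirror the hunk):

static void *alloc_node_table(size_t table_size, int nid)
{
	void *base;

	if (node_state(nid, N_HIGH_MEMORY)) {
		/* node has memory: try a node-local allocation first */
		base = kmalloc_node(table_size, GFP_KERNEL | __GFP_NOWARN, nid);
		if (!base)
			base = vmalloc_node(table_size, nid);
	} else {
		/* memoryless node: let the allocator pick any node */
		base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
		if (!base)
			base = vmalloc(table_size);
	}
	return base;
}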
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 6eedf7e473d..6633965bb27 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned long min_pages)
int node = numa_node_id();
struct zone *zones = NODE_DATA(node)->node_zones;
int num_cpus_on_node;
- const struct cpumask *cpumask_on_node = cpumask_of_node(node);
node_free_pages =
#ifdef CONFIG_ZONE_DMA
@@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned long min_pages)
max = node_free_pages / FRACTION_OF_NODE_MEM;
- num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
+ num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
max /= num_cpus_on_node;
return max(max, min_pages);
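The quicklist hunk above replaces the cpumask copy plus cpus_weight_nr() with a single cpumask_weight() call on the node's mask. A small sketch of the idiom (the helper name is illustrative):

static int cpus_on_this_node(void)
{
	int node = numa_node_id();

	/* count CPUs in the node's mask directly, without copying it */
	return cpumask_weight(cpumask_of_node(node));
}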
diff --git a/mm/rmap.c b/mm/rmap.c
index 0895b5c7cbf..28aafe2b530 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,6 +36,11 @@
* mapping->tree_lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within inode_lock in __sync_single_inode)
+ *
+ * (code doesn't rely on that order so it could be switched around)
+ * ->tasklist_lock
+ * anon_vma->lock (memory_failure, collect_procs_anon)
+ * pte map lock
*/
#include <linux/mm.h>
@@ -191,7 +196,7 @@ void __init anon_vma_init(void)
* Getting a lock on a stable anon_vma from a page off the LRU is
* tricky: page_lock_anon_vma rely on RCU to guard against the races.
*/
-static struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma(struct page *page)
{
struct anon_vma *anon_vma;
unsigned long anon_mapping;
@@ -211,7 +216,7 @@ out:
return NULL;
}
-static void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
spin_unlock(&anon_vma->lock);
rcu_read_unlock();
@@ -311,7 +316,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
* if the page is not mapped into the page tables of this VMA. Only
* valid for normal file or anonymous VMAs.
*/
-static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
unsigned long address;
pte_t *pte;
@@ -710,27 +715,6 @@ void page_add_file_rmap(struct page *page)
}
}
-#ifdef CONFIG_DEBUG_VM
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- * @vma: the vm area being duplicated
- * @address: the user virtual address mapped
- *
- * For copy_page_range only: minimal extract from page_add_file_rmap /
- * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
- * quicker.
- *
- * The caller needs to hold the pte lock.
- */
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
-{
- if (PageAnon(page))
- __page_check_anon_rmap(page, vma, address);
- atomic_inc(&page->_mapcount);
-}
-#endif
-
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
@@ -739,34 +723,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
*/
void page_remove_rmap(struct page *page)
{
- if (atomic_add_negative(-1, &page->_mapcount)) {
- /*
- * Now that the last pte has gone, s390 must transfer dirty
- * flag from storage key to struct page. We can usually skip
- * this if the page is anon, so about to be freed; but perhaps
- * not if it's in swapcache - there might be another pte slot
- * containing the swap entry, but page not yet written to swap.
- */
- if ((!PageAnon(page) || PageSwapCache(page)) &&
- page_test_dirty(page)) {
- page_clear_dirty(page);
- set_page_dirty(page);
- }
- if (PageAnon(page))
- mem_cgroup_uncharge_page(page);
- __dec_zone_page_state(page,
- PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
- mem_cgroup_update_mapped_file_stat(page, -1);
- /*
- * It would be tidy to reset the PageAnon mapping here,
- * but that might overwrite a racing page_add_anon_rmap
- * which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
- * and remember that it's only reliable while mapped.
- * Leaving it set also helps swapoff to reinstate ptes
- * faster for those pages still in swapcache.
- */
+ /* page still mapped by someone else? */
+ if (!atomic_add_negative(-1, &page->_mapcount))
+ return;
+
+ /*
+ * Now that the last pte has gone, s390 must transfer dirty
+ * flag from storage key to struct page. We can usually skip
+ * this if the page is anon, so about to be freed; but perhaps
+ * not if it's in swapcache - there might be another pte slot
+ * containing the swap entry, but page not yet written to swap.
+ */
+ if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+ page_clear_dirty(page);
+ set_page_dirty(page);
+ }
+ if (PageAnon(page)) {
+ mem_cgroup_uncharge_page(page);
+ __dec_zone_page_state(page, NR_ANON_PAGES);
+ } else {
+ __dec_zone_page_state(page, NR_FILE_MAPPED);
}
+ mem_cgroup_update_mapped_file_stat(page, -1);
+ /*
+ * It would be tidy to reset the PageAnon mapping here,
+ * but that might overwrite a racing page_add_anon_rmap
+ * which increments mapcount after us but sets mapping
+ * before us: so leave the reset to free_hot_cold_page,
+ * and remember that it's only reliable while mapped.
+ * Leaving it set also helps swapoff to reinstate ptes
+ * faster for those pages still in swapcache.
+ */
}
/*
@@ -774,7 +761,7 @@ void page_remove_rmap(struct page *page)
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- int migration)
+ enum ttu_flags flags)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -796,11 +783,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
*/
- if (!migration) {
+ if (!(flags & TTU_IGNORE_MLOCK)) {
if (vma->vm_flags & VM_LOCKED) {
ret = SWAP_MLOCK;
goto out_unmap;
}
+ }
+ if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address, pte)) {
ret = SWAP_FAIL;
goto out_unmap;
@@ -818,7 +807,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (PageAnon(page)) {
+ if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+ if (PageAnon(page))
+ dec_mm_counter(mm, anon_rss);
+ else
+ dec_mm_counter(mm, file_rss);
+ set_pte_at(mm, address, pte,
+ swp_entry_to_pte(make_hwpoison_entry(page)));
+ } else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
if (PageSwapCache(page)) {
@@ -840,12 +836,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* pte. do_swap_page() will wait until the migration
* pte is removed and then restart fault handling.
*/
- BUG_ON(!migration);
+ BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- } else if (PAGE_MIGRATION && migration) {
+ } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
/* Establish migration entry for a file page */
swp_entry_t entry;
entry = make_migration_entry(page, pte_write(pteval));
@@ -1014,12 +1010,13 @@ static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* 'LOCKED.
*/
-static int try_to_unmap_anon(struct page *page, int unlock, int migration)
+static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
unsigned int mlocked = 0;
int ret = SWAP_AGAIN;
+ int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
if (MLOCK_PAGES && unlikely(unlock))
ret = SWAP_SUCCESS; /* default for try_to_munlock() */
@@ -1035,7 +1032,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
continue; /* must visit all unlocked vmas */
ret = SWAP_MLOCK; /* saw at least one mlocked vma */
} else {
- ret = try_to_unmap_one(page, vma, migration);
+ ret = try_to_unmap_one(page, vma, flags);
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
@@ -1059,8 +1056,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
/**
* try_to_unmap_file - unmap/unlock file page using the object-based rmap method
* @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
@@ -1072,7 +1068,7 @@ static int try_to_unmap_anon(struct page *page, int unlock, int migration)
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* 'LOCKED.
*/
-static int try_to_unmap_file(struct page *page, int unlock, int migration)
+static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1084,6 +1080,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
unsigned long max_nl_size = 0;
unsigned int mapcount;
unsigned int mlocked = 0;
+ int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
if (MLOCK_PAGES && unlikely(unlock))
ret = SWAP_SUCCESS; /* default for try_to_munlock() */
@@ -1096,7 +1093,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
continue; /* must visit all vmas */
ret = SWAP_MLOCK;
} else {
- ret = try_to_unmap_one(page, vma, migration);
+ ret = try_to_unmap_one(page, vma, flags);
if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
@@ -1121,7 +1118,8 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
ret = SWAP_MLOCK; /* leave mlocked == 0 */
goto out; /* no need to look further */
}
- if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
+ if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
+ (vma->vm_flags & VM_LOCKED))
continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
@@ -1155,7 +1153,7 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (!MLOCK_PAGES && !migration &&
+ if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
(vma->vm_flags & VM_LOCKED))
continue;
cursor = (unsigned long) vma->vm_private_data;
@@ -1195,7 +1193,7 @@ out:
/**
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
- * @migration: migration flag
+ * @flags: action and flags
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
@@ -1206,16 +1204,16 @@ out:
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
-int try_to_unmap(struct page *page, int migration)
+int try_to_unmap(struct page *page, enum ttu_flags flags)
{
int ret;
BUG_ON(!PageLocked(page));
if (PageAnon(page))
- ret = try_to_unmap_anon(page, 0, migration);
+ ret = try_to_unmap_anon(page, flags);
else
- ret = try_to_unmap_file(page, 0, migration);
+ ret = try_to_unmap_file(page, flags);
if (ret != SWAP_MLOCK && !page_mapped(page))
ret = SWAP_SUCCESS;
return ret;
@@ -1240,8 +1238,8 @@ int try_to_munlock(struct page *page)
VM_BUG_ON(!PageLocked(page) || PageLRU(page));
if (PageAnon(page))
- return try_to_unmap_anon(page, 1, 0);
+ return try_to_unmap_anon(page, TTU_MUNLOCK);
else
- return try_to_unmap_file(page, 1, 0);
+ return try_to_unmap_file(page, TTU_MUNLOCK);
}
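The rmap conversion above folds the old unlock/migration ints into a single enum ttu_flags, with an action in the low bits plus modifier bits such as TTU_IGNORE_MLOCK and TTU_IGNORE_HWPOISON. A sketch of how such an encoding could be laid out; the authoritative definitions live in include/linux/rmap.h, so the exact values below are an assumption:

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age the pte */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x)	((x) & TTU_ACTION_MASK)

/* e.g. hwpoison handling could combine an action with modifiers:
 * try_to_unmap(page, TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
 */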
diff --git a/mm/shmem.c b/mm/shmem.c
index bd20f8bb02a..ccf446a9faa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -49,7 +49,6 @@ static struct vfsmount *shm_mnt;
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
-#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
@@ -1047,8 +1046,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
* sync from ever calling shmem_writepage; but a stacking filesystem
* may use the ->writepage of its underlying filesystem, in which case
* tmpfs should write out to swap only in response to memory pressure,
- * and not for pdflush or sync. However, in those cases, we do still
- * want to check if there's a redundant swappage to be discarded.
+ * and not for the writeback threads or sync. However, in those cases,
+ * we do still want to check if there's a redundant swappage to be
+ * discarded.
*/
if (wbc->for_reclaim)
swap = get_swap_page();
@@ -1097,6 +1097,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
shmem_swp_unmap(entry);
unlock:
spin_unlock(&info->lock);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
@@ -1630,8 +1634,8 @@ shmem_write_end(struct file *file, struct address_space *mapping,
if (pos + copied > inode->i_size)
i_size_write(inode, pos + copied);
- unlock_page(page);
set_page_dirty(page);
+ unlock_page(page);
page_cache_release(page);
return copied;
@@ -1968,13 +1972,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
iput(inode);
return error;
}
- unlock_page(page);
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page, KM_USER0);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr, KM_USER0);
set_page_dirty(page);
+ unlock_page(page);
page_cache_release(page);
}
if (dir->i_mode & S_ISGID)
@@ -2306,17 +2310,14 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
- sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
+ sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
L1_CACHE_BYTES), GFP_KERNEL);
if (!sbinfo)
return -ENOMEM;
- sbinfo->max_blocks = 0;
- sbinfo->max_inodes = 0;
sbinfo->mode = S_IRWXUGO | S_ISVTX;
sbinfo->uid = current_fsuid();
sbinfo->gid = current_fsgid();
- sbinfo->mpol = NULL;
sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
@@ -2420,6 +2421,7 @@ static const struct address_space_operations shmem_aops = {
.write_end = shmem_write_end,
#endif
.migratepage = migrate_page,
+ .error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
@@ -2590,6 +2592,11 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
return 0;
}
+int shmem_lock(struct file *file, int lock, struct user_struct *user)
+{
+ return 0;
+}
+
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
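The shmem_fill_super() hunk swaps kmalloc() for kzalloc(), which is why the explicit zeroing of max_blocks, max_inodes and mpol can be dropped. A hedged sketch of the idiom with a made-up structure:

struct myfs_sb_info {
	unsigned long max_blocks;
	unsigned long max_inodes;
	void *mpol;
	mode_t mode;
};

static struct myfs_sb_info *myfs_alloc_sbinfo(void)
{
	/* kzalloc() hands back zeroed memory, so only fields with
	 * non-zero defaults need explicit assignment */
	struct myfs_sb_info *sbinfo = kzalloc(sizeof(*sbinfo), GFP_KERNEL);

	if (!sbinfo)
		return NULL;
	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	return sbinfo;
}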
diff --git a/mm/slab.c b/mm/slab.c
index 7b5d4deacfc..7dfa481c96b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1384,7 +1384,7 @@ void __init kmem_cache_init(void)
* Fragmentation resistance on low memory - only use bigger
* page orders on machines with more than 32MB of memory.
*/
- if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+ if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
slab_break_gfp_order = BREAK_GFP_ORDER_HI;
/* Bootstrap is tricky, because several objects are allocated
diff --git a/mm/slub.c b/mm/slub.c
index 0a216aae227..4996fc71955 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3345,6 +3345,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *s;
+ if (WARN_ON(!name))
+ return NULL;
+
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
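With the WARN_ON(!name) check added above, kmem_cache_create() rejects a NULL name up front instead of failing later in the sysfs/proc paths. A hedged usage sketch (struct my_obj and the cache name are illustrative):

struct my_obj {
	struct list_head list;
	int value;
};

static struct kmem_cache *my_obj_cache;

static int __init my_obj_cache_init(void)
{
	/* a NULL name now returns NULL with a warning rather than
	 * oopsing somewhere down the line */
	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	return my_obj_cache ? 0 : -ENOMEM;
}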
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index a13ea6401ae..d9714bdcb4a 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -48,8 +48,14 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
/* If the main allocator is up use that, fallback to bootmem. */
if (slab_is_available()) {
- struct page *page = alloc_pages_node(node,
+ struct page *page;
+
+ if (node_state(node, N_HIGH_MEMORY))
+ page = alloc_pages_node(node,
GFP_KERNEL | __GFP_ZERO, get_order(size));
+ else
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(size));
if (page)
return page_address(page);
return NULL;
diff --git a/mm/sparse.c b/mm/sparse.c
index da432d9f0ae..6ce4aab69e9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -62,9 +62,12 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- if (slab_is_available())
- section = kmalloc_node(array_size, GFP_KERNEL, nid);
- else
+ if (slab_is_available()) {
+ if (node_state(nid, N_HIGH_MEMORY))
+ section = kmalloc_node(array_size, GFP_KERNEL, nid);
+ else
+ section = kmalloc(array_size, GFP_KERNEL);
+ } else
section = alloc_bootmem_node(NODE_DATA(nid), array_size);
if (section)
diff --git a/mm/swap.c b/mm/swap.c
index cb29ae5d33a..308e57d8d7e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -118,7 +118,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
spin_lock(&zone->lru_lock);
}
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int lru = page_is_file_cache(page);
+ int lru = page_lru_base_type(page);
list_move_tail(&page->lru, &zone->lru[lru].list);
pgmoved++;
}
@@ -181,7 +181,7 @@ void activate_page(struct page *page)
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int file = page_is_file_cache(page);
- int lru = LRU_BASE + file;
+ int lru = page_lru_base_type(page);
del_page_from_lru_list(zone, page, lru);
SetPageActive(page);
@@ -189,7 +189,7 @@ void activate_page(struct page *page)
add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE);
- update_page_reclaim_stat(zone, page, !!file, 1);
+ update_page_reclaim_stat(zone, page, file, 1);
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -496,7 +496,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
*/
void __init swap_setup(void)
{
- unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+ unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
bdi_init(swapper_space.backing_dev_info);
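Several hunks above (swap.c here, vmscan.c later) stop using page_is_file_cache() directly as an LRU list index and call page_lru_base_type() instead. A sketch of what that helper plausibly looks like, assuming the enum lru_list layout where the file LRUs follow the anon ones (the real definition lives in include/linux/mm_inline.h):

static inline enum lru_list page_lru_base_type(struct page *page)
{
	/* map a page to the base (inactive) LRU list of its type */
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}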
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5ae6b8b78c8..6d1daeb1cb4 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -67,10 +67,10 @@ void show_swap_cache_info(void)
}
/*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
int error;
@@ -78,28 +78,43 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
VM_BUG_ON(PageSwapCache(page));
VM_BUG_ON(!PageSwapBacked(page));
+ page_cache_get(page);
+ SetPageSwapCache(page);
+ set_page_private(page, entry.val);
+
+ spin_lock_irq(&swapper_space.tree_lock);
+ error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
+ if (likely(!error)) {
+ total_swapcache_pages++;
+ __inc_zone_page_state(page, NR_FILE_PAGES);
+ INC_CACHE_INFO(add_total);
+ }
+ spin_unlock_irq(&swapper_space.tree_lock);
+
+ if (unlikely(error)) {
+ /*
+ * Only a context which has set the SWAP_HAS_CACHE flag
+ * would call add_to_swap_cache().
+ * So add_to_swap_cache() doesn't return -EEXIST.
+ */
+ VM_BUG_ON(error == -EEXIST);
+ set_page_private(page, 0UL);
+ ClearPageSwapCache(page);
+ page_cache_release(page);
+ }
+
+ return error;
+}
+
+
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+{
+ int error;
+
error = radix_tree_preload(gfp_mask);
if (!error) {
- page_cache_get(page);
- SetPageSwapCache(page);
- set_page_private(page, entry.val);
-
- spin_lock_irq(&swapper_space.tree_lock);
- error = radix_tree_insert(&swapper_space.page_tree,
- entry.val, page);
- if (likely(!error)) {
- total_swapcache_pages++;
- __inc_zone_page_state(page, NR_FILE_PAGES);
- INC_CACHE_INFO(add_total);
- }
- spin_unlock_irq(&swapper_space.tree_lock);
+ error = __add_to_swap_cache(page, entry);
radix_tree_preload_end();
-
- if (unlikely(error)) {
- set_page_private(page, 0UL);
- ClearPageSwapCache(page);
- page_cache_release(page);
- }
}
return error;
}
@@ -137,38 +152,34 @@ int add_to_swap(struct page *page)
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageUptodate(page));
- for (;;) {
- entry = get_swap_page();
- if (!entry.val)
- return 0;
+ entry = get_swap_page();
+ if (!entry.val)
+ return 0;
+ /*
+ * Radix-tree node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
+ *
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
+ */
+ /*
+ * Add it to the swap cache and mark it dirty
+ */
+ err = add_to_swap_cache(page, entry,
+ __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+
+ if (!err) { /* Success */
+ SetPageDirty(page);
+ return 1;
+ } else { /* -ENOMEM radix-tree allocation failure */
/*
- * Radix-tree node allocations from PF_MEMALLOC contexts could
- * completely exhaust the page allocator. __GFP_NOMEMALLOC
- * stops emergency reserves from being allocated.
- *
- * TODO: this could cause a theoretical memory reclaim
- * deadlock in the swap out path.
- */
- /*
- * Add it to the swap cache and mark it dirty
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
*/
- err = add_to_swap_cache(page, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-
- switch (err) {
- case 0: /* Success */
- SetPageDirty(page);
- return 1;
- case -EEXIST:
- /* Raced with "speculative" read_swap_cache_async */
- swapcache_free(entry, NULL);
- continue;
- default:
- /* -ENOMEM radix-tree allocation failure */
- swapcache_free(entry, NULL);
- return 0;
- }
+ swapcache_free(entry, NULL);
+ return 0;
}
}
@@ -290,26 +301,31 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
}
/*
+ * call radix_tree_preload() while we can wait.
+ */
+ err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+ if (err)
+ break;
+
+ /*
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry);
- if (err == -EEXIST) /* seems racy */
+ if (err == -EEXIST) { /* seems racy */
+ radix_tree_preload_end();
continue;
- if (err) /* swp entry is obsolete ? */
+ }
+ if (err) { /* swp entry is obsolete ? */
+ radix_tree_preload_end();
break;
+ }
- /*
- * Associate the page with swap entry in the swap cache.
- * May fail (-EEXIST) if there is already a page associated
- * with this entry in the swap cache: added by a racing
- * read_swap_cache_async, or add_to_swap or shmem_writepage
- * re-using the just freed swap entry for an existing page.
- * May fail (-ENOMEM) if radix-tree node allocation failed.
- */
+ /* May fail (-ENOMEM) if radix-tree node allocation failed. */
__set_page_locked(new_page);
SetPageSwapBacked(new_page);
- err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
+ err = __add_to_swap_cache(new_page, entry);
if (likely(!err)) {
+ radix_tree_preload_end();
/*
* Initiate read into locked page and return.
*/
@@ -317,8 +333,13 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
swap_readpage(new_page);
return new_page;
}
+ radix_tree_preload_end();
ClearPageSwapBacked(new_page);
__clear_page_locked(new_page);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
swapcache_free(entry, NULL);
} while (err != -ENOMEM);
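The read_swap_cache_async() rework above calls radix_tree_preload() while sleeping is still allowed, so the later insertion under the tree lock cannot fail for lack of nodes. The general shape of that pattern, as a hedged sketch with hypothetical names:

static int insert_locked(struct radix_tree_root *tree, spinlock_t *lock,
			 unsigned long index, void *item)
{
	/* preallocate radix-tree nodes; may sleep, so do it before locking */
	int err = radix_tree_preload(GFP_KERNEL);

	if (err)
		return err;

	spin_lock_irq(lock);
	err = radix_tree_insert(tree, index, item);	/* uses preloaded nodes */
	spin_unlock_irq(lock);

	/* releases the per-cpu preload (and re-enables preemption) */
	radix_tree_preload_end();
	return err;
}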
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 74f1102e874..4de7f02f820 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -699,7 +699,7 @@ int free_swap_and_cache(swp_entry_t entry)
struct swap_info_struct *p;
struct page *page = NULL;
- if (is_migration_entry(entry))
+ if (non_swap_entry(entry))
return 1;
p = swap_info_get(entry);
@@ -1575,9 +1575,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->flags &= ~SWP_WRITEOK;
spin_unlock(&swap_lock);
- current->flags |= PF_SWAPOFF;
+ current->flags |= PF_OOM_ORIGIN;
err = try_to_unuse(type);
- current->flags &= ~PF_SWAPOFF;
+ current->flags &= ~PF_OOM_ORIGIN;
if (err) {
/* re-insert swap space back into swap_list */
@@ -2085,7 +2085,7 @@ static int __swap_duplicate(swp_entry_t entry, bool cache)
int count;
bool has_cache;
- if (is_migration_entry(entry))
+ if (non_swap_entry(entry))
return -EINVAL;
type = swp_type(entry);
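free_swap_and_cache() and __swap_duplicate() above now reject every special (non-swap) entry, not just migration entries, which also covers the new hwpoison entries. A sketch of what non_swap_entry() plausibly tests, assuming special entries are encoded with type values at or beyond MAX_SWAPFILES (see include/linux/swapops.h):

static inline int non_swap_entry(swp_entry_t entry)
{
	/* migration and hwpoison entries borrow type values that real
	 * swap devices can never use */
	return swp_type(entry) >= MAX_SWAPFILES;
}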
diff --git a/mm/truncate.c b/mm/truncate.c
index ccc3ecf7cb9..450cebdabfc 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page);
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/
-static void
+static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
if (page->mapping != mapping)
- return;
+ return -EIO;
if (page_has_private(page))
do_invalidatepage(page, 0);
@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
remove_from_page_cache(page);
ClearPageMappedToDisk(page);
page_cache_release(page); /* pagecache ref */
+ return 0;
}
/*
@@ -135,6 +136,51 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
return ret;
}
+int truncate_inode_page(struct address_space *mapping, struct page *page)
+{
+ if (page_mapped(page)) {
+ unmap_mapping_range(mapping,
+ (loff_t)page->index << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE, 0);
+ }
+ return truncate_complete_page(mapping, page);
+}
+
+/*
+ * Used to get rid of pages on hardware memory corruption.
+ */
+int generic_error_remove_page(struct address_space *mapping, struct page *page)
+{
+ if (!mapping)
+ return -EINVAL;
+ /*
+ * Only punch for normal data pages for now.
+ * Handling other types like directories would need more auditing.
+ */
+ if (!S_ISREG(mapping->host->i_mode))
+ return -EIO;
+ return truncate_inode_page(mapping, page);
+}
+EXPORT_SYMBOL(generic_error_remove_page);
+
+/*
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages. The page must be locked.
+ *
+ * Returns 1 if the page is successfully invalidated, otherwise 0.
+ */
+int invalidate_inode_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ if (!mapping)
+ return 0;
+ if (PageDirty(page) || PageWriteback(page))
+ return 0;
+ if (page_mapped(page))
+ return 0;
+ return invalidate_complete_page(mapping, page);
+}
+
/**
* truncate_inode_pages - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
@@ -196,12 +242,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page);
continue;
}
- if (page_mapped(page)) {
- unmap_mapping_range(mapping,
- (loff_t)page_index<<PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE, 0);
- }
- truncate_complete_page(mapping, page);
+ truncate_inode_page(mapping, page);
unlock_page(page);
}
pagevec_release(&pvec);
@@ -238,15 +279,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
break;
lock_page(page);
wait_on_page_writeback(page);
- if (page_mapped(page)) {
- unmap_mapping_range(mapping,
- (loff_t)page->index<<PAGE_CACHE_SHIFT,
- PAGE_CACHE_SIZE, 0);
- }
+ truncate_inode_page(mapping, page);
if (page->index > next)
next = page->index;
next++;
- truncate_complete_page(mapping, page);
unlock_page(page);
}
pagevec_release(&pvec);
@@ -311,12 +347,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
if (lock_failed)
continue;
- if (PageDirty(page) || PageWriteback(page))
- goto unlock;
- if (page_mapped(page))
- goto unlock;
- ret += invalidate_complete_page(mapping, page);
-unlock:
+ ret += invalidate_inode_page(page);
+
unlock_page(page);
if (next > end)
break;
@@ -465,3 +497,67 @@ int invalidate_inode_pages2(struct address_space *mapping)
return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
+
+/**
+ * truncate_pagecache - unmap and remove pagecache that has been truncated
+ * @inode: inode
+ * @old: old file offset
+ * @new: new file offset
+ *
+ * inode's new i_size must already be written before truncate_pagecache
+ * is called.
+ *
+ * This function should typically be called before the filesystem
+ * releases resources associated with the freed range (eg. deallocates
+ * blocks). This way, pagecache will always stay logically coherent
+ * with on-disk format, and the filesystem would not have to deal with
+ * situations such as writepage being called for a page that has already
+ * had its underlying blocks deallocated.
+ */
+void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
+{
+ if (new < old) {
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * unmap_mapping_range is called twice, first simply for
+ * efficiency so that truncate_inode_pages does fewer
+ * single-page unmaps. However after this first call, and
+ * before truncate_inode_pages finishes, it is possible for
+ * private pages to be COWed, which remain after
+ * truncate_inode_pages finishes, hence the second
+ * unmap_mapping_range call must be made for correctness.
+ */
+ unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, new);
+ unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+ }
+}
+EXPORT_SYMBOL(truncate_pagecache);
+
+/**
+ * vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
+ *
+ * NOTE! We have to be ready to update the memory sharing
+ * between the file and the memory map for a potential last
+ * incomplete page. Ugly, but necessary.
+ */
+int vmtruncate(struct inode *inode, loff_t offset)
+{
+ loff_t oldsize;
+ int error;
+
+ error = inode_newsize_ok(inode, offset);
+ if (error)
+ return error;
+ oldsize = inode->i_size;
+ i_size_write(inode, offset);
+ truncate_pagecache(inode, oldsize, offset);
+ if (inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+
+ return error;
+}
+EXPORT_SYMBOL(vmtruncate);
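truncate_pagecache() above requires the new i_size to be published first, and vmtruncate() wraps that whole sequence. A hedged sketch of how a filesystem's own truncate path might follow the same ordering (myfs_free_blocks() is a hypothetical helper):

static void myfs_free_blocks(struct inode *inode, loff_t newsize);	/* hypothetical */

static void myfs_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	/* 1. publish the new size */
	i_size_write(inode, newsize);
	/* 2. unmap and drop pagecache beyond the new size */
	truncate_pagecache(inode, oldsize, newsize);
	/* 3. only now release the on-disk blocks */
	myfs_free_blocks(inode, newsize);
}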
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 204b8243d8a..69511e66323 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,7 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
-
+#include <linux/highmem.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
@@ -168,11 +168,9 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
next = pgd_addr_end(addr, end);
err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
if (err)
- break;
+ return err;
} while (pgd++, addr = next, addr != end);
- if (unlikely(err))
- return err;
return nr;
}
@@ -186,7 +184,7 @@ static int vmap_page_range(unsigned long start, unsigned long end,
return ret;
}
-static inline int is_vmalloc_or_module_addr(const void *x)
+int is_vmalloc_or_module_addr(const void *x)
{
/*
* ARM, x86-64 and sparc64 put modules in a special place,
@@ -1272,17 +1270,21 @@ struct vm_struct *remove_vm_area(const void *addr)
if (va && va->flags & VM_VM_AREA) {
struct vm_struct *vm = va->private;
struct vm_struct *tmp, **p;
-
- vmap_debug_free_range(va->va_start, va->va_end);
- free_unmap_vmap_area(va);
- vm->size -= PAGE_SIZE;
-
+ /*
+ * Remove from the list and disallow access to this vm_struct
+ * before unmapping. (Address range conflict checking is handled
+ * by vmap.)
+ */
write_lock(&vmlist_lock);
for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
;
*p = tmp->next;
write_unlock(&vmlist_lock);
+ vmap_debug_free_range(va->va_start, va->va_end);
+ free_unmap_vmap_area(va);
+ vm->size -= PAGE_SIZE;
+
return vm;
}
return NULL;
@@ -1384,7 +1386,7 @@ void *vmap(struct page **pages, unsigned int count,
might_sleep();
- if (count > num_physpages)
+ if (count > totalram_pages)
return NULL;
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
@@ -1491,7 +1493,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
unsigned long real_size = size;
size = PAGE_ALIGN(size);
- if (!size || (size >> PAGE_SHIFT) > num_physpages)
+ if (!size || (size >> PAGE_SHIFT) > totalram_pages)
return NULL;
area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
@@ -1641,10 +1643,120 @@ void *vmalloc_32_user(unsigned long size)
}
EXPORT_SYMBOL(vmalloc_32_user);
+/*
+ * Small helper routine: copy contents to buf from addr.
+ * If the page is not present, fill with zeroes.
+ */
+
+static int aligned_vread(char *buf, char *addr, unsigned long count)
+{
+ struct page *p;
+ int copied = 0;
+
+ while (count) {
+ unsigned long offset, length;
+
+ offset = (unsigned long)addr & ~PAGE_MASK;
+ length = PAGE_SIZE - offset;
+ if (length > count)
+ length = count;
+ p = vmalloc_to_page(addr);
+ /*
+ * To do safe access to this _mapped_ area, we need a
+ * lock. But adding a lock here means adding the
+ * overhead of vmalloc()/vfree() calls for this _debug_
+ * interface, which is rarely used. Instead, we use
+ * kmap() and accept a small overhead in this access function.
+ */
+ if (p) {
+ /*
+ * we can expect USER0 is not used (see vread/vwrite's
+ * function description)
+ */
+ void *map = kmap_atomic(p, KM_USER0);
+ memcpy(buf, map + offset, length);
+ kunmap_atomic(map, KM_USER0);
+ } else
+ memset(buf, 0, length);
+
+ addr += length;
+ buf += length;
+ copied += length;
+ count -= length;
+ }
+ return copied;
+}
+
+static int aligned_vwrite(char *buf, char *addr, unsigned long count)
+{
+ struct page *p;
+ int copied = 0;
+
+ while (count) {
+ unsigned long offset, length;
+
+ offset = (unsigned long)addr & ~PAGE_MASK;
+ length = PAGE_SIZE - offset;
+ if (length > count)
+ length = count;
+ p = vmalloc_to_page(addr);
+ /*
+ * To do safe access to this _mapped_ area, we need a
+ * lock. But adding a lock here means adding the
+ * overhead of vmalloc()/vfree() calls for this _debug_
+ * interface, which is rarely used. Instead, we use
+ * kmap() and accept a small overhead in this access function.
+ */
+ if (p) {
+ /*
+ * we can expect USER0 is not used (see vread/vwrite's
+ * function description)
+ */
+ void *map = kmap_atomic(p, KM_USER0);
+ memcpy(map + offset, buf, length);
+ kunmap_atomic(map, KM_USER0);
+ }
+ addr += length;
+ buf += length;
+ copied += length;
+ count -= length;
+ }
+ return copied;
+}
+
+/**
+ * vread() - read vmalloc area in a safe way.
+ * @buf: buffer for reading data
+ * @addr: vm address.
+ * @count: number of bytes to be read.
+ *
+ * Returns the number of bytes by which addr and buf should be increased
+ * (the same number as @count). Returns 0 if [addr...addr+count) does not
+ * include any intersection with a live vmalloc area.
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * copies data from that area to a given buffer. If the given memory range
+ * of [addr...addr+count) includes some valid address, data is copied to
+ * the proper area of @buf. If there are memory holes, they'll be zero-filled.
+ * An IOREMAP area is treated as a memory hole and no copy is done.
+ *
+ * If [addr...addr+count) doesn't include any intersection with a live
+ * vm_struct area, returns 0.
+ * @buf should be a kernel buffer. Because this function uses KM_USER0,
+ * the caller should guarantee KM_USER0 is not used.
+ *
+ * Note: in usual ops, vread() is never necessary because the caller
+ * should know the vmalloc() area is valid and can use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any information, such as /dev/kmem.
+ *
+ */
+
long vread(char *buf, char *addr, unsigned long count)
{
struct vm_struct *tmp;
char *vaddr, *buf_start = buf;
+ unsigned long buflen = count;
unsigned long n;
/* Don't allow overflow */
@@ -1652,7 +1764,7 @@ long vread(char *buf, char *addr, unsigned long count)
count = -(unsigned long) addr;
read_lock(&vmlist_lock);
- for (tmp = vmlist; tmp; tmp = tmp->next) {
+ for (tmp = vmlist; count && tmp; tmp = tmp->next) {
vaddr = (char *) tmp->addr;
if (addr >= vaddr + tmp->size - PAGE_SIZE)
continue;
@@ -1665,32 +1777,72 @@ long vread(char *buf, char *addr, unsigned long count)
count--;
}
n = vaddr + tmp->size - PAGE_SIZE - addr;
- do {
- if (count == 0)
- goto finished;
- *buf = *addr;
- buf++;
- addr++;
- count--;
- } while (--n > 0);
+ if (n > count)
+ n = count;
+ if (!(tmp->flags & VM_IOREMAP))
+ aligned_vread(buf, addr, n);
+ else /* IOREMAP area is treated as memory hole */
+ memset(buf, 0, n);
+ buf += n;
+ addr += n;
+ count -= n;
}
finished:
read_unlock(&vmlist_lock);
- return buf - buf_start;
+
+ if (buf == buf_start)
+ return 0;
+ /* zero-fill memory holes */
+ if (buf != buf_start + buflen)
+ memset(buf, 0, buflen - (buf - buf_start));
+
+ return buflen;
}
+/**
+ * vwrite() - write vmalloc area in a safe way.
+ * @buf: buffer for source data
+ * @addr: vm address.
+ * @count: number of bytes to be written.
+ *
+ * Returns the number of bytes by which addr and buf should be increased
+ * (the same number as @count).
+ * If [addr...addr+count) doesn't include any intersection with a valid
+ * vmalloc area, returns 0.
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * copies data from a buffer to the given addr. If the specified range of
+ * [addr...addr+count) includes some valid address, data is copied from
+ * the proper area of @buf. If there are memory holes, nothing is copied
+ * into the holes. An IOREMAP area is treated as a memory hole and no
+ * copy is done.
+ *
+ * If [addr...addr+count) doesn't include any intersection with a live
+ * vm_struct area, returns 0.
+ * @buf should be a kernel buffer. Because this function uses KM_USER0,
+ * the caller should guarantee KM_USER0 is not used.
+ *
+ * Note: in usual ops, vwrite() is never necessary because the caller
+ * should know the vmalloc() area is valid and can use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any information, such as /dev/kmem.
+ *
+ * The caller should guarantee KM_USER1 is not used.
+ */
+
long vwrite(char *buf, char *addr, unsigned long count)
{
struct vm_struct *tmp;
- char *vaddr, *buf_start = buf;
- unsigned long n;
+ char *vaddr;
+ unsigned long n, buflen;
+ int copied = 0;
/* Don't allow overflow */
if ((unsigned long) addr + count < count)
count = -(unsigned long) addr;
+ buflen = count;
read_lock(&vmlist_lock);
- for (tmp = vmlist; tmp; tmp = tmp->next) {
+ for (tmp = vmlist; count && tmp; tmp = tmp->next) {
vaddr = (char *) tmp->addr;
if (addr >= vaddr + tmp->size - PAGE_SIZE)
continue;
@@ -1702,18 +1854,21 @@ long vwrite(char *buf, char *addr, unsigned long count)
count--;
}
n = vaddr + tmp->size - PAGE_SIZE - addr;
- do {
- if (count == 0)
- goto finished;
- *addr = *buf;
- buf++;
- addr++;
- count--;
- } while (--n > 0);
+ if (n > count)
+ n = count;
+ if (!(tmp->flags & VM_IOREMAP)) {
+ aligned_vwrite(buf, addr, n);
+ copied++;
+ }
+ buf += n;
+ addr += n;
+ count -= n;
}
finished:
read_unlock(&vmlist_lock);
- return buf - buf_start;
+ if (!copied)
+ return 0;
+ return buflen;
}
/**
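A hedged usage sketch for the reworked vread(): it returns 0 when the range overlaps no live vmalloc area, otherwise the full requested length, with unmapped holes and VM_IOREMAP ranges zero-filled (the wrapper name is illustrative):

static long dump_vmalloc_range(char *dst, char *vm_addr, unsigned long len)
{
	long ret = vread(dst, vm_addr, len);

	/* 0 means the range intersects no live vmalloc area at all */
	return ret ? ret : -ENXIO;
}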
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ba8228e0a80..64e43889883 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -148,8 +148,8 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
return &zone->reclaim_stat;
}
-static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
- enum lru_list lru)
+static unsigned long zone_nr_lru_pages(struct zone *zone,
+ struct scan_control *sc, enum lru_list lru)
{
if (!scanning_global_lru(sc))
return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
@@ -286,7 +286,12 @@ static inline int page_mapping_inuse(struct page *page)
static inline int is_page_cache_freeable(struct page *page)
{
- return page_count(page) - !!page_has_private(page) == 2;
+ /*
+ * A freeable page cache page is referenced only by the caller
+ * that isolated the page, the page cache radix tree and
+ * optional buffer heads at page->private.
+ */
+ return page_count(page) - page_has_private(page) == 2;
}
static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -361,7 +366,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* block, for some throttling. This happens by accident, because
* swap_backing_dev_info is bust: it doesn't reflect the
* congestion state of the swapdevs. Easy to fix, if needed.
- * See swapfile.c:page_queue_congested().
*/
if (!is_page_cache_freeable(page))
return PAGE_KEEP;
@@ -531,7 +535,7 @@ redo:
* unevictable page on [in]active list.
* We know how to handle that.
*/
- lru = active + page_is_file_cache(page);
+ lru = active + page_lru_base_type(page);
lru_cache_add_lru(page, lru);
} else {
/*
@@ -659,7 +663,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* processes. Try to unmap it here.
*/
if (page_mapped(page) && mapping) {
- switch (try_to_unmap(page, 0)) {
+ switch (try_to_unmap(page, TTU_UNMAP)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -821,7 +825,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
return ret;
- if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
+ if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
return ret;
/*
@@ -935,6 +939,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
/* Check that we have not crossed a zone boundary. */
if (unlikely(page_zone_id(cursor_page) != zone_id))
continue;
+
+ /*
+ * If we don't have enough swap space, reclaiming
+ * anon pages which don't already have a swap slot is
+ * pointless.
+ */
+ if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+ !PageSwapCache(cursor_page))
+ continue;
+
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
mem_cgroup_del_lru(cursor_page);
@@ -961,7 +975,7 @@ static unsigned long isolate_pages_global(unsigned long nr,
if (file)
lru += LRU_FILE;
return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
- mode, !!file);
+ mode, file);
}
/*
@@ -976,7 +990,7 @@ static unsigned long clear_active_flags(struct list_head *page_list,
struct page *page;
list_for_each_entry(page, page_list, lru) {
- lru = page_is_file_cache(page);
+ lru = page_lru_base_type(page);
if (PageActive(page)) {
lru += LRU_ACTIVE;
ClearPageActive(page);
@@ -1034,6 +1048,31 @@ int isolate_lru_page(struct page *page)
}
/*
+ * Are there way too many processes in the direct reclaim path already?
+ */
+static int too_many_isolated(struct zone *zone, int file,
+ struct scan_control *sc)
+{
+ unsigned long inactive, isolated;
+
+ if (current_is_kswapd())
+ return 0;
+
+ if (!scanning_global_lru(sc))
+ return 0;
+
+ if (file) {
+ inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+ isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+ } else {
+ inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+ isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+ }
+
+ return isolated > inactive;
+}
+
+/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
@@ -1048,6 +1087,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
int lumpy_reclaim = 0;
+ while (unlikely(too_many_isolated(zone, file, sc))) {
+ congestion_wait(WRITE, HZ/10);
+
+ /* We are about to die and free our memory. Return now. */
+ if (fatal_signal_pending(current))
+ return SWAP_CLUSTER_MAX;
+ }
+
/*
* If we need a large contiguous chunk of memory, or have
* trouble getting a small set of contiguous pages, we
@@ -1072,10 +1119,26 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
unsigned long nr_active;
unsigned int count[NR_LRU_LISTS] = { 0, };
int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+ unsigned long nr_anon;
+ unsigned long nr_file;
nr_taken = sc->isolate_pages(sc->swap_cluster_max,
&page_list, &nr_scan, sc->order, mode,
zone, sc->mem_cgroup, 0, file);
+
+ if (scanning_global_lru(sc)) {
+ zone->pages_scanned += nr_scan;
+ if (current_is_kswapd())
+ __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+ nr_scan);
+ else
+ __count_zone_vm_events(PGSCAN_DIRECT, zone,
+ nr_scan);
+ }
+
+ if (nr_taken == 0)
+ goto done;
+
nr_active = clear_active_flags(&page_list, count);
__count_vm_events(PGDEACTIVATE, nr_active);
@@ -1088,8 +1151,10 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
__mod_zone_page_state(zone, NR_INACTIVE_ANON,
-count[LRU_INACTIVE_ANON]);
- if (scanning_global_lru(sc))
- zone->pages_scanned += nr_scan;
+ nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+ nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1123,18 +1188,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
}
nr_reclaimed += nr_freed;
+
local_irq_disable();
- if (current_is_kswapd()) {
- __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+ if (current_is_kswapd())
__count_vm_events(KSWAPD_STEAL, nr_freed);
- } else if (scanning_global_lru(sc))
- __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
__count_zone_vm_events(PGSTEAL, zone, nr_freed);
- if (nr_taken == 0)
- goto done;
-
spin_lock(&zone->lru_lock);
/*
* Put back any unfreeable pages.
@@ -1153,8 +1212,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
SetPageLRU(page);
lru = page_lru(page);
add_page_to_lru_list(zone, page, lru);
- if (PageActive(page)) {
- int file = !!page_is_file_cache(page);
+ if (is_active_lru(lru)) {
+ int file = is_file_lru(lru);
reclaim_stat->recent_rotated[file]++;
}
if (!pagevec_add(&pvec, page)) {
@@ -1163,10 +1222,13 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
spin_lock_irq(&zone->lru_lock);
}
}
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
} while (nr_scanned < max_scan);
- spin_unlock(&zone->lru_lock);
+
done:
- local_irq_enable();
+ spin_unlock_irq(&zone->lru_lock);
pagevec_release(&pvec);
return nr_reclaimed;
}
@@ -1215,15 +1277,10 @@ static void move_active_pages_to_lru(struct zone *zone,
while (!list_empty(list)) {
page = lru_to_page(list);
- prefetchw_prev_lru_page(page, list, flags);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- VM_BUG_ON(!PageActive(page));
- if (!is_active_lru(lru))
- ClearPageActive(page); /* we are de-activating */
-
list_move(&page->lru, &zone->lru[lru].list);
mem_cgroup_add_lru_list(page, lru);
pgmoved++;
@@ -1244,7 +1301,7 @@ static void move_active_pages_to_lru(struct zone *zone,
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct scan_control *sc, int priority, int file)
{
- unsigned long pgmoved;
+ unsigned long nr_taken;
unsigned long pgscanned;
unsigned long vm_flags;
LIST_HEAD(l_hold); /* The pages which were snipped off */
@@ -1252,10 +1309,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
LIST_HEAD(l_inactive);
struct page *page;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+ unsigned long nr_rotated = 0;
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
- pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+ nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
ISOLATE_ACTIVE, zone,
sc->mem_cgroup, 1, file);
/*
@@ -1265,16 +1323,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
if (scanning_global_lru(sc)) {
zone->pages_scanned += pgscanned;
}
- reclaim_stat->recent_scanned[!!file] += pgmoved;
+ reclaim_stat->recent_scanned[file] += nr_taken;
__count_zone_vm_events(PGREFILL, zone, pgscanned);
if (file)
- __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+ __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
else
- __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+ __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
spin_unlock_irq(&zone->lru_lock);
- pgmoved = 0; /* count referenced (mapping) mapped pages */
while (!list_empty(&l_hold)) {
cond_resched();
page = lru_to_page(&l_hold);
@@ -1288,7 +1346,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
/* page_referenced clears PageReferenced */
if (page_mapping_inuse(page) &&
page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
- pgmoved++;
+ nr_rotated++;
/*
* Identify referenced, file-backed active pages and
* give them one more trip around the active list. So
@@ -1304,6 +1362,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
}
}
+ ClearPageActive(page); /* we are de-activating */
list_add(&page->lru, &l_inactive);
}
@@ -1317,13 +1376,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* helps balance scan pressure between file and anonymous pages in
* get_scan_ratio.
*/
- reclaim_stat->recent_rotated[!!file] += pgmoved;
+ reclaim_stat->recent_rotated[file] += nr_rotated;
move_active_pages_to_lru(zone, &l_active,
LRU_ACTIVE + file * LRU_FILE);
move_active_pages_to_lru(zone, &l_inactive,
LRU_BASE + file * LRU_FILE);
-
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
}
@@ -1429,10 +1488,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
unsigned long ap, fp;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
- anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
- zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
- file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
- zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
+ anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+ file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
if (scanning_global_lru(sc)) {
free = zone_page_state(zone, NR_FREE_PAGES);
@@ -1526,6 +1585,7 @@ static void shrink_zone(int priority, struct zone *zone,
enum lru_list l;
unsigned long nr_reclaimed = sc->nr_reclaimed;
unsigned long swap_cluster_max = sc->swap_cluster_max;
+ struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
int noswap = 0;
/* If we have no swap space, do not bother scanning anon pages. */
@@ -1540,17 +1600,14 @@ static void shrink_zone(int priority, struct zone *zone,
int file = is_file_lru(l);
unsigned long scan;
- scan = zone_nr_pages(zone, sc, l);
+ scan = zone_nr_lru_pages(zone, sc, l);
if (priority || noswap) {
scan >>= priority;
scan = (scan * percent[file]) / 100;
}
- if (scanning_global_lru(sc))
- nr[l] = nr_scan_try_batch(scan,
- &zone->lru[l].nr_saved_scan,
- swap_cluster_max);
- else
- nr[l] = scan;
+ nr[l] = nr_scan_try_batch(scan,
+ &reclaim_stat->nr_saved_scan[l],
+ swap_cluster_max);
}
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1652,10 +1709,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
*
* If the caller is !__GFP_FS then the probability of a failure is reasonably
* high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about. We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written. But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about. We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written. But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
*
* returns: 0, if no pages reclaimed
* else, the number of pages reclaimed
@@ -1685,7 +1742,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- lru_pages += zone_lru_pages(zone);
+ lru_pages += zone_reclaimable_pages(zone);
}
}
@@ -1779,11 +1836,45 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+ gfp_t gfp_mask, bool noswap,
+ unsigned int swappiness,
+ struct zone *zone, int nid)
+{
+ struct scan_control sc = {
+ .may_writepage = !laptop_mode,
+ .may_unmap = 1,
+ .may_swap = !noswap,
+ .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .swappiness = swappiness,
+ .order = 0,
+ .mem_cgroup = mem,
+ .isolate_pages = mem_cgroup_isolate_pages,
+ };
+ nodemask_t nm = nodemask_of_node(nid);
+
+ sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+ (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+ sc.nodemask = &nm;
+ sc.nr_reclaimed = 0;
+ sc.nr_scanned = 0;
+ /*
+ * NOTE: Although we can get the priority field, using it
+ * here is not a good idea, since it limits the pages we can scan.
+ * If we don't reclaim here, the shrink_zone from balance_pgdat
+ * will pick up pages from other mem cgroups as well. We hack
+ * the priority and make it zero.
+ */
+ shrink_zone(0, zone, &sc);
+ return sc.nr_reclaimed;
+}
+
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
gfp_t gfp_mask,
bool noswap,
unsigned int swappiness)
{
+ struct zonelist *zonelist;
struct scan_control sc = {
.may_writepage = !laptop_mode,
.may_unmap = 1,
@@ -1795,7 +1886,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
.nodemask = NULL, /* we don't care the placement */
};
- struct zonelist *zonelist;
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1902,7 +1992,7 @@ loop_again:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
- lru_pages += zone_lru_pages(zone);
+ lru_pages += zone_reclaimable_pages(zone);
}
/*
@@ -1917,6 +2007,7 @@ loop_again:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
int nr_slab;
+ int nid, zid;
if (!populated_zone(zone))
continue;
@@ -1931,6 +2022,15 @@ loop_again:
temp_priority[i] = priority;
sc.nr_scanned = 0;
note_zone_scanning_priority(zone, priority);
+
+ nid = pgdat->node_id;
+ zid = zone_idx(zone);
+ /*
+ * Call soft limit reclaim before calling shrink_zone.
+ * For now we ignore the return value
+ */
+ mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
+ nid, zid);
/*
* We put equal pressure on every zone, unless one
* zone has way too many pages free already.
@@ -1946,7 +2046,7 @@ loop_again:
if (zone_is_all_unreclaimable(zone))
continue;
if (nr_slab == 0 && zone->pages_scanned >=
- (zone_lru_pages(zone) * 6))
+ (zone_reclaimable_pages(zone) * 6))
zone_set_flag(zone,
ZONE_ALL_UNRECLAIMABLE);
/*
@@ -2113,12 +2213,39 @@ void wakeup_kswapd(struct zone *zone, int order)
wake_up_interruptible(&pgdat->kswapd_wait);
}
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count should be mostly accurate.
+ * The less easily reclaimable pages may be
+ * - mlocked pages, which will be moved to the unevictable list when encountered
+ * - mapped pages, which may require several passes to be reclaimed
+ * - dirty pages, which are not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
+{
+ int nr;
+
+ nr = global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_FILE);
+
+ if (nr_swap_pages > 0)
+ nr += global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_ANON);
+
+ return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
{
- return global_page_state(NR_ACTIVE_ANON)
- + global_page_state(NR_ACTIVE_FILE)
- + global_page_state(NR_INACTIVE_ANON)
- + global_page_state(NR_INACTIVE_FILE);
+ int nr;
+
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (nr_swap_pages > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
}
#ifdef CONFIG_HIBERNATION
@@ -2133,6 +2260,7 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
{
struct zone *zone;
unsigned long nr_reclaimed = 0;
+ struct zone_reclaim_stat *reclaim_stat;
for_each_populated_zone(zone) {
enum lru_list l;
@@ -2149,11 +2277,14 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
l == LRU_ACTIVE_FILE))
continue;
- zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
- if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
+ reclaim_stat = get_reclaim_stat(zone, sc);
+ reclaim_stat->nr_saved_scan[l] +=
+ (lru_pages >> prio) + 1;
+ if (reclaim_stat->nr_saved_scan[l]
+ >= nr_pages || pass > 3) {
unsigned long nr_to_scan;
- zone->lru[l].nr_saved_scan = 0;
+ reclaim_stat->nr_saved_scan[l] = 0;
nr_to_scan = min(nr_pages, lru_pages);
nr_reclaimed += shrink_list(l, nr_to_scan, zone,
sc, prio);
@@ -2190,7 +2321,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
current->reclaim_state = &reclaim_state;
- lru_pages = global_lru_pages();
+ lru_pages = global_reclaimable_pages();
nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
/* If slab caches are huge, it's better to hit them first */
while (nr_slab >= lru_pages) {
@@ -2232,7 +2363,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
reclaim_state.reclaimed_slab = 0;
shrink_slab(sc.nr_scanned, sc.gfp_mask,
- global_lru_pages());
+ global_reclaimable_pages());
sc.nr_reclaimed += reclaim_state.reclaimed_slab;
if (sc.nr_reclaimed >= nr_pages)
goto out;
@@ -2249,7 +2380,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
if (!sc.nr_reclaimed) {
do {
reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
+ shrink_slab(nr_pages, sc.gfp_mask,
+ global_reclaimable_pages());
sc.nr_reclaimed += reclaim_state.reclaimed_slab;
} while (sc.nr_reclaimed < nr_pages &&
reclaim_state.reclaimed_slab > 0);
@@ -2569,7 +2701,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
retry:
ClearPageUnevictable(page);
if (page_evictable(page, NULL)) {
- enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+ enum lru_list l = page_lru_base_type(page);
__dec_zone_state(zone, NR_UNEVICTABLE);
list_move(&page->lru, &zone->lru[l].list);
@@ -2712,10 +2844,10 @@ static void scan_all_zones_unevictable_pages(void)
unsigned long scan_unevictable_pages;
int scan_unevictable_handler(struct ctl_table *table, int write,
- struct file *file, void __user *buffer,
+ void __user *buffer,
size_t *length, loff_t *ppos)
{
- proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (write && *(unsigned long *)table->data)
scan_all_zones_unevictable_pages();
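For reference, the new global_reclaimable_pages()/zone_reclaimable_pages() helpers in this vmscan.c diff count file LRU pages unconditionally and add anon LRU pages only when swap space is available. A minimal standalone C sketch of that accounting follows; the struct, function name, and page counts are invented for illustration and are not the kernel helpers themselves:

#include <stdio.h>

/* Hypothetical per-zone LRU counters (illustrative only, not kernel state). */
struct zone_counts {
	unsigned long active_file, inactive_file;
	unsigned long active_anon, inactive_anon;
};

/* Mirrors the accounting above: file pages always count as reclaimable,
 * anon pages only when swap space is available. */
static unsigned long reclaimable_pages(const struct zone_counts *z,
				       long nr_swap_pages)
{
	unsigned long nr = z->active_file + z->inactive_file;

	if (nr_swap_pages > 0)
		nr += z->active_anon + z->inactive_anon;

	return nr;
}

int main(void)
{
	struct zone_counts z = { 100, 200, 300, 400 };

	printf("with swap:    %lu\n", reclaimable_pages(&z, 1024));	/* 1000 */
	printf("without swap: %lu\n", reclaimable_pages(&z, 0));	/* 300 */
	return 0;
}

On a swapless system only the 300 file pages are counted, which is why the patch also switches the "all unreclaimable" heuristic in balance_pgdat over to this helper.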
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 138bed53706..c81321f9fee 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -639,11 +639,14 @@ static const char * const vmstat_text[] = {
"nr_slab_reclaimable",
"nr_slab_unreclaimable",
"nr_page_table_pages",
+ "nr_kernel_stack",
"nr_unstable",
"nr_bounce",
"nr_vmscan_write",
"nr_writeback_temp",
-
+ "nr_isolated_anon",
+ "nr_isolated_file",
+ "nr_shmem",
#ifdef CONFIG_NUMA
"numa_hit",
"numa_miss",
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9bf0b737aa5..b2e07f0dd29 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,6 +43,7 @@
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
#include <linux/virtio_9p.h>
#define VIRTQUEUE_NUM 128
@@ -200,7 +201,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
req->status = REQ_STATUS_SENT;
- if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc)) {
+ if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: virtio rpc add_buf returned failure");
return -EIO;
@@ -334,8 +335,6 @@ static void p9_virtio_remove(struct virtio_device *vdev)
}
}
-#define VIRTIO_ID_9P 9
-
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
{ 0 },
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index da0f64f82b5..fbcac76fdc0 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -358,6 +358,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
ax25_dev *ax25_dev;
ax25_cb *ax25;
unsigned int k;
+ int ret = 0;
if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
return -EFAULT;
@@ -388,57 +389,63 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
- return -EINVAL;
+ goto einval_put;
} else {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
- return -EINVAL;
+ goto einval_put;
}
ax25->window = ax25_ctl.arg;
break;
case AX25_T1:
if (ax25_ctl.arg < 1)
- return -EINVAL;
+ goto einval_put;
ax25->rtt = (ax25_ctl.arg * HZ) / 2;
ax25->t1 = ax25_ctl.arg * HZ;
break;
case AX25_T2:
if (ax25_ctl.arg < 1)
- return -EINVAL;
+ goto einval_put;
ax25->t2 = ax25_ctl.arg * HZ;
break;
case AX25_N2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
- return -EINVAL;
+ goto einval_put;
ax25->n2count = 0;
ax25->n2 = ax25_ctl.arg;
break;
case AX25_T3:
if (ax25_ctl.arg < 0)
- return -EINVAL;
+ goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
if (ax25_ctl.arg < 0)
- return -EINVAL;
+ goto einval_put;
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
case AX25_PACLEN:
if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
- return -EINVAL;
+ goto einval_put;
ax25->paclen = ax25_ctl.arg;
break;
default:
- return -EINVAL;
+ goto einval_put;
}
- return 0;
+out_put:
+ ax25_cb_put(ax25);
+ return ret;
+
+einval_put:
+ ret = -EINVAL;
+ goto out_put;
}
static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
@@ -1781,8 +1788,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
ax25_info.n2count = ax25->n2count;
ax25_info.state = ax25->state;
- ax25_info.rcv_q = sk_wmem_alloc_get(sk);
- ax25_info.snd_q = sk_rmem_alloc_get(sk);
+ ax25_info.rcv_q = sk_rmem_alloc_get(sk);
+ ax25_info.snd_q = sk_wmem_alloc_get(sk);
ax25_info.vs = ax25->vs;
ax25_info.vr = ax25->vr;
ax25_info.va = ax25->va;
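The ax25_ctl_ioctl() rework above replaces the early `return -EINVAL` statements with `goto einval_put` so that the reference taken on the ax25_cb is always dropped before returning. A minimal sketch of that single-exit error-path pattern with a toy refcounted object; the struct and helpers below are invented for illustration, not the AX.25 code:

#include <stdio.h>

/* Toy refcounted object standing in for ax25_cb (invented for this sketch). */
struct obj { int refs; };

static void obj_put(struct obj *o) { o->refs--; }

/* Single exit: every error path drops the reference, as in the hunk above. */
static int validate(struct obj *o, int arg)
{
	int ret = 0;

	if (arg < 1 || arg > 7)
		goto einval_put;
	/* ... apply the setting here ... */
out_put:
	obj_put(o);
	return ret;

einval_put:
	ret = -22;	/* stand-in for -EINVAL */
	goto out_put;
}

int main(void)
{
	struct obj o = { .refs = 1 };
	int ret = validate(&o, 99);

	printf("ret=%d refs=%d\n", ret, o.refs);	/* ret=-22 refs=0 */
	return 0;
}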
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 09bedeb5579..49d8495d69b 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -577,11 +577,6 @@ static int hidp_session(void *arg)
}
if (session->hid) {
- if (session->hid->claimed & HID_CLAIMED_INPUT)
- hidinput_disconnect(session->hid);
- if (session->hid->claimed & HID_CLAIMED_HIDRAW)
- hidraw_disconnect(session->hid);
-
hid_destroy_device(session->hid);
session->hid = NULL;
}
@@ -747,8 +742,6 @@ static void hidp_stop(struct hid_device *hid)
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- if (hid->claimed & HID_CLAIMED_INPUT)
- hidinput_disconnect(hid);
hid->claimed = 0;
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 907a82e9023..a16a2342f6b 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -965,12 +965,12 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
#ifdef CONFIG_SYSCTL
static
-int brnf_sysctl_call_tables(ctl_table * ctl, int write, struct file *filp,
+int brnf_sysctl_call_tables(ctl_table * ctl, int write,
void __user * buffer, size_t * lenp, loff_t * ppos)
{
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0bcecbf0658..4d11c28ca8c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -192,11 +192,10 @@
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
/* Thread control flag bits */
-#define T_TERMINATE (1<<0)
-#define T_STOP (1<<1) /* Stop run */
-#define T_RUN (1<<2) /* Start run */
-#define T_REMDEVALL (1<<3) /* Remove all devs */
-#define T_REMDEV (1<<4) /* Remove one dev */
+#define T_STOP (1<<0) /* Stop run */
+#define T_RUN (1<<1) /* Start run */
+#define T_REMDEVALL (1<<2) /* Remove all devs */
+#define T_REMDEV (1<<3) /* Remove one dev */
/* If lock -- can be removed after some work */
#define if_lock(t) spin_lock(&(t->if_lock));
@@ -2105,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
- ktime_t start;
+ ktime_t start_time, end_time;
s32 remaining;
struct hrtimer_sleeper t;
@@ -2116,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
if (remaining <= 0)
return;
- start = ktime_now();
+ start_time = ktime_now();
if (remaining < 100)
udelay(remaining); /* really small just spin */
else {
@@ -2135,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
} while (t.task && pkt_dev->running && !signal_pending(current));
__set_current_state(TASK_RUNNING);
}
- pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+ end_time = ktime_now();
+
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+ pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -3365,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
mutex_unlock(&pktgen_thread_lock);
}
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
{
ktime_t idle_start = ktime_now();
+ schedule();
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
- if (need_resched())
- schedule();
- else
- cpu_relax();
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+ ktime_t idle_start = ktime_now();
+ while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+ if (signal_pending(current))
+ break;
+
+ if (need_resched())
+ pktgen_resched(pkt_dev);
+ else
+ cpu_relax();
+ }
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
}
-
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
@@ -3387,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
u16 queue_map;
int ret;
- if (pkt_dev->delay) {
- spin(pkt_dev, pkt_dev->next_tx);
-
- /* This is max DELAY, this has special meaning of
- * "never transmit"
- */
- if (pkt_dev->delay == ULLONG_MAX) {
- pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
- return;
- }
- }
-
- if (!pkt_dev->skb) {
- set_cur_queue_map(pkt_dev);
- queue_map = pkt_dev->cur_queue_map;
- } else {
- queue_map = skb_get_queue_mapping(pkt_dev->skb);
+ /* If device is offline, then don't send */
+ if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+ pktgen_stop_device(pkt_dev);
+ return;
}
- txq = netdev_get_tx_queue(odev, queue_map);
- /* Did we saturate the queue already? */
- if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
- /* If device is down, then all queues are permnantly frozen */
- if (netif_running(odev))
- idle(pkt_dev);
- else
- pktgen_stop_device(pkt_dev);
+ /* This is the max DELAY; it has the special meaning of
+ * "never transmit"
+ */
+ if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+ pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
return;
}
+ /* If no skb or clone count exhausted then get new one */
if (!pkt_dev->skb || (pkt_dev->last_ok &&
++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
/* build a new pkt */
@@ -3435,54 +3432,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->clone_count = 0; /* reset counter */
}
- /* fill_packet() might have changed the queue */
+ if (pkt_dev->delay && pkt_dev->last_ok)
+ spin(pkt_dev, pkt_dev->next_tx);
+
queue_map = skb_get_queue_mapping(pkt_dev->skb);
txq = netdev_get_tx_queue(odev, queue_map);
__netif_tx_lock_bh(txq);
- if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
- pkt_dev->last_ok = 0;
- else {
- atomic_inc(&(pkt_dev->skb->users));
+ atomic_inc(&(pkt_dev->skb->users));
- retry_now:
+ if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+ ret = NETDEV_TX_BUSY;
+ else
ret = (*xmit)(pkt_dev->skb, odev);
- switch (ret) {
- case NETDEV_TX_OK:
- txq_trans_update(txq);
- pkt_dev->last_ok = 1;
- pkt_dev->sofar++;
- pkt_dev->seq_num++;
- pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
- break;
- case NETDEV_TX_LOCKED:
- cpu_relax();
- goto retry_now;
- default: /* Drivers are not supposed to return other values! */
- if (net_ratelimit())
- pr_info("pktgen: %s xmit error: %d\n",
- odev->name, ret);
- pkt_dev->errors++;
- /* fallthru */
- case NETDEV_TX_BUSY:
- /* Retry it next time */
- atomic_dec(&(pkt_dev->skb->users));
- pkt_dev->last_ok = 0;
- }
-
- if (pkt_dev->delay)
- pkt_dev->next_tx = ktime_add_ns(ktime_now(),
- pkt_dev->delay);
+
+ switch (ret) {
+ case NETDEV_TX_OK:
+ txq_trans_update(txq);
+ pkt_dev->last_ok = 1;
+ pkt_dev->sofar++;
+ pkt_dev->seq_num++;
+ pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+ break;
+ default: /* Drivers are not supposed to return other values! */
+ if (net_ratelimit())
+ pr_info("pktgen: %s xmit error: %d\n",
+ odev->name, ret);
+ pkt_dev->errors++;
+ /* fallthru */
+ case NETDEV_TX_LOCKED:
+ case NETDEV_TX_BUSY:
+ /* Retry it next time */
+ atomic_dec(&(pkt_dev->skb->users));
+ pkt_dev->last_ok = 0;
}
__netif_tx_unlock_bh(txq);
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
- while (atomic_read(&(pkt_dev->skb->users)) != 1) {
- if (signal_pending(current))
- break;
- idle(pkt_dev);
- }
+ pktgen_wait_for_skb(pkt_dev);
/* Done with this */
pktgen_stop_device(pkt_dev);
@@ -3515,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
- if (!pkt_dev &&
- (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
- == 0) {
- prepare_to_wait(&(t->queue), &wait,
- TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- finish_wait(&(t->queue), &wait);
+ if (unlikely(!pkt_dev && t->control == 0)) {
+ wait_event_interruptible_timeout(t->queue,
+ t->control != 0,
+ HZ/10);
+ continue;
}
__set_current_state(TASK_RUNNING);
- if (pkt_dev)
+ if (likely(pkt_dev)) {
pktgen_xmit(pkt_dev);
+ if (need_resched())
+ pktgen_resched(pkt_dev);
+ else
+ cpu_relax();
+ }
+
if (t->control & T_STOP) {
pktgen_stop(t);
t->control &= ~(T_STOP);
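After the pktgen rework above, the transmit path no longer retries NETDEV_TX_LOCKED in place; once the requested packet count is reached, pktgen_wait_for_skb() spins until the driver has released the last reference to the skb, rescheduling when needed. A rough userspace analogue of that wait loop, with a C11 atomic standing in for skb->users (purely illustrative, not the pktgen code):

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of pktgen_wait_for_skb(): spin until we hold the last
 * reference (users == 1), yielding when the scheduler wants the CPU back.
 * The atomic counter stands in for skb->users. */
static void wait_for_last_user(atomic_int *users)
{
	while (atomic_load(users) != 1)
		sched_yield();	/* the kernel uses pktgen_resched()/cpu_relax() */
}

int main(void)
{
	atomic_int users;

	atomic_init(&users, 1);	/* nothing else holds the skb in this toy run */
	wait_for_last_user(&users);
	printf("skb free to reuse\n");
	return 0;
}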
diff --git a/net/core/sock.c b/net/core/sock.c
index 30d5446512f..524712a7b15 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1206,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
void __init sk_init(void)
{
- if (num_physpages <= 4096) {
+ if (totalram_pages <= 4096) {
sysctl_wmem_max = 32767;
sysctl_rmem_max = 32767;
sysctl_wmem_default = 32767;
sysctl_rmem_default = 32767;
- } else if (num_physpages >= 131072) {
+ } else if (totalram_pages >= 131072) {
sysctl_wmem_max = 131071;
sysctl_rmem_max = 131071;
}
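This hunk, like several that follow (dccp, decnet, TCP, netfilter, netlink, sctp), switches sizing heuristics from num_physpages to totalram_pages. As a reminder of what sk_init() does with that number, here is a tiny standalone sketch using only the thresholds visible in the hunk; the middle return value is a placeholder, since the kernel simply leaves its compiled-in defaults untouched in that range:

#include <stdio.h>

/* Thresholds from the sk_init() hunk above; 65535 is a placeholder for
 * "leave the compiled-in default alone". */
static long pick_wmem_max(unsigned long totalram_pages)
{
	if (totalram_pages <= 4096)	/* tiny machines */
		return 32767;
	if (totalram_pages >= 131072)	/* >= 512 MiB with 4 KiB pages */
		return 131071;
	return 65535;
}

int main(void)
{
	printf("16 MiB box: %ld\n", pick_wmem_max(4096));
	printf("1 GiB box:  %ld\n", pick_wmem_max(262144));
	return 0;
}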
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 923db06c7e5..bc4467082a0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
*
* The methodology is similar to that of the buffer cache.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (21 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (21 - PAGE_SHIFT);
else
- goal = num_physpages >> (23 - PAGE_SHIFT);
+ goal = totalram_pages >> (23 - PAGE_SHIFT);
if (thash_entries)
goal = (thash_entries *
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 1c6a5bb6f0c..6e1f085db06 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -164,7 +164,7 @@ static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MU
static int min_priority[1];
static int max_priority[] = { 127 }; /* From DECnet spec */
-static int dn_forwarding_proc(ctl_table *, int, struct file *,
+static int dn_forwarding_proc(ctl_table *, int,
void __user *, size_t *, loff_t *);
static int dn_forwarding_sysctl(ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
@@ -274,7 +274,6 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
}
static int dn_forwarding_proc(ctl_table *table, int write,
- struct file *filep,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -290,7 +289,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
dn_db = dev->dn_ptr;
old = dn_db->parms.forwarding;
- err = proc_dointvec(table, write, filep, buffer, lenp, ppos);
+ err = proc_dointvec(table, write, buffer, lenp, ppos);
if ((err >= 0) && write) {
if (dn_db->parms.forwarding < 0)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9383d3e5a1a..57662cabaf9 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
add_timer(&dn_route_timer);
- goal = num_physpages >> (26 - PAGE_SHIFT);
+ goal = totalram_pages >> (26 - PAGE_SHIFT);
for(order = 0; (1UL << order) < goal; order++)
/* NOTHING */;
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 5bcd592ae6d..26b0ab1e9f5 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -165,7 +165,6 @@ static int dn_node_address_strategy(ctl_table *table,
}
static int dn_node_address_handler(ctl_table *table, int write,
- struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -276,7 +275,6 @@ static int dn_def_dev_strategy(ctl_table *table,
static int dn_def_dev_handler(ctl_table *table, int write,
- struct file * filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 07336c6201f..e92f1fd28aa 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1270,10 +1270,10 @@ static void inet_forward_change(struct net *net)
}
static int devinet_conf_proc(ctl_table *ctl, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write) {
struct ipv4_devconf *cnf = ctl->extra1;
@@ -1342,12 +1342,12 @@ static int devinet_conf_sysctl(ctl_table *table,
}
static int devinet_sysctl_forward(ctl_table *ctl, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *valp != val) {
struct net *net = ctl->extra2;
@@ -1372,12 +1372,12 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
}
int ipv4_doint_and_flush(ctl_table *ctl, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
struct net *net = ctl->extra2;
if (write && *valp != val)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d9645c94a06..41ada9904d3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -66,10 +66,7 @@
solution, but it supposes maintaining a new variable in ALL
skb, even if no tunneling is used.
- Current solution: t->recursion lock breaks dead loops. It looks
- like dev->tbusy flag, but I preferred new variable, because
- the semantics is different. One day, when hard_start_xmit
- will be multithreaded we will have to use skb->encapsulation.
+ Current solution: HARD_TX_LOCK lock breaks dead loops.
@@ -678,11 +675,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
__be32 dst;
int mtu;
- if (tunnel->recursion++) {
- stats->collisions++;
- goto tx_error;
- }
-
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -820,7 +812,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
ip_rt_put(rt);
stats->tx_dropped++;
dev_kfree_skb(skb);
- tunnel->recursion--;
return NETDEV_TX_OK;
}
if (skb->sk)
@@ -888,7 +879,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
nf_reset(skb);
IPTUNNEL_XMIT();
- tunnel->recursion--;
return NETDEV_TX_OK;
tx_error_icmp:
@@ -897,7 +887,6 @@ tx_error_icmp:
tx_error:
stats->tx_errors++;
dev_kfree_skb(skb);
- tunnel->recursion--;
return NETDEV_TX_OK;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fc7993e9061..5a0693576e8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -611,6 +611,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
* Check the arguments are allowable
*/
+ if (optlen < sizeof(struct in_addr))
+ goto e_inval;
+
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
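The do_ip_setsockopt() hunk above adds a length check so that a buffer shorter than struct in_addr is rejected before anything is copied from user space. A small userspace sketch of the same bounds check, with a stand-in struct and error value (illustrative only, not the kernel routine):

#include <stdio.h>
#include <string.h>

struct in_addr_like { unsigned int s_addr; };	/* stand-in for struct in_addr */

/* Same idea as the added check: reject option buffers too short to hold
 * even the smallest accepted structure before copying anything. */
static int set_mcast_if(const void *optval, size_t optlen)
{
	struct in_addr_like addr;

	if (optlen < sizeof(addr))
		return -22;	/* stand-in for -EINVAL */

	memcpy(&addr, optval, sizeof(addr));
	return 0;
}

int main(void)
{
	unsigned int v = 0x0100007fu;	/* arbitrary address bytes for the test */

	printf("short buffer: %d\n", set_mcast_if(&v, 2));
	printf("full buffer:  %d\n", set_mcast_if(&v, sizeof(v)));
	return 0;
}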
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 62548cb0923..08ccd344de7 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -402,11 +402,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
__be32 dst = tiph->daddr;
int mtu;
- if (tunnel->recursion++) {
- stats->collisions++;
- goto tx_error;
- }
-
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
@@ -485,7 +480,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
ip_rt_put(rt);
stats->tx_dropped++;
dev_kfree_skb(skb);
- tunnel->recursion--;
return NETDEV_TX_OK;
}
if (skb->sk)
@@ -523,7 +517,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
IPTUNNEL_XMIT();
- tunnel->recursion--;
return NETDEV_TX_OK;
tx_error_icmp:
@@ -531,7 +524,6 @@ tx_error_icmp:
tx_error:
stats->tx_errors++;
dev_kfree_skb(skb);
- tunnel->recursion--;
return NETDEV_TX_OK;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 91867d3e632..bb419925202 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3036,7 +3036,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
- struct file *filp, void __user *buffer,
+ void __user *buffer,
size_t *lenp, loff_t *ppos)
{
if (write) {
@@ -3046,7 +3046,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
memcpy(&ctl, __ctl, sizeof(ctl));
ctl.data = &flush_delay;
- proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);
+ proc_dointvec(&ctl, write, buffer, lenp, ppos);
net = (struct net *)__ctl->extra1;
rt_cache_flush(net, flush_delay);
@@ -3106,12 +3106,11 @@ static void rt_secret_reschedule(int old)
}
static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
- struct file *filp,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int old = ip_rt_secret_interval;
- int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);
+ int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
rt_secret_reschedule(old);
@@ -3414,7 +3413,7 @@ int __init ip_rt_init(void)
alloc_large_system_hash("IP route cache",
sizeof(struct rt_hash_bucket),
rhash_entries,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
15 : 17,
0,
&rt_hash_log,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4710d219f06..2dcf04d9b00 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,7 +36,7 @@ static void set_local_port_range(int range[2])
}
/* Validate changes from /proc interface. */
-static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp,
+static int ipv4_local_port_range(ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -51,7 +51,7 @@ static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp,
};
inet_get_local_port_range(range, range + 1);
- ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
if (range[1] < range[0])
@@ -91,7 +91,7 @@ static int ipv4_sysctl_local_port_range(ctl_table *table,
}
-static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file * filp,
+static int proc_tcp_congestion_control(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char val[TCP_CA_NAME_MAX];
@@ -103,7 +103,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file *
tcp_get_default_congestion_control(val);
- ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0)
ret = tcp_set_default_congestion_control(val);
return ret;
@@ -129,7 +129,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table,
}
static int proc_tcp_available_congestion_control(ctl_table *ctl,
- int write, struct file * filp,
+ int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -140,13 +140,13 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
if (!tbl.data)
return -ENOMEM;
tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
- ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
kfree(tbl.data);
return ret;
}
static int proc_allowed_congestion_control(ctl_table *ctl,
- int write, struct file * filp,
+ int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -158,7 +158,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
return -ENOMEM;
tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
- ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0)
ret = tcp_set_allowed_congestion_control(tbl.data);
kfree(tbl.data);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19a0612b8a2..21387ebabf0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
tcp_hashinfo.ehash_size,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.bhash_size,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 55f486d89c8..1fd0a3d775d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3986,14 +3986,14 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
#ifdef CONFIG_SYSCTL
static
-int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
+int addrconf_sysctl_forward(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4090,14 +4090,14 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
}
static
-int addrconf_sysctl_disable(ctl_table *ctl, int write, struct file * filp,
+int addrconf_sysctl_disable(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_disable_ipv6(ctl, valp, val);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7d25bbe3211..c595bbe1ed9 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1043,11 +1043,6 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &t->dev->stats;
int ret;
- if (t->recursion++) {
- stats->collisions++;
- goto tx_err;
- }
-
switch (skb->protocol) {
case htons(ETH_P_IP):
ret = ip4ip6_tnl_xmit(skb, dev);
@@ -1062,14 +1057,12 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto tx_err;
- t->recursion--;
return NETDEV_TX_OK;
tx_err:
stats->tx_errors++;
stats->tx_dropped++;
kfree_skb(skb);
- t->recursion--;
return NETDEV_TX_OK;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3907510c2ce..090675e269e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -324,7 +324,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations ipmr_mfc_seq_ops = {
+static const struct seq_operations ipmr_mfc_seq_ops = {
.start = ipmr_mfc_seq_start,
.next = ipmr_mfc_seq_next,
.stop = ipmr_mfc_seq_stop,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7015478797f..498b9b0b0fa 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1735,7 +1735,7 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
}
}
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct net_device *dev = ctl->extra1;
struct inet6_dev *idev;
@@ -1746,16 +1746,16 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
ndisc_warn_deprecated_sysctl(ctl, "syscall", dev ? dev->name : "default");
if (strcmp(ctl->procname, "retrans_time") == 0)
- ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
else if (strcmp(ctl->procname, "base_reachable_time") == 0)
ret = proc_dointvec_jiffies(ctl, write,
- filp, buffer, lenp, ppos);
+ buffer, lenp, ppos);
else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
(strcmp(ctl->procname, "base_reachable_time_ms") == 0))
ret = proc_dointvec_ms_jiffies(ctl, write,
- filp, buffer, lenp, ppos);
+ buffer, lenp, ppos);
else
ret = -1;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 77aecbe8ff6..d6fe7646a8f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2524,13 +2524,13 @@ static const struct file_operations rt6_stats_seq_fops = {
#ifdef CONFIG_SYSCTL
static
-int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
+int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = current->nsproxy->net_ns;
int delay = net->ipv6.sysctl.flush_delay;
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+ proc_dointvec(ctl, write, buffer, lenp, ppos);
fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
return 0;
} else
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 0ae4f644818..fcb53962884 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -626,11 +626,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct in6_addr *addr6;
int addr_type;
- if (tunnel->recursion++) {
- stats->collisions++;
- goto tx_error;
- }
-
if (skb->protocol != htons(ETH_P_IPV6))
goto tx_error;
@@ -753,7 +748,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ip_rt_put(rt);
stats->tx_dropped++;
dev_kfree_skb(skb);
- tunnel->recursion--;
return NETDEV_TX_OK;
}
if (skb->sk)
@@ -794,7 +788,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
nf_reset(skb);
IPTUNNEL_XMIT();
- tunnel->recursion--;
return NETDEV_TX_OK;
tx_error_icmp:
@@ -802,7 +795,6 @@ tx_error_icmp:
tx_error:
stats->tx_errors++;
dev_kfree_skb(skb);
- tunnel->recursion--;
return NETDEV_TX_OK;
}
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 57f8817c397..5c86567e5a7 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -73,12 +73,12 @@ static int min_lap_keepalive_time = 100; /* 100us */
/* For other sysctl, I've no idea of the range. Maybe Dag could help
* us on that - Jean II */
-static int do_devname(ctl_table *table, int write, struct file *filp,
+static int do_devname(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dostring(table, write, buffer, lenp, ppos);
if (ret == 0 && write) {
struct ias_value *val;
@@ -90,12 +90,12 @@ static int do_devname(ctl_table *table, int write, struct file *filp,
}
-static int do_discovery(ctl_table *table, int write, struct file *filp,
+static int do_discovery(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret)
return ret;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 039901109fa..71e10cabf81 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -90,8 +90,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bss->dtim_period = tim_ie->dtim_period;
}
- /* set default value for buggy APs */
- if (!elems->tim || bss->dtim_period == 0)
+ /* set default value for buggy AP/no TIM element */
+ if (bss->dtim_period == 0)
bss->dtim_period = 1;
bss->supp_rates_len = 0;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index fba2892b99e..446e9bd4b4b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1496,14 +1496,14 @@ static int ip_vs_zero_all(void)
static int
-proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
+proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
- rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
@@ -1517,7 +1517,7 @@ proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
static int
-proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
+proc_do_sync_threshold(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
@@ -1527,7 +1527,7 @@ proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
/* backup the value first */
memcpy(val, valp, sizeof(val));
- rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b37109817a9..7c9ec3dee96 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
* machine has 512 buckets. >= 1GB machines have 16384 buckets. */
if (!nf_conntrack_htable_size) {
nf_conntrack_htable_size
- = (((num_physpages << PAGE_SHIFT) / 16384)
+ = (((totalram_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
- if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 16384;
if (nf_conntrack_htable_size < 32)
nf_conntrack_htable_size = 32;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 4e620305f28..c93494fef8e 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -226,7 +226,7 @@ static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
static struct ctl_table_header *nf_log_dir_header;
-static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
+static int nf_log_proc_dostring(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
const struct nf_logger *logger;
@@ -260,7 +260,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
table->data = "NONE";
else
table->data = logger->name;
- r = proc_dostring(table, write, filp, buffer, lenp, ppos);
+ r = proc_dostring(table, write, buffer, lenp, ppos);
mutex_unlock(&nf_log_mutex);
}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a6ac83a9334..f01955cce31 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
int cpu;
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+ if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 219dcdbe388..dd16e404424 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
if (minfo->cfg.size)
size = minfo->cfg.size;
else {
- size = ((num_physpages << PAGE_SHIFT) / 16384) /
+ size = ((totalram_pages << PAGE_SHIFT) / 16384) /
sizeof(struct list_head);
- if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
size = 8192;
if (size < 16)
size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
if (minfo->cfg.size) {
size = minfo->cfg.size;
} else {
- size = (num_physpages << PAGE_SHIFT) / 16384 /
+ size = (totalram_pages << PAGE_SHIFT) / 16384 /
sizeof(struct list_head);
- if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+ if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
size = 8192;
if (size < 16)
size = 16;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c5aab6a368c..a4bafbf1509 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1609,6 +1609,16 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
return err;
}
+void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
+
+ sk_for_each_bound(sk, node, &tbl->mc_list)
+ netlink_update_socket_mc(nlk_sk(sk), group, 0);
+}
+
/**
* netlink_clear_multicast_users - kick off multicast listeners
*
@@ -1619,15 +1629,8 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
*/
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
- struct sock *sk;
- struct hlist_node *node;
- struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
-
netlink_table_grab();
-
- sk_for_each_bound(sk, node, &tbl->mc_list)
- netlink_update_socket_mc(nlk_sk(sk), group, 0);
-
+ __netlink_clear_multicast_users(ksk, group);
netlink_table_ungrab();
}
@@ -2091,10 +2094,10 @@ static int __init netlink_proto_init(void)
if (!nl_table)
goto panic;
- if (num_physpages >= (128 * 1024))
- limit = num_physpages >> (21 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ limit = totalram_pages >> (21 - PAGE_SHIFT);
else
- limit = num_physpages >> (23 - PAGE_SHIFT);
+ limit = totalram_pages >> (23 - PAGE_SHIFT);
order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
limit = (1UL << order) / sizeof(struct hlist_head);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 566941e0336..44ff3f3810f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -220,10 +220,12 @@ static void __genl_unregister_mc_group(struct genl_family *family,
struct net *net;
BUG_ON(grp->family != family);
+ netlink_table_grab();
rcu_read_lock();
for_each_net_rcu(net)
- netlink_clear_multicast_users(net->genl_sock, grp->id);
+ __netlink_clear_multicast_users(net->genl_sock, grp->id);
rcu_read_unlock();
+ netlink_table_ungrab();
clear_bit(grp->id, mc_groups);
list_del(&grp->list);
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index a662e62a99c..f60c0c2aacb 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -168,6 +168,12 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
+ /* Broadcast sending is not implemented */
+ if (pn_addr(dst) == PNADDR_BROADCAST) {
+ err = -EOPNOTSUPP;
+ goto drop;
+ }
+
skb_reset_transport_header(skb);
WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
skb_push(skb, sizeof(struct phonethdr));
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 7a4ee397d2f..07aa9f08d5f 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -113,6 +113,8 @@ void pn_sock_unhash(struct sock *sk)
}
EXPORT_SYMBOL(pn_sock_unhash);
+static DEFINE_MUTEX(port_mutex);
+
static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
struct sock *sk = sock->sk;
@@ -140,9 +142,11 @@ static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
err = -EINVAL; /* attempt to rebind */
goto out;
}
+ WARN_ON(sk_hashed(sk));
+ mutex_lock(&port_mutex);
err = sk->sk_prot->get_port(sk, pn_port(handle));
if (err)
- goto out;
+ goto out_port;
/* get_port() sets the port, bind() sets the address if applicable */
pn->sobject = pn_object(saddr, pn_port(pn->sobject));
@@ -150,6 +154,8 @@ static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
/* Enable RX on the socket */
sk->sk_prot->hash(sk);
+out_port:
+ mutex_unlock(&port_mutex);
out:
release_sock(sk);
return err;
@@ -357,8 +363,6 @@ const struct proto_ops phonet_stream_ops = {
};
EXPORT_SYMBOL(phonet_stream_ops);
-static DEFINE_MUTEX(port_mutex);
-
/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
@@ -370,9 +374,7 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
memset(&try_sa, 0, sizeof(struct sockaddr_pn));
try_sa.spn_family = AF_PHONET;
-
- mutex_lock(&port_mutex);
-
+ WARN_ON(!mutex_is_locked(&port_mutex));
if (!sport) {
/* search free port */
int port, pmin, pmax;
@@ -401,8 +403,6 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
else
sock_put(tmpsk);
}
- mutex_unlock(&port_mutex);
-
/* the port must be in use already */
return -EADDRINUSE;
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index 7b5749ee276..2220f332232 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -56,7 +56,7 @@ void phonet_get_local_port_range(int *min, int *max)
} while (read_seqretry(&local_port_range_lock, seq));
}
-static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
+static int proc_local_port_range(ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
@@ -70,7 +70,7 @@ static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
.extra2 = &local_port_range_max,
};
- ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
if (range[1] < range[0])
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index d9231245a79..bc0019f704f 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -96,7 +96,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
}
/*
- * allocate a new client call and attempt to to get a connection slot for it
+ * allocate a new client call and attempt to get a connection slot for it
*/
static struct rxrpc_call *rxrpc_alloc_client_call(
struct rxrpc_sock *rx,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 375d64cb1a3..2c5c76be18f 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -77,7 +77,7 @@
* The service curve parameters are converted to the internal
* representation. The slope values are scaled to avoid overflow.
* the inverse slope values as well as the y-projection of the 1st
- * segment are kept in order to to avoid 64-bit divide operations
+ * segment are kept in order to avoid 64-bit divide operations
* that are expensive on 32-bit architectures.
*/
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c557f1fb1c6..612dc878e05 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1184,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void)
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (22 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (22 - PAGE_SHIFT);
else
- goal = num_physpages >> (24 - PAGE_SHIFT);
+ goal = totalram_pages >> (24 - PAGE_SHIFT);
for (order = 0; (1UL << order) < goal; order++)
;
diff --git a/net/socket.c b/net/socket.c
index 2a022c00d85..49917a1cac7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -86,6 +86,7 @@
#include <linux/audit.h>
#include <linux/wireless.h>
#include <linux/nsproxy.h>
+#include <linux/magic.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -235,8 +236,6 @@ int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr,
return __put_user(klen, ulen);
}
-#define SOCKFS_MAGIC 0x534F434B
-
static struct kmem_cache *sock_inode_cachep __read_mostly;
static struct inode *sock_alloc_inode(struct super_block *sb)
@@ -285,7 +284,7 @@ static int init_inodecache(void)
return 0;
}
-static struct super_operations sockfs_ops = {
+static const struct super_operations sockfs_ops = {
.alloc_inode = sock_alloc_inode,
.destroy_inode =sock_destroy_inode,
.statfs = simple_statfs,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 0c431c277af..54a4e042f10 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -385,7 +385,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
EXPORT_SYMBOL_GPL(rpcauth_init_cred);
void
-rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
+rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
{
task->tk_msg.rpc_cred = get_rpccred(cred);
dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
@@ -394,7 +394,7 @@ rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
static void
-rpcauth_bind_root_cred(struct rpc_task *task)
+rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred acred = {
@@ -405,7 +405,7 @@ rpcauth_bind_root_cred(struct rpc_task *task)
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
- ret = auth->au_ops->lookup_cred(auth, &acred, 0);
+ ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
@@ -413,14 +413,14 @@ rpcauth_bind_root_cred(struct rpc_task *task)
}
static void
-rpcauth_bind_new_cred(struct rpc_task *task)
+rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct rpc_cred *ret;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, auth->au_ops->au_name);
- ret = rpcauth_lookupcred(auth, 0);
+ ret = rpcauth_lookupcred(auth, lookupflags);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
@@ -430,12 +430,16 @@ rpcauth_bind_new_cred(struct rpc_task *task)
void
rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
{
+ int lookupflags = 0;
+
+ if (flags & RPC_TASK_ASYNC)
+ lookupflags |= RPCAUTH_LOOKUP_NEW;
if (cred != NULL)
- cred->cr_ops->crbind(task, cred);
+ cred->cr_ops->crbind(task, cred, lookupflags);
else if (flags & RPC_TASK_ROOTCREDS)
- rpcauth_bind_root_cred(task);
+ rpcauth_bind_root_cred(task, lookupflags);
else
- rpcauth_bind_new_cred(task);
+ rpcauth_bind_new_cred(task, lookupflags);
}
void
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 4028502f052..bf88bf8e936 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -55,13 +55,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void)
EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
static void
-generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
+generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
struct rpc_cred *ret;
- ret = auth->au_ops->lookup_cred(auth, acred, 0);
+ ret = auth->au_ops->lookup_cred(auth, acred, lookupflags);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 2e6a148d277..f6c51e562a0 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1374,8 +1374,10 @@ svcauth_gss_release(struct svc_rqst *rqstp)
if (stat)
goto out_err;
break;
- default:
- goto out_err;
+ /*
+ * For any other gc_svc value, svcauth_gss_accept() already set
+ * the auth_error appropriately; just fall through:
+ */
}
out:
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index c70dd7f5258..1db618f56ec 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/module.h>
-#include <linux/utsname.h>
#include <linux/sunrpc/clnt.h>
#ifdef RPC_DEBUG
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 45cdaff9b36..d6eee291a0e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -103,23 +103,21 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
-static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
+static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
-static int cache_fresh_locked(struct cache_head *head, time_t expiry)
+static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
head->expiry_time = expiry;
head->last_refresh = get_seconds();
- return !test_and_set_bit(CACHE_VALID, &head->flags);
+ set_bit(CACHE_VALID, &head->flags);
}
static void cache_fresh_unlocked(struct cache_head *head,
- struct cache_detail *detail, int new)
+ struct cache_detail *detail)
{
- if (new)
- cache_revisit_request(head);
if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
cache_revisit_request(head);
- queue_loose(detail, head);
+ cache_dequeue(detail, head);
}
}
@@ -132,7 +130,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
*/
struct cache_head **head;
struct cache_head *tmp;
- int is_new;
if (!test_bit(CACHE_VALID, &old->flags)) {
write_lock(&detail->hash_lock);
@@ -141,9 +138,9 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
set_bit(CACHE_NEGATIVE, &old->flags);
else
detail->update(old, new);
- is_new = cache_fresh_locked(old, new->expiry_time);
+ cache_fresh_locked(old, new->expiry_time);
write_unlock(&detail->hash_lock);
- cache_fresh_unlocked(old, detail, is_new);
+ cache_fresh_unlocked(old, detail);
return old;
}
write_unlock(&detail->hash_lock);
@@ -167,11 +164,11 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
*head = tmp;
detail->entries++;
cache_get(tmp);
- is_new = cache_fresh_locked(tmp, new->expiry_time);
+ cache_fresh_locked(tmp, new->expiry_time);
cache_fresh_locked(old, 0);
write_unlock(&detail->hash_lock);
- cache_fresh_unlocked(tmp, detail, is_new);
- cache_fresh_unlocked(old, detail, 0);
+ cache_fresh_unlocked(tmp, detail);
+ cache_fresh_unlocked(old, detail);
cache_put(old, detail);
return tmp;
}
@@ -184,6 +181,22 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
return cd->cache_upcall(cd, h);
}
+static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
+{
+ if (!test_bit(CACHE_VALID, &h->flags) ||
+ h->expiry_time < get_seconds())
+ return -EAGAIN;
+ else if (detail->flush_time > h->last_refresh)
+ return -EAGAIN;
+ else {
+ /* entry is valid */
+ if (test_bit(CACHE_NEGATIVE, &h->flags))
+ return -ENOENT;
+ else
+ return 0;
+ }
+}
+
/*
* This is the generic cache management routine for all
* the authentication caches.
@@ -192,8 +205,10 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
*
*
* Returns 0 if the cache_head can be used, or cache_puts it and returns
- * -EAGAIN if upcall is pending,
- * -ETIMEDOUT if upcall failed and should be retried,
+ * -EAGAIN if upcall is pending and request has been queued
+ * -ETIMEDOUT if upcall failed or request could not be queued, or
+ * upcall completed but item is still invalid (implying that
+ * the cache item has been replaced with a newer one).
* -ENOENT if cache entry was negative
*/
int cache_check(struct cache_detail *detail,
@@ -203,17 +218,7 @@ int cache_check(struct cache_detail *detail,
long refresh_age, age;
/* First decide return status as best we can */
- if (!test_bit(CACHE_VALID, &h->flags) ||
- h->expiry_time < get_seconds())
- rv = -EAGAIN;
- else if (detail->flush_time > h->last_refresh)
- rv = -EAGAIN;
- else {
- /* entry is valid */
- if (test_bit(CACHE_NEGATIVE, &h->flags))
- rv = -ENOENT;
- else rv = 0;
- }
+ rv = cache_is_valid(detail, h);
/* now see if we want to start an upcall */
refresh_age = (h->expiry_time - h->last_refresh);
@@ -229,10 +234,11 @@ int cache_check(struct cache_detail *detail,
switch (cache_make_upcall(detail, h)) {
case -EINVAL:
clear_bit(CACHE_PENDING, &h->flags);
+ cache_revisit_request(h);
if (rv == -EAGAIN) {
set_bit(CACHE_NEGATIVE, &h->flags);
- cache_fresh_unlocked(h, detail,
- cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
+ cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
+ cache_fresh_unlocked(h, detail);
rv = -ENOENT;
}
break;
@@ -245,10 +251,14 @@ int cache_check(struct cache_detail *detail,
}
}
- if (rv == -EAGAIN)
- if (cache_defer_req(rqstp, h) != 0)
- rv = -ETIMEDOUT;
-
+ if (rv == -EAGAIN) {
+ if (cache_defer_req(rqstp, h) < 0) {
+ /* Request is not deferred */
+ rv = cache_is_valid(detail, h);
+ if (rv == -EAGAIN)
+ rv = -ETIMEDOUT;
+ }
+ }
if (rv)
cache_put(h, detail);
return rv;
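
With cache_is_valid() factored out, cache_check() re-tests the entry when a deferral fails rather than blindly returning -ETIMEDOUT. A hedged sketch of a caller interpreting the documented return codes — the cache_check() signature is assumed from this file, and the real callers live in the sunrpc auth caches:

/* Sketch of a cache_check() caller honouring the contract above. */
static int example_use_entry(struct cache_detail *detail,
			     struct cache_head *h, struct cache_req *rqstp)
{
	int rv = cache_check(detail, h, rqstp);

	if (rv == 0)		/* valid, positive entry: use it */
		return 0;
	if (rv == -ENOENT)	/* valid, negative entry */
		return -ENOENT;
	if (rv == -EAGAIN)	/* upcall pending, request was deferred */
		return -EAGAIN;	/* caller will be revisited later */
	/* -ETIMEDOUT: could not defer, or the entry was replaced */
	return rv;
}
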
@@ -396,7 +406,7 @@ static int cache_clean(void)
)
continue;
if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
- queue_loose(current_detail, ch);
+ cache_dequeue(current_detail, ch);
if (atomic_read(&ch->ref.refcount) == 1)
break;
@@ -412,8 +422,10 @@ static int cache_clean(void)
if (!ch)
current_index ++;
spin_unlock(&cache_list_lock);
- if (ch)
+ if (ch) {
+ cache_revisit_request(ch);
cache_put(ch, d);
+ }
} else
spin_unlock(&cache_list_lock);
@@ -488,7 +500,7 @@ static int cache_defer_cnt;
static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
- struct cache_deferred_req *dreq;
+ struct cache_deferred_req *dreq, *discard;
int hash = DFR_HASH(item);
if (cache_defer_cnt >= DFR_MAX) {
@@ -496,11 +508,11 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
* or continue and drop the oldest below
*/
if (net_random()&1)
- return -ETIMEDOUT;
+ return -ENOMEM;
}
dreq = req->defer(req);
if (dreq == NULL)
- return -ETIMEDOUT;
+ return -ENOMEM;
dreq->item = item;
@@ -513,23 +525,24 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
list_add(&dreq->hash, &cache_defer_hash[hash]);
/* it is in, now maybe clean up */
- dreq = NULL;
+ discard = NULL;
if (++cache_defer_cnt > DFR_MAX) {
- dreq = list_entry(cache_defer_list.prev,
- struct cache_deferred_req, recent);
- list_del(&dreq->recent);
- list_del(&dreq->hash);
+ discard = list_entry(cache_defer_list.prev,
+ struct cache_deferred_req, recent);
+ list_del_init(&discard->recent);
+ list_del_init(&discard->hash);
cache_defer_cnt--;
}
spin_unlock(&cache_defer_lock);
- if (dreq) {
+ if (discard)
/* there was one too many */
- dreq->revisit(dreq, 1);
- }
+ discard->revisit(discard, 1);
+
if (!test_bit(CACHE_PENDING, &item->flags)) {
/* must have just been validated... */
cache_revisit_request(item);
+ return -EAGAIN;
}
return 0;
}
@@ -551,7 +564,7 @@ static void cache_revisit_request(struct cache_head *item)
dreq = list_entry(lp, struct cache_deferred_req, hash);
lp = lp->next;
if (dreq->item == item) {
- list_del(&dreq->hash);
+ list_del_init(&dreq->hash);
list_move(&dreq->recent, &pending);
cache_defer_cnt--;
}
@@ -577,7 +590,7 @@ void cache_clean_deferred(void *owner)
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
if (dreq->owner == owner) {
- list_del(&dreq->hash);
+ list_del_init(&dreq->hash);
list_move(&dreq->recent, &pending);
cache_defer_cnt--;
}
@@ -887,7 +900,7 @@ static int cache_release(struct inode *inode, struct file *filp,
-static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
+static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
struct cache_queue *cq;
spin_lock(&queue_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index fac0ca93f06..38829e20500 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -288,6 +288,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.srcaddr = args->saddress,
.dstaddr = args->address,
.addrlen = args->addrsize,
+ .bc_xprt = args->bc_xprt,
};
char servername[48];
@@ -639,10 +640,11 @@ EXPORT_SYMBOL_GPL(rpc_call_async);
/**
* rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
* rpc_execute against it
- * @ops: RPC call ops
+ * @req: RPC request
+ * @tk_ops: RPC call ops
*/
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
- const struct rpc_call_ops *tk_ops)
+ const struct rpc_call_ops *tk_ops)
{
struct rpc_task *task;
struct xdr_buf *xbufp = &req->rq_snd_buf;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7f676bdf70d..49278f83036 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -860,7 +860,8 @@ static void rpc_clntdir_depopulate(struct dentry *dentry)
/**
* rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
- * @path: path from the rpc_pipefs root to the new directory
+ * @dentry: dentry from the rpc_pipefs root to the new directory
+ * @name: &struct qstr for the name
* @rpc_client: rpc client to associate with this directory
*
* This creates a directory at the given @path associated with
@@ -930,7 +931,7 @@ void rpc_remove_cache_dir(struct dentry *dentry)
/*
* populate the filesystem
*/
-static struct super_operations s_ops = {
+static const struct super_operations s_ops = {
.alloc_inode = rpc_alloc_inode,
.destroy_inode = rpc_destroy_inode,
.statfs = simple_statfs,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 8f459abe97c..cef74ba0666 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -21,6 +21,8 @@
#include <linux/sunrpc/clnt.h>
+#include "sunrpc.h"
+
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID 0xf00baa
@@ -711,11 +713,6 @@ static void rpc_async_schedule(struct work_struct *work)
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
-struct rpc_buffer {
- size_t len;
- char data[];
-};
-
/**
* rpc_malloc - allocate an RPC buffer
* @task: RPC task that will use this buffer
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index 5d9dd742264..90c292e2738 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -27,11 +27,25 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _NET_SUNRPC_SUNRPC_H
#define _NET_SUNRPC_SUNRPC_H
+#include <linux/net.h>
+
+/*
+ * Header for dynamically allocated rpc buffers.
+ */
+struct rpc_buffer {
+ size_t len;
+ char data[];
+};
+
static inline int rpc_reply_expected(struct rpc_task *task)
{
return (task->tk_msg.rpc_proc != NULL) &&
(task->tk_msg.rpc_proc->p_decode != NULL);
}
+int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
+ struct page *headpage, unsigned long headoffset,
+ struct page *tailpage, unsigned long tailoffset);
+
#endif /* _NET_SUNRPC_SUNRPC_H */
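
struct rpc_buffer moves into this shared header so that rpc_malloc() in sched.c and the backchannel's bc_malloc()/bc_free() (added to xprtsock.c later in this patch) agree on the buffer layout. A minimal sketch of the alloc/free pairing the header implies (illustrative only, not code from the patch):

/* Sketch: hide the rpc_buffer header from callers, recover it on free. */
static void *example_buf_alloc(size_t size)
{
	struct rpc_buffer *buf;

	buf = kmalloc(sizeof(*buf) + size, GFP_NOFS);
	if (!buf)
		return NULL;
	buf->len = size;
	return buf->data;		/* callers only ever see the payload */
}

static void example_buf_free(void *data)
{
	if (data)
		kfree(container_of(data, struct rpc_buffer, data));
}
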
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 27d44332f01..df124f78ee4 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -160,6 +160,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
mutex_init(&xprt->xpt_mutex);
spin_lock_init(&xprt->xpt_lock);
set_bit(XPT_BUSY, &xprt->xpt_flags);
+ rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
@@ -710,10 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
spin_unlock_bh(&pool->sp_lock);
len = 0;
- if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
- dprintk("svc_recv: found XPT_CLOSE\n");
- svc_delete_xprt(xprt);
- } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = xprt->xpt_ops->xpo_accept(xprt);
if (newxpt) {
@@ -739,7 +737,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
svc_xprt_received(newxpt);
}
svc_xprt_received(xprt);
- } else {
+ } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
rqstp, pool->sp_id, xprt,
atomic_read(&xprt->xpt_ref.refcount));
@@ -752,6 +750,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
dprintk("svc: got len=%d\n", len);
}
+ if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+ dprintk("svc_recv: found XPT_CLOSE\n");
+ svc_delete_xprt(xprt);
+ }
+
/* No data, incomplete (TCP) read, or accept() */
if (len == 0 || len == -EAGAIN) {
rqstp->rq_res.len = 0;
@@ -808,6 +811,7 @@ int svc_send(struct svc_rqst *rqstp)
else
len = xprt->xpt_ops->xpo_sendto(rqstp);
mutex_unlock(&xprt->xpt_mutex);
+ rpc_wake_up(&xprt->xpt_bc_pending);
svc_xprt_release(rqstp);
if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
@@ -1166,11 +1170,6 @@ static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
- lock_kernel();
- /* bump up the pseudo refcount while traversing */
- svc_get(serv);
- unlock_kernel();
-
if (!pidx)
return SEQ_START_TOKEN;
return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
@@ -1198,12 +1197,6 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
- struct svc_serv *serv = m->private;
-
- lock_kernel();
- /* this function really, really should have been called svc_put() */
- svc_destroy(serv);
- unlock_kernel();
}
static int svc_pool_stats_show(struct seq_file *m, void *p)
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 6caffa34ac0..117f68a8aa4 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -668,6 +668,7 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
case 0:
*gip = ug->gi;
get_group_info(*gip);
+ cache_put(&ug->h, &unix_gid_cache);
return 0;
default:
return -EAGAIN;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 23128ee191a..ccc5e83cae5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -49,6 +49,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -153,49 +154,27 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
}
/*
- * Generic sendto routine
+ * send routine intended to be shared by the fore- and back-channel
*/
-static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
+int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
+ struct page *headpage, unsigned long headoffset,
+ struct page *tailpage, unsigned long tailoffset)
{
- struct svc_sock *svsk =
- container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
- struct socket *sock = svsk->sk_sock;
- int slen;
- union {
- struct cmsghdr hdr;
- long all[SVC_PKTINFO_SPACE / sizeof(long)];
- } buffer;
- struct cmsghdr *cmh = &buffer.hdr;
- int len = 0;
int result;
int size;
struct page **ppage = xdr->pages;
size_t base = xdr->page_base;
unsigned int pglen = xdr->page_len;
unsigned int flags = MSG_MORE;
- RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
+ int slen;
+ int len = 0;
slen = xdr->len;
- if (rqstp->rq_prot == IPPROTO_UDP) {
- struct msghdr msg = {
- .msg_name = &rqstp->rq_addr,
- .msg_namelen = rqstp->rq_addrlen,
- .msg_control = cmh,
- .msg_controllen = sizeof(buffer),
- .msg_flags = MSG_MORE,
- };
-
- svc_set_cmsg_data(rqstp, cmh);
-
- if (sock_sendmsg(sock, &msg, 0) < 0)
- goto out;
- }
-
/* send head */
if (slen == xdr->head[0].iov_len)
flags = 0;
- len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
+ len = kernel_sendpage(sock, headpage, headoffset,
xdr->head[0].iov_len, flags);
if (len != xdr->head[0].iov_len)
goto out;
@@ -219,16 +198,58 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
base = 0;
ppage++;
}
+
/* send tail */
if (xdr->tail[0].iov_len) {
- result = kernel_sendpage(sock, rqstp->rq_respages[0],
- ((unsigned long)xdr->tail[0].iov_base)
- & (PAGE_SIZE-1),
- xdr->tail[0].iov_len, 0);
-
+ result = kernel_sendpage(sock, tailpage, tailoffset,
+ xdr->tail[0].iov_len, 0);
if (result > 0)
len += result;
}
+
+out:
+ return len;
+}
+
+
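
svc_send_common() above is the transport-neutral core shared by the fore-channel (svc_sendto(), re-added below) and the NFSv4.1 backchannel (bc_sendto() in xprtsock.c later in this patch). A hedged sketch of a backchannel-style caller computing the head and tail page/offset pairs the way bc_sendto does (illustrative, assumes this file's context):

/* Sketch: feeding an xdr_buf to svc_send_common(). */
static int example_send(struct socket *sock, struct xdr_buf *xb)
{
	unsigned long headoff =
		(unsigned long)xb->head[0].iov_base & ~PAGE_MASK;
	unsigned long tailoff =
		(unsigned long)xb->tail[0].iov_base & ~PAGE_MASK;

	return svc_send_common(sock, xb,
			       virt_to_page(xb->head[0].iov_base), headoff,
			       virt_to_page(xb->tail[0].iov_base), tailoff);
}
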
+/*
+ * Generic sendto routine
+ */
+static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
+{
+ struct svc_sock *svsk =
+ container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+ struct socket *sock = svsk->sk_sock;
+ union {
+ struct cmsghdr hdr;
+ long all[SVC_PKTINFO_SPACE / sizeof(long)];
+ } buffer;
+ struct cmsghdr *cmh = &buffer.hdr;
+ int len = 0;
+ unsigned long tailoff;
+ unsigned long headoff;
+ RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
+
+ if (rqstp->rq_prot == IPPROTO_UDP) {
+ struct msghdr msg = {
+ .msg_name = &rqstp->rq_addr,
+ .msg_namelen = rqstp->rq_addrlen,
+ .msg_control = cmh,
+ .msg_controllen = sizeof(buffer),
+ .msg_flags = MSG_MORE,
+ };
+
+ svc_set_cmsg_data(rqstp, cmh);
+
+ if (sock_sendmsg(sock, &msg, 0) < 0)
+ goto out;
+ }
+
+ tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1);
+ headoff = 0;
+ len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff,
+ rqstp->rq_respages[0], tailoff);
+
out:
dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
svsk, xdr->head[0].iov_base, xdr->head[0].iov_len,
@@ -432,29 +453,49 @@ static void svc_tcp_write_space(struct sock *sk)
}
/*
+ * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
+ */
+static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
+ struct cmsghdr *cmh)
+{
+ struct in_pktinfo *pki = CMSG_DATA(cmh);
+ if (cmh->cmsg_type != IP_PKTINFO)
+ return 0;
+ rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
+ return 1;
+}
+
+/*
+ * See net/ipv6/datagram.c : datagram_recv_ctl
+ */
+static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
+ struct cmsghdr *cmh)
+{
+ struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ if (cmh->cmsg_type != IPV6_PKTINFO)
+ return 0;
+ ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
+ return 1;
+}
+
+/*
* Copy the UDP datagram's destination address to the rqstp structure.
* The 'destination' address in this case is the address to which the
* peer sent the datagram, i.e. our local address. For multihomed
* hosts, this can change from msg to msg. Note that only the IP
* address changes, the port number should remain the same.
*/
-static void svc_udp_get_dest_address(struct svc_rqst *rqstp,
- struct cmsghdr *cmh)
+static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
+ struct cmsghdr *cmh)
{
- struct svc_sock *svsk =
- container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
- switch (svsk->sk_sk->sk_family) {
- case AF_INET: {
- struct in_pktinfo *pki = CMSG_DATA(cmh);
- rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
- break;
- }
- case AF_INET6: {
- struct in6_pktinfo *pki = CMSG_DATA(cmh);
- ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
- break;
- }
+ switch (cmh->cmsg_level) {
+ case SOL_IP:
+ return svc_udp_get_dest_address4(rqstp, cmh);
+ case SOL_IPV6:
+ return svc_udp_get_dest_address6(rqstp, cmh);
}
+
+ return 0;
}
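
svc_udp_get_dest_address() now dispatches on cmsg_level, so one code path handles both IP_PKTINFO and IPV6_PKTINFO (enabled per address family by the svc_udp_init() hunk further down). A hedged sketch of how such a dispatcher could be driven from a received msghdr — the real caller, svc_udp_recvfrom(), only inspects a single control message:

/* Sketch: walk the control messages and stop at a usable pktinfo. */
static int example_daddr_from_cmsg(struct svc_rqst *rqstp, struct msghdr *msg)
{
	struct cmsghdr *cmh;

	for (cmh = CMSG_FIRSTHDR(msg); cmh; cmh = CMSG_NXTHDR(msg, cmh))
		if (svc_udp_get_dest_address(rqstp, cmh))
			return 1;
	return 0;	/* no IP_PKTINFO/IPV6_PKTINFO found */
}
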
/*
@@ -531,16 +572,15 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_prot = IPPROTO_UDP;
- if (cmh->cmsg_level != IPPROTO_IP ||
- cmh->cmsg_type != IP_PKTINFO) {
+ if (!svc_udp_get_dest_address(rqstp, cmh)) {
if (net_ratelimit())
- printk("rpcsvc: received unknown control message:"
- "%d/%d\n",
- cmh->cmsg_level, cmh->cmsg_type);
+ printk(KERN_WARNING
+ "svc: received unknown control message %d/%d; "
+ "dropping RPC reply datagram\n",
+ cmh->cmsg_level, cmh->cmsg_type);
skb_free_datagram(svsk->sk_sk, skb);
return 0;
}
- svc_udp_get_dest_address(rqstp, cmh);
if (skb_is_nonlinear(skb)) {
/* we have to copy */
@@ -651,8 +691,7 @@ static struct svc_xprt_class svc_udp_class = {
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
- int one = 1;
- mm_segment_t oldfs;
+ int err, level, optname, one = 1;
svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
@@ -671,12 +710,22 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
- oldfs = get_fs();
- set_fs(KERNEL_DS);
/* make sure we get destination address info */
- svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
- (char __user *)&one, sizeof(one));
- set_fs(oldfs);
+ switch (svsk->sk_sk->sk_family) {
+ case AF_INET:
+ level = SOL_IP;
+ optname = IP_PKTINFO;
+ break;
+ case AF_INET6:
+ level = SOL_IPV6;
+ optname = IPV6_RECVPKTINFO;
+ break;
+ default:
+ BUG();
+ }
+ err = kernel_setsockopt(svsk->sk_sock, level, optname,
+ (char *)&one, sizeof(one));
+ dprintk("svc: kernel_setsockopt returned %d\n", err);
}
/*
@@ -826,21 +875,15 @@ failed:
}
/*
- * Receive data from a TCP socket.
+ * Receive data.
+ * If we haven't gotten the record length yet, get the next four bytes.
+ * Otherwise try to gobble up as much as possible up to the complete
+ * record length.
*/
-static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
- struct svc_sock *svsk =
- container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
- int len;
- struct kvec *vec;
- int pnum, vlen;
-
- dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
- svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
+ int len;
if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
/* sndbuf needs to have room for one request
@@ -861,10 +904,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- /* Receive data. If we haven't got the record length yet, get
- * the next four bytes. Otherwise try to gobble up as much as
- * possible up to the complete record length.
- */
if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
struct kvec iov;
@@ -879,7 +918,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
dprintk("svc: short recvfrom while reading record "
"length (%d of %d)\n", len, want);
svc_xprt_received(&svsk->sk_xprt);
- return -EAGAIN; /* record header not complete */
+ goto err_again; /* record header not complete */
}
svsk->sk_reclen = ntohl(svsk->sk_reclen);
@@ -894,6 +933,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
"per record not supported\n");
goto err_delete;
}
+
svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
if (svsk->sk_reclen > serv->sv_max_mesg) {
@@ -914,17 +954,121 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
dprintk("svc: incomplete TCP record (%d of %d)\n",
len, svsk->sk_reclen);
svc_xprt_received(&svsk->sk_xprt);
- return -EAGAIN; /* record not complete */
+ goto err_again; /* record not complete */
}
len = svsk->sk_reclen;
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ return len;
+ error:
+ if (len == -EAGAIN) {
+ dprintk("RPC: TCP recv_record got EAGAIN\n");
+ svc_xprt_received(&svsk->sk_xprt);
+ }
+ return len;
+ err_delete:
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+ err_again:
+ return -EAGAIN;
+}
+
+static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
+ struct rpc_rqst **reqpp, struct kvec *vec)
+{
+ struct rpc_rqst *req = NULL;
+ u32 *p;
+ u32 xid;
+ u32 calldir;
+ int len;
+
+ len = svc_recvfrom(rqstp, vec, 1, 8);
+ if (len < 0)
+ goto error;
+
+ p = (u32 *)rqstp->rq_arg.head[0].iov_base;
+ xid = *p++;
+ calldir = *p;
+
+ if (calldir == 0) {
+ /* REQUEST is the most common case */
+ vec[0] = rqstp->rq_arg.head[0];
+ } else {
+ /* REPLY */
+ if (svsk->sk_bc_xprt)
+ req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+
+ if (!req) {
+ printk(KERN_NOTICE
+ "%s: Got unrecognized reply: "
+ "calldir 0x%x sk_bc_xprt %p xid %08x\n",
+ __func__, ntohl(calldir),
+ svsk->sk_bc_xprt, xid);
+ vec[0] = rqstp->rq_arg.head[0];
+ goto out;
+ }
+
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+ sizeof(struct xdr_buf));
+ /* copy the xid and call direction */
+ memcpy(req->rq_private_buf.head[0].iov_base,
+ rqstp->rq_arg.head[0].iov_base, 8);
+ vec[0] = req->rq_private_buf.head[0];
+ }
+ out:
+ vec[0].iov_base += 8;
+ vec[0].iov_len -= 8;
+ len = svsk->sk_reclen - 8;
+ error:
+ *reqpp = req;
+ return len;
+}
+
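
svc_process_calldir() peeks at the first 8 bytes of each TCP record — the xid and the call direction — to decide whether the record is a forward-channel CALL or a REPLY belonging to the NFSv4.1 backchannel on the shared connection. A minimal sketch of that decode (illustrative only; 'p' points at the start of the record payload):

/* Sketch: the two XDR words the demultiplexer keys on. */
static int example_is_backchannel_reply(const __be32 *p, __be32 *xid)
{
	*xid = p[0];		/* transaction id, used for xprt_lookup_rqst() */
	return p[1] != 0;	/* message direction: 0 = CALL, non-zero = REPLY */
}
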
+/*
+ * Receive data from a TCP socket.
+ */
+static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+{
+ struct svc_sock *svsk =
+ container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+ struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ int len;
+ struct kvec *vec;
+ int pnum, vlen;
+ struct rpc_rqst *req = NULL;
+
+ dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
+ svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
+ test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
+ test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
+
+ len = svc_tcp_recv_record(svsk, rqstp);
+ if (len < 0)
+ goto error;
+
vec = rqstp->rq_vec;
vec[0] = rqstp->rq_arg.head[0];
vlen = PAGE_SIZE;
+
+ /*
+ * We have enough data for the whole tcp record. Let's try and read the
+ * first 8 bytes to get the xid and the call direction. We can use this
+ * to figure out if this is a call or a reply to a callback. If
+ * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
+ * In that case, don't bother with the calldir and just read the data.
+ * It will be rejected in svc_process.
+ */
+ if (len >= 8) {
+ len = svc_process_calldir(svsk, rqstp, &req, vec);
+ if (len < 0)
+ goto err_again;
+ vlen -= 8;
+ }
+
pnum = 1;
while (vlen < len) {
- vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
+ vec[pnum].iov_base = (req) ?
+ page_address(req->rq_private_buf.pages[pnum - 1]) :
+ page_address(rqstp->rq_pages[pnum]);
vec[pnum].iov_len = PAGE_SIZE;
pnum++;
vlen += PAGE_SIZE;
@@ -934,8 +1078,18 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
/* Now receive data */
len = svc_recvfrom(rqstp, vec, pnum, len);
if (len < 0)
- goto error;
+ goto err_again;
+ /*
+ * Account for the 8 bytes we read earlier
+ */
+ len += 8;
+
+ if (req) {
+ xprt_complete_rqst(req->rq_task, len);
+ len = 0;
+ goto out;
+ }
dprintk("svc: TCP complete record (%d bytes)\n", len);
rqstp->rq_arg.len = len;
rqstp->rq_arg.page_base = 0;
@@ -949,6 +1103,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
+out:
/* Reset TCP read info */
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -960,21 +1115,19 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
return len;
- err_delete:
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- return -EAGAIN;
-
- error:
+err_again:
if (len == -EAGAIN) {
dprintk("RPC: TCP recvfrom got EAGAIN\n");
svc_xprt_received(&svsk->sk_xprt);
- } else {
+ return len;
+ }
+error:
+ if (len != -EAGAIN) {
printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
svsk->sk_xprt.xpt_server->sv_name, -len);
- goto err_delete;
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
}
-
- return len;
+ return -EAGAIN;
}
/*
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 5231f7aaac0..42f9748ae09 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -56,7 +56,7 @@ rpc_unregister_sysctl(void)
}
}
-static int proc_do_xprt(ctl_table *table, int write, struct file *file,
+static int proc_do_xprt(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char tmpbuf[256];
@@ -71,7 +71,7 @@ static int proc_do_xprt(ctl_table *table, int write, struct file *file,
}
static int
-proc_dodebug(ctl_table *table, int write, struct file *file,
+proc_dodebug(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char tmpbuf[20], c, *s;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f412a852bc7..fd46d42afa8 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -832,6 +832,11 @@ static void xprt_timer(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
}
+static inline int xprt_has_timer(struct rpc_xprt *xprt)
+{
+ return xprt->idle_timeout != 0;
+}
+
/**
* xprt_prepare_transmit - reserve the transport before sending a request
* @task: RPC task about to send a request
@@ -1013,7 +1018,7 @@ void xprt_release(struct rpc_task *task)
if (!list_empty(&req->rq_list))
list_del(&req->rq_list);
xprt->last_used = jiffies;
- if (list_empty(&xprt->recv))
+ if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
@@ -1082,8 +1087,11 @@ found:
#endif /* CONFIG_NFS_V4_1 */
INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
- setup_timer(&xprt->timer, xprt_init_autodisconnect,
- (unsigned long)xprt);
+ if (xprt_has_timer(xprt))
+ setup_timer(&xprt->timer, xprt_init_autodisconnect,
+ (unsigned long)xprt);
+ else
+ init_timer(&xprt->timer);
xprt->last_used = jiffies;
xprt->cwnd = RPC_INITCWND;
xprt->bind_index = 0;
@@ -1102,7 +1110,6 @@ found:
dprintk("RPC: created transport %p with %u slots\n", xprt,
xprt->max_reqs);
-
return xprt;
}
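
xprt_has_timer() lets a transport opt out of the idle autodisconnect machinery by leaving idle_timeout at zero, which the new backchannel transport does. A short sketch of the guard as used on the release path (mirrors the hunk above; illustrative):

/* Sketch: only arm the autodisconnect timer when the transport wants one. */
static void example_arm_idle_timer(struct rpc_xprt *xprt)
{
	if (xprt_has_timer(xprt) && list_empty(&xprt->recv))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}
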
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 87101177825..35fb68b9c8e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -80,7 +80,7 @@ struct kmem_cache *svc_rdma_ctxt_cachep;
* current value.
*/
static int read_reset_stat(ctl_table *table, int write,
- struct file *filp, void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp,
loff_t *ppos)
{
atomic_t *stat = (atomic_t *)table->data;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 5151f9f6c57..0cf5e8c27a1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -730,12 +730,12 @@ static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
goto err;
mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
- if (!mr)
+ if (IS_ERR(mr))
goto err_free_frmr;
pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
RPCSVC_MAXPAGES);
- if (!pl)
+ if (IS_ERR(pl))
goto err_free_mr;
frmr->mr = mr;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 62438f3a914..37c5475ba25 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_NFS_V4_1
@@ -43,6 +44,7 @@
#include <net/udp.h>
#include <net/tcp.h>
+#include "sunrpc.h"
/*
* xprtsock tunables
*/
@@ -771,6 +773,7 @@ static void xs_close(struct rpc_xprt *xprt)
dprintk("RPC: xs_close xprt %p\n", xprt);
xs_reset_transport(transport);
+ xprt->reestablish_timeout = 0;
smp_mb__before_clear_bit();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1262,6 +1265,12 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
if (xprt->shutdown)
goto out;
+ /* Any data means we had a useful conversation, so
+ * we don't need to delay the next reconnect
+ */
+ if (xprt->reestablish_timeout)
+ xprt->reestablish_timeout = 0;
+
/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
rd_desc.arg.data = xprt;
do {
@@ -2032,6 +2041,8 @@ static void xs_connect(struct rpc_task *task)
&transport->connect_worker,
xprt->reestablish_timeout);
xprt->reestablish_timeout <<= 1;
+ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
} else {
@@ -2098,6 +2109,134 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
xprt->stat.bklog_u);
}
+/*
+ * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
+ * we allocate pages instead of doing a kmalloc like rpc_malloc is that we want
+ * to use the server-side send routines.
+ */
+void *bc_malloc(struct rpc_task *task, size_t size)
+{
+ struct page *page;
+ struct rpc_buffer *buf;
+
+ BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
+ page = alloc_page(GFP_KERNEL);
+
+ if (!page)
+ return NULL;
+
+ buf = page_address(page);
+ buf->len = PAGE_SIZE;
+
+ return buf->data;
+}
+
+/*
+ * Free the space allocated in the bc_alloc routine
+ */
+void bc_free(void *buffer)
+{
+ struct rpc_buffer *buf;
+
+ if (!buffer)
+ return;
+
+ buf = container_of(buffer, struct rpc_buffer, data);
+ free_page((unsigned long)buf);
+}
+
+/*
+ * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
+ * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
+ */
+static int bc_sendto(struct rpc_rqst *req)
+{
+ int len;
+ struct xdr_buf *xbufp = &req->rq_snd_buf;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct socket *sock = transport->sock;
+ unsigned long headoff;
+ unsigned long tailoff;
+
+ /*
+ * Set up the rpc header and record marker stuff
+ */
+ xs_encode_tcp_record_marker(xbufp);
+
+ tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
+ headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
+ len = svc_send_common(sock, xbufp,
+ virt_to_page(xbufp->head[0].iov_base), headoff,
+ virt_to_page(xbufp->tail[0].iov_base), tailoff);
+
+ if (len != xbufp->len) {
+ printk(KERN_NOTICE "Error sending entire callback!\n");
+ len = -EAGAIN;
+ }
+
+ return len;
+}
+
+/*
+ * The send routine. Borrows from svc_send
+ */
+static int bc_send_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct svc_xprt *xprt;
+ struct svc_sock *svsk;
+ u32 len;
+
+ dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
+ /*
+ * Get the server socket associated with this callback xprt
+ */
+ xprt = req->rq_xprt->bc_xprt;
+ svsk = container_of(xprt, struct svc_sock, sk_xprt);
+
+ /*
+ * Grab the mutex to serialize data as the connection is shared
+ * with the fore channel
+ */
+ if (!mutex_trylock(&xprt->xpt_mutex)) {
+ rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
+ if (!mutex_trylock(&xprt->xpt_mutex))
+ return -EAGAIN;
+ rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
+ }
+ if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+ len = -ENOTCONN;
+ else
+ len = bc_sendto(req);
+ mutex_unlock(&xprt->xpt_mutex);
+
+ if (len > 0)
+ len = 0;
+
+ return len;
+}
+
+/*
+ * The close routine. Since this is client initiated, we do nothing
+ */
+
+static void bc_close(struct rpc_xprt *xprt)
+{
+ return;
+}
+
+/*
+ * The xprt destroy routine. Again, because this connection is client
+ * initiated, we do nothing
+ */
+
+static void bc_destroy(struct rpc_xprt *xprt)
+{
+ return;
+}
+
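
Because the backchannel reuses the forechannel's socket, sends are serialized on xpt_mutex; when the mutex is busy the backchannel task parks on the xpt_bc_pending queue that svc_xprt_init() now creates and svc_send() now wakes. A condensed sketch of that handshake (illustrative; the real bc_send_request() retries the trylock after queueing):

/* Sketch: backchannel sender vs. forechannel on the shared connection. */
static int example_bc_try_send(struct svc_xprt *xprt, struct rpc_task *task,
			       struct rpc_rqst *req)
{
	if (!mutex_trylock(&xprt->xpt_mutex)) {
		/* forechannel is sending: park until svc_send() wakes us */
		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
		return -EAGAIN;
	}
	/* ... send via bc_sendto(req) ... */
	mutex_unlock(&xprt->xpt_mutex);
	return 0;
}
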
static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
@@ -2134,6 +2273,22 @@ static struct rpc_xprt_ops xs_tcp_ops = {
.print_stats = xs_tcp_print_stats,
};
+/*
+ * The rpc_xprt_ops for the server backchannel
+ */
+
+static struct rpc_xprt_ops bc_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xprt_release_xprt,
+ .buf_alloc = bc_malloc,
+ .buf_free = bc_free,
+ .send_request = bc_send_request,
+ .set_retrans_timeout = xprt_set_retrans_timeout_def,
+ .close = bc_close,
+ .destroy = bc_destroy,
+ .print_stats = xs_tcp_print_stats,
+};
+
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
unsigned int slot_table_size)
{
@@ -2322,11 +2477,93 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
return ERR_PTR(-EINVAL);
}
+/**
+ * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
+ * @args: rpc transport creation arguments
+ *
+ */
+static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
+{
+ struct sockaddr *addr = args->dstaddr;
+ struct rpc_xprt *xprt;
+ struct sock_xprt *transport;
+ struct svc_sock *bc_sock;
+
+ if (!args->bc_xprt)
+ return ERR_PTR(-EINVAL);
+
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+ if (IS_ERR(xprt))
+ return xprt;
+ transport = container_of(xprt, struct sock_xprt, xprt);
+
+ xprt->prot = IPPROTO_TCP;
+ xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+ xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+ xprt->timeout = &xs_tcp_default_timeout;
+
+ /* backchannel */
+ xprt_set_bound(xprt);
+ xprt->bind_timeout = 0;
+ xprt->connect_timeout = 0;
+ xprt->reestablish_timeout = 0;
+ xprt->idle_timeout = 0;
+
+ /*
+ * The backchannel uses the same socket connection as the
+ * forechannel
+ */
+ xprt->bc_xprt = args->bc_xprt;
+ bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+ bc_sock->sk_bc_xprt = xprt;
+ transport->sock = bc_sock->sk_sock;
+ transport->inet = bc_sock->sk_sk;
+
+ xprt->ops = &bc_tcp_ops;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ xs_format_peer_addresses(xprt, "tcp",
+ RPCBIND_NETID_TCP);
+ break;
+ case AF_INET6:
+ xs_format_peer_addresses(xprt, "tcp",
+ RPCBIND_NETID_TCP6);
+ break;
+ default:
+ kfree(xprt);
+ return ERR_PTR(-EAFNOSUPPORT);
+ }
+
+ if (xprt_bound(xprt))
+ dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+ else
+ dprintk("RPC: set up xprt to %s (autobind) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+
+ /*
+ * Since we don't want connections for the backchannel, we set
+ * the xprt status to connected
+ */
+ xprt_set_connected(xprt);
+
+
+ if (try_module_get(THIS_MODULE))
+ return xprt;
+ kfree(xprt->slot);
+ kfree(xprt);
+ return ERR_PTR(-EINVAL);
+}
+
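
Transports are now registered and selected by XPRT_TRANSPORT_* identifiers rather than raw IPPROTO_* values, and the backchannel transport is requested through args->bc_xprt (plumbed through rpc_create() in the clnt.c hunk earlier). A hedged sketch of how a caller would ask for it — field names are assumed from the hunks in this patch, and the usual program/version/auth fields are omitted:

/* Sketch only: not a complete NFSv4.1 callback-client setup. */
static struct rpc_clnt *example_create_bc_client(struct svc_xprt *bc_xprt,
						 struct sockaddr *peer,
						 size_t peerlen)
{
	struct rpc_create_args args = {
		.protocol = XPRT_TRANSPORT_BC_TCP,
		.address  = peer,
		.addrsize = peerlen,
		.bc_xprt  = bc_xprt,	/* existing forechannel svc_xprt */
	};

	return rpc_create(&args);
}
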
static struct xprt_class xs_udp_transport = {
.list = LIST_HEAD_INIT(xs_udp_transport.list),
.name = "udp",
.owner = THIS_MODULE,
- .ident = IPPROTO_UDP,
+ .ident = XPRT_TRANSPORT_UDP,
.setup = xs_setup_udp,
};
@@ -2334,10 +2571,18 @@ static struct xprt_class xs_tcp_transport = {
.list = LIST_HEAD_INIT(xs_tcp_transport.list),
.name = "tcp",
.owner = THIS_MODULE,
- .ident = IPPROTO_TCP,
+ .ident = XPRT_TRANSPORT_TCP,
.setup = xs_setup_tcp,
};
+static struct xprt_class xs_bc_tcp_transport = {
+ .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
+ .name = "tcp NFSv4.1 backchannel",
+ .owner = THIS_MODULE,
+ .ident = XPRT_TRANSPORT_BC_TCP,
+ .setup = xs_setup_bc_tcp,
+};
+
/**
* init_socket_xprt - set up xprtsock's sysctls, register with RPC client
*
@@ -2351,6 +2596,7 @@ int init_socket_xprt(void)
xprt_register_transport(&xs_udp_transport);
xprt_register_transport(&xs_tcp_transport);
+ xprt_register_transport(&xs_bc_tcp_transport);
return 0;
}
@@ -2370,6 +2616,7 @@ void cleanup_socket_xprt(void)
xprt_unregister_transport(&xs_udp_transport);
xprt_unregister_transport(&xs_tcp_transport);
+ xprt_unregister_transport(&xs_bc_tcp_transport);
}
static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 429dd06a4ec..561a45cf2a6 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -834,7 +834,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
return 0;
}
- return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);;
+ return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index d16cd9ea4d0..bf725275eb8 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -26,11 +26,11 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
wdev->wext.connect.ie = wdev->wext.ie;
wdev->wext.connect.ie_len = wdev->wext.ie_len;
- wdev->wext.connect.privacy = wdev->wext.default_key != -1;
if (wdev->wext.keys) {
wdev->wext.keys->def = wdev->wext.default_key;
wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
+ wdev->wext.connect.privacy = true;
}
if (!wdev->wext.connect.ssid_len)
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index c29be8f9024..4f9c1908593 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -83,11 +83,12 @@ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
# is automatically cleaned up.
try-run = $(shell set -e; \
TMP="$(TMPOUT).$$$$.tmp"; \
+ TMPO="$(TMPOUT).$$$$.o"; \
if ($(1)) >/dev/null 2>&1; \
then echo "$(2)"; \
else echo "$(3)"; \
fi; \
- rm -f "$$TMP")
+ rm -f "$$TMP" "$$TMPO")
# as-option
# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
@@ -105,12 +106,12 @@ as-instr = $(call try-run,\
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
cc-option = $(call try-run,\
- $(CC) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
cc-option-yn = $(call try-run,\
- $(CC) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
+ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
# cc-option-align
# Prefix align with either -falign or -malign
@@ -130,10 +131,15 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
+# cc-ldoption
+# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+cc-ldoption = $(call try-run,\
+ $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
+
# ld-option
-# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both)
+# Usage: LDFLAGS += $(call ld-option, -X)
ld-option = $(call try-run,\
- $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) /dev/null -c -o "$$TMPO" ; $(LD) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
######
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 5c4b7a400c1..341b58902ff 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -206,7 +206,7 @@ cmd_modversions = \
endif
ifdef CONFIG_FTRACE_MCOUNT_RECORD
-cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
+cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(CONFIG_64BIT),64,32)" \
"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
@@ -216,6 +216,7 @@ define rule_cc_o_c
$(call echo-cmd,checksrc) $(cmd_checksrc) \
$(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \
$(cmd_modversions) \
+ $(call echo-cmd,record_mcount) \
$(cmd_record_mcount) \
scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \
$(dot-target).tmp; \
@@ -269,7 +270,8 @@ targets += $(extra-y) $(MAKECMDGOALS) $(always)
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
quiet_cmd_cpp_lds_S = LDS $@
- cmd_cpp_lds_S = $(CPP) $(cpp_flags) -D__ASSEMBLY__ -o $@ $<
+ cmd_cpp_lds_S = $(CPP) $(cpp_flags) -P -C -U$(ARCH) \
+ -D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
$(obj)/%.lds: $(src)/%.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c
index 99ca7a69868..79ab973fb43 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/basic/docproc.c
@@ -71,7 +71,7 @@ FILELINE * docsection;
static char *srctree, *kernsrctree;
-void usage (void)
+static void usage (void)
{
fprintf(stderr, "Usage: docproc {doc|depend} file\n");
fprintf(stderr, "Input is read from file.tmpl. Output is sent to stdout\n");
@@ -84,7 +84,7 @@ void usage (void)
/*
* Execute kernel-doc with parameters given in svec
*/
-void exec_kernel_doc(char **svec)
+static void exec_kernel_doc(char **svec)
{
pid_t pid;
int ret;
@@ -129,7 +129,7 @@ struct symfile
struct symfile symfilelist[MAXFILES];
int symfilecnt = 0;
-void add_new_symbol(struct symfile *sym, char * symname)
+static void add_new_symbol(struct symfile *sym, char * symname)
{
sym->symbollist =
realloc(sym->symbollist, (sym->symbolcnt + 1) * sizeof(char *));
@@ -137,14 +137,14 @@ void add_new_symbol(struct symfile *sym, char * symname)
}
/* Add a filename to the list */
-struct symfile * add_new_file(char * filename)
+static struct symfile * add_new_file(char * filename)
{
symfilelist[symfilecnt++].filename = strdup(filename);
return &symfilelist[symfilecnt - 1];
}
/* Check if file already are present in the list */
-struct symfile * filename_exist(char * filename)
+static struct symfile * filename_exist(char * filename)
{
int i;
for (i=0; i < symfilecnt; i++)
@@ -157,20 +157,20 @@ struct symfile * filename_exist(char * filename)
* List all files referenced within the template file.
* Files are separated by tabs.
*/
-void adddep(char * file) { printf("\t%s", file); }
-void adddep2(char * file, char * line) { line = line; adddep(file); }
-void noaction(char * line) { line = line; }
-void noaction2(char * file, char * line) { file = file; line = line; }
+static void adddep(char * file) { printf("\t%s", file); }
+static void adddep2(char * file, char * line) { line = line; adddep(file); }
+static void noaction(char * line) { line = line; }
+static void noaction2(char * file, char * line) { file = file; line = line; }
/* Echo the line without further action */
-void printline(char * line) { printf("%s", line); }
+static void printline(char * line) { printf("%s", line); }
/*
* Find all symbols in filename that are exported with EXPORT_SYMBOL &
* EXPORT_SYMBOL_GPL (& EXPORT_SYMBOL_GPL_FUTURE implicitly).
* All symbols located are stored in symfilelist.
*/
-void find_export_symbols(char * filename)
+static void find_export_symbols(char * filename)
{
FILE * fp;
struct symfile *sym;
@@ -227,7 +227,7 @@ void find_export_symbols(char * filename)
* intfunc uses -nofunction
* extfunc uses -function
*/
-void docfunctions(char * filename, char * type)
+static void docfunctions(char * filename, char * type)
{
int i,j;
int symcnt = 0;
@@ -258,15 +258,15 @@ void docfunctions(char * filename, char * type)
fflush(stdout);
free(vec);
}
-void intfunc(char * filename) { docfunctions(filename, NOFUNCTION); }
-void extfunc(char * filename) { docfunctions(filename, FUNCTION); }
+static void intfunc(char * filename) { docfunctions(filename, NOFUNCTION); }
+static void extfunc(char * filename) { docfunctions(filename, FUNCTION); }
/*
* Document specific function(s) in a file.
* Call kernel-doc with the following parameters:
* kernel-doc -docbook -function function1 [-function function2]
*/
-void singfunc(char * filename, char * line)
+static void singfunc(char * filename, char * line)
{
char *vec[200]; /* Enough for specific functions */
int i, idx = 0;
@@ -297,7 +297,7 @@ void singfunc(char * filename, char * line)
* Call kernel-doc with the following parameters:
* kernel-doc -docbook -function "doc section" filename
*/
-void docsect(char *filename, char *line)
+static void docsect(char *filename, char *line)
{
char *vec[6]; /* kerneldoc -docbook -function "section" file NULL */
char *s;
@@ -324,7 +324,7 @@ void docsect(char *filename, char *line)
* 5) Lines containing !P
* 6) Default lines - lines not matching the above
*/
-void parse_file(FILE *infile)
+static void parse_file(FILE *infile)
{
char line[MAXLINESZ];
char * s;
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 72c15205bb2..6bf21f83837 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -16,8 +16,7 @@
* tells make when to remake a file.
*
* To use this list as-is however has the drawback that virtually
- * every file in the kernel includes <linux/config.h> which then again
- * includes <linux/autoconf.h>
+ * every file in the kernel includes <linux/autoconf.h>.
*
* If the user re-runs make *config, linux/autoconf.h will be
* regenerated. make notices that and will rebuild every file which
@@ -125,8 +124,7 @@ char *target;
char *depfile;
char *cmdline;
-void usage(void)
-
+static void usage(void)
{
fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
exit(1);
@@ -135,7 +133,7 @@ void usage(void)
/*
* Print out the commandline prefixed with cmd_<target filename> :=
*/
-void print_cmdline(void)
+static void print_cmdline(void)
{
printf("cmd_%s := %s\n\n", target, cmdline);
}
@@ -148,7 +146,7 @@ int len_config = 0;
* Grow the configuration string to a desired length.
* Usually the first growth is plenty.
*/
-void grow_config(int len)
+static void grow_config(int len)
{
while (len_config + len > size_config) {
if (size_config == 0)
@@ -164,7 +162,7 @@ void grow_config(int len)
/*
* Lookup a value in the configuration string.
*/
-int is_defined_config(const char * name, int len)
+static int is_defined_config(const char * name, int len)
{
const char * pconfig;
const char * plast = str_config + len_config - len;
@@ -180,7 +178,7 @@ int is_defined_config(const char * name, int len)
/*
* Add a new value to the configuration string.
*/
-void define_config(const char * name, int len)
+static void define_config(const char * name, int len)
{
grow_config(len + 1);
@@ -192,7 +190,7 @@ void define_config(const char * name, int len)
/*
* Clear the set of configuration strings.
*/
-void clear_config(void)
+static void clear_config(void)
{
len_config = 0;
define_config("", 0);
@@ -201,7 +199,7 @@ void clear_config(void)
/*
* Record the use of a CONFIG_* word.
*/
-void use_config(char *m, int slen)
+static void use_config(char *m, int slen)
{
char s[PATH_MAX];
char *p;
@@ -222,7 +220,7 @@ void use_config(char *m, int slen)
printf(" $(wildcard include/config/%s.h) \\\n", s);
}
-void parse_config_file(char *map, size_t len)
+static void parse_config_file(char *map, size_t len)
{
int *end = (int *) (map + len);
/* start at +1, so that p can never be < map */
@@ -256,7 +254,7 @@ void parse_config_file(char *map, size_t len)
}
/* test is s ends in sub */
-int strrcmp(char *s, char *sub)
+static int strrcmp(char *s, char *sub)
{
int slen = strlen(s);
int sublen = strlen(sub);
@@ -267,7 +265,7 @@ int strrcmp(char *s, char *sub)
return memcmp(s + slen - sublen, sub, sublen);
}
-void do_config_file(char *filename)
+static void do_config_file(char *filename)
{
struct stat st;
int fd;
@@ -298,7 +296,7 @@ void do_config_file(char *filename)
close(fd);
}
-void parse_dep_file(void *map, size_t len)
+static void parse_dep_file(void *map, size_t len)
{
char *m = map;
char *end = m + len;
@@ -338,7 +336,7 @@ void parse_dep_file(void *map, size_t len)
printf("$(deps_%s):\n", target);
}
-void print_deps(void)
+static void print_deps(void)
{
struct stat st;
int fd;
@@ -370,7 +368,7 @@ void print_deps(void)
close(fd);
}
-void traps(void)
+static void traps(void)
{
static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
int *p = (int *)test;
diff --git a/scripts/basic/hash.c b/scripts/basic/hash.c
index 3299ad7fc8c..2ef5d3f666b 100644
--- a/scripts/basic/hash.c
+++ b/scripts/basic/hash.c
@@ -21,7 +21,7 @@ static void usage(void)
* http://www.cse.yorku.ca/~oz/hash.html
*/
-unsigned int djb2_hash(char *str)
+static unsigned int djb2_hash(char *str)
{
unsigned long hash = 5381;
int c;
@@ -34,7 +34,7 @@ unsigned int djb2_hash(char *str)
return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
}
-unsigned int r5_hash(char *str)
+static unsigned int r5_hash(char *str)
{
unsigned long hash = 0;
int c;
diff --git a/scripts/checkincludes.pl b/scripts/checkincludes.pl
index 8e6b716c191..676ddc07d6f 100755
--- a/scripts/checkincludes.pl
+++ b/scripts/checkincludes.pl
@@ -1,24 +1,85 @@
#!/usr/bin/perl
#
-# checkincludes: Find files included more than once in (other) files.
+# checkincludes: find/remove files included more than once
+#
# Copyright abandoned, 2000, Niels Kristian Bech Jensen <nkbj@image.dk>.
+# Copyright 2009 Luis R. Rodriguez <mcgrof@gmail.com>
+#
+# This script checks for duplicate includes. It can also remove
+# them in place. Note that it does not take macros into account,
+# so only run it with -r if you know the duplicates are real and
+# are not guarded by #ifdefs; otherwise just review the results.
+
+sub usage {
+ print "Usage: checkincludes.pl [-r]\n";
+ print "By default we just warn of duplicates\n";
+ print "To remove duplicated includes in place use -r\n";
+ exit 1;
+}
+
+my $remove = 0;
+
+if ($#ARGV < 0) {
+ usage();
+}
+
+if ($#ARGV >= 1) {
+ if ($ARGV[0] =~ /^-/) {
+ if ($ARGV[0] eq "-r") {
+ $remove = 1;
+ shift;
+ } else {
+ usage();
+ }
+ }
+}
foreach $file (@ARGV) {
open(FILE, $file) or die "Cannot open $file: $!.\n";
my %includedfiles = ();
+ my @file_lines = ();
while (<FILE>) {
if (m/^\s*#\s*include\s*[<"](\S*)[>"]/o) {
++$includedfiles{$1};
}
+ push(@file_lines, $_);
}
-
- foreach $filename (keys %includedfiles) {
- if ($includedfiles{$filename} > 1) {
- print "$file: $filename is included more than once.\n";
+
+ close(FILE);
+
+ if (!$remove) {
+ foreach $filename (keys %includedfiles) {
+ if ($includedfiles{$filename} > 1) {
+ print "$file: $filename is included more than once.\n";
+ }
}
+ next;
}
+ open(FILE,">$file") || die("Cannot write to $file: $!");
+
+ my $dups = 0;
+ foreach (@file_lines) {
+ if (m/^\s*#\s*include\s*[<"](\S*)[>"]/o) {
+ foreach $filename (keys %includedfiles) {
+ if ($1 eq $filename) {
+ if ($includedfiles{$filename} > 1) {
+ $includedfiles{$filename}--;
+ $dups++;
+ } else {
+ print FILE $_;
+ }
+ }
+ }
+ } else {
+ print FILE $_;
+ }
+ }
+ if ($dups > 0) {
+ print "$file: removed $dups duplicate includes\n";
+ }
close(FILE);
}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 2d5ece798c4..87bbb8bce9b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -10,7 +10,7 @@ use strict;
my $P = $0;
$P =~ s@.*/@@g;
-my $V = '0.28';
+my $V = '0.29';
use Getopt::Long qw(:config no_auto_abbrev);
@@ -28,6 +28,41 @@ my $mailback = 0;
my $summary_file = 0;
my $root;
my %debug;
+my $help = 0;
+
+sub help {
+ my ($exitcode) = @_;
+
+ print << "EOM";
+Usage: $P [OPTION]... [FILE]...
+Version: $V
+
+Options:
+ -q, --quiet quiet
+ --no-tree run without a kernel tree
+ --no-signoff do not check for 'Signed-off-by' line
+ --patch treat FILE as patchfile (default)
+ --emacs emacs compile window format
+ --terse one line per report
+ -f, --file treat FILE as regular source file
+ --subjective, --strict enable more subjective tests
+ --root=PATH PATH to the kernel tree root
+ --no-summary suppress the per-file summary
+ --mailback only produce a report in case of warnings/errors
+ --summary-file include the filename in summary
+ --debug KEY=[0|1] turn on/off debugging of KEY, where KEY is one of
+ 'values', 'possible', 'type', and 'attr' (default
+ is all off)
+ --test-only=WORD report only warnings/errors containing WORD
+ literally
+ -h, --help, --version display this help and exit
+
+When FILE is - read standard input.
+EOM
+
+ exit($exitcode);
+}
+
GetOptions(
'q|quiet+' => \$quiet,
'tree!' => \$tree,
@@ -35,7 +70,7 @@ GetOptions(
'patch!' => \$chk_patch,
'emacs!' => \$emacs,
'terse!' => \$terse,
- 'file!' => \$file,
+ 'f|file!' => \$file,
'subjective!' => \$check,
'strict!' => \$check,
'root=s' => \$root,
@@ -45,22 +80,16 @@ GetOptions(
'debug=s' => \%debug,
'test-only=s' => \$tst_only,
-) or exit;
+ 'h|help' => \$help,
+ 'version' => \$help
+) or help(1);
+
+help(0) if ($help);
my $exit = 0;
if ($#ARGV < 0) {
- print "usage: $P [options] patchfile\n";
- print "version: $V\n";
- print "options: -q => quiet\n";
- print " --no-tree => run without a kernel tree\n";
- print " --terse => one line per report\n";
- print " --emacs => emacs compile window format\n";
- print " --file => check a source file\n";
- print " --strict => enable more subjective tests\n";
- print " --root => path to the kernel tree root\n";
- print " --no-summary => suppress the per-file summary\n";
- print " --summary-file => include the filename in summary\n";
+ print "$P: no input files\n";
exit(1);
}
@@ -153,7 +182,7 @@ our $UTF8 = qr {
}x;
our $typeTypedefs = qr{(?x:
- (?:__)?(?:u|s|be|le)(?:\d|\d\d)|
+ (?:__)?(?:u|s|be|le)(?:8|16|32|64)|
atomic_t
)};
@@ -356,6 +385,13 @@ sub sanitise_line {
$off++;
next;
}
+ if ($sanitise_quote eq '' && substr($line, $off, 2) eq '//') {
+ $sanitise_quote = '//';
+
+ substr($res, $off, 2, $sanitise_quote);
+ $off++;
+ next;
+ }
# A \ in a string means ignore the next character.
if (($sanitise_quote eq "'" || $sanitise_quote eq '"') &&
@@ -379,6 +415,8 @@ sub sanitise_line {
#print "c<$c> SQ<$sanitise_quote>\n";
if ($off != 0 && $sanitise_quote eq '*/' && $c ne "\t") {
substr($res, $off, 1, $;);
+ } elsif ($off != 0 && $sanitise_quote eq '//' && $c ne "\t") {
+ substr($res, $off, 1, $;);
} elsif ($off != 0 && $sanitise_quote && $c ne "\t") {
substr($res, $off, 1, 'X');
} else {
@@ -386,6 +424,10 @@ sub sanitise_line {
}
}
+ if ($sanitise_quote eq '//') {
+ $sanitise_quote = '';
+ }
+
# The pathname on a #include may be surrounded by '<' and '>'.
if ($res =~ /^.\s*\#\s*include\s+\<(.*)\>/) {
my $clean = 'X' x length($1);
@@ -1336,6 +1378,18 @@ sub process {
WARN("adding a line without newline at end of file\n" . $herecurr);
}
+# Blackfin: use hi/lo macros
+ if ($realfile =~ m@arch/blackfin/.*\.S$@) {
+ if ($line =~ /\.[lL][[:space:]]*=.*&[[:space:]]*0x[fF][fF][fF][fF]/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the LO() macro, not (... & 0xFFFF)\n" . $herevet);
+ }
+ if ($line =~ /\.[hH][[:space:]]*=.*>>[[:space:]]*16/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the HI() macro, not (... >> 16)\n" . $herevet);
+ }
+ }
+
# check we are in a valid source file C or perl if not then ignore this hunk
next if ($realfile !~ /\.(h|c|pl)$/);
@@ -1355,6 +1409,16 @@ sub process {
WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr);
}
+# Blackfin: don't use __builtin_bfin_[cs]sync
+ if ($line =~ /__builtin_bfin_csync/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the CSYNC() macro in asm/blackfin.h\n" . $herevet);
+ }
+ if ($line =~ /__builtin_bfin_ssync/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the SSYNC() macro in asm/blackfin.h\n" . $herevet);
+ }
+
# Check for potential 'bare' types
my ($stat, $cond, $line_nr_next, $remain_next, $off_next);
if ($realcnt && $line =~ /.\s*\S/) {
@@ -1372,6 +1436,8 @@ sub process {
# Ignore functions being called
} elsif ($s =~ /^.\s*$Ident\s*\(/s) {
+ } elsif ($s =~ /^.\s*else\b/s) {
+
# declarations always start with types
} elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+?)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))(?:\s*$Modifier)?\s*(?:;|=|,|\()/s) {
my $type = $1;
@@ -1532,8 +1598,9 @@ sub process {
$s =~ /^\s*#\s*?/ ||
$s =~ /^\s*$Ident\s*:/) {
$continuation = ($s =~ /^.*?\\\n/) ? 1 : 0;
- $s =~ s/^.*?\n//;
- $cond_lines++;
+ if ($s =~ s/^.*?\n//) {
+ $cond_lines++;
+ }
}
}
@@ -1891,7 +1958,7 @@ sub process {
# A unary '*' may be const
} elsif ($ctx =~ /.xW/) {
- ERROR("Aspace prohibited after that '$op' $at\n" . $hereptr);
+ ERROR("space prohibited after that '$op' $at\n" . $hereptr);
}
# unary ++ and unary -- are allowed no space on one side.
@@ -2243,7 +2310,8 @@ sub process {
DECLARE_PER_CPU|
DEFINE_PER_CPU|
__typeof__\(|
- \.$Ident\s*=\s*
+ \.$Ident\s*=\s*|
+ ^\"|\"$
}x;
#print "REST<$rest> dstat<$dstat>\n";
if ($rest ne '') {
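
Among the checkpatch.pl hunks above, the sanitise_line() changes teach the script to mask C99 "//" comments the same way block comments are handled: once a "//" is seen outside a string, the rest of the line is overwritten with placeholder characters and the state is cleared at end of line, so later regex checks cannot trigger on comment text. A simplified, self-contained Perl sketch of that masking idea (not the patch code itself; function name and test strings are made up, and the quoting rules are cut down for brevity):

    #!/usr/bin/perl -w
    # Simplified illustration: blank out the body of a trailing C99 "//"
    # comment so later pattern checks cannot match inside it.  The quote
    # handling is reduced to the bare minimum for the example.
    use strict;

    sub mask_line_comment {
        my ($line) = @_;
        my $in_quote = '';

        for (my $off = 0; $off < length($line); $off++) {
            my $c = substr($line, $off, 1);
            if ($in_quote eq '' && substr($line, $off, 2) eq '//') {
                # keep the marker, overwrite the comment body
                substr($line, $off + 2) = 'X' x (length($line) - $off - 2);
                last;
            }
            if ($in_quote ne '') {
                $in_quote = '' if ($c eq $in_quote);
                $off++ if ($c eq '\\');    # skip the escaped character
            } elsif ($c eq '"' || $c eq "'") {
                $in_quote = $c;
            }
        }
        return $line;
    }

    print mask_line_comment('a = b;  // strip "this" text'), "\n";
    print mask_line_comment('s = "// not a comment";'), "\n";
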
diff --git a/scripts/conmakehash.c b/scripts/conmakehash.c
index e0c6891a9ad..263a44d57fa 100644
--- a/scripts/conmakehash.c
+++ b/scripts/conmakehash.c
@@ -24,14 +24,14 @@
typedef unsigned short unicode;
-void usage(char *argv0)
+static void usage(char *argv0)
{
fprintf(stderr, "Usage: \n"
" %s chartable [hashsize] [hashstep] [maxhashlevel]\n", argv0);
exit(EX_USAGE);
}
-int getunicode(char **p0)
+static int getunicode(char **p0)
{
char *p = *p0;
@@ -49,7 +49,7 @@ unicode unitable[MAX_FONTLEN][255];
/* Massive overkill, but who cares? */
int unicount[MAX_FONTLEN];
-void addpair(int fp, int un)
+static void addpair(int fp, int un)
{
int i;
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 3a8297b5184..af6b8363a2d 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -176,7 +176,7 @@ static int is_unknown_symbol(struct symbol *sym)
strcmp(defn->string, "{") == 0);
}
-struct symbol *__add_symbol(const char *name, enum symbol_type type,
+static struct symbol *__add_symbol(const char *name, enum symbol_type type,
struct string_list *defn, int is_extern,
int is_reference)
{
@@ -265,7 +265,7 @@ struct symbol *add_symbol(const char *name, enum symbol_type type,
return __add_symbol(name, type, defn, is_extern, 0);
}
-struct symbol *add_reference_symbol(const char *name, enum symbol_type type,
+static struct symbol *add_reference_symbol(const char *name, enum symbol_type type,
struct string_list *defn, int is_extern)
{
return __add_symbol(name, type, defn, is_extern, 1);
@@ -313,7 +313,7 @@ static int equal_list(struct string_list *a, struct string_list *b)
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-struct string_list *read_node(FILE *f)
+static struct string_list *read_node(FILE *f)
{
char buffer[256];
struct string_list node = {
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 278a45bd45a..cdb44b63342 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
use strict;
my $P = $0;
-my $V = '0.17';
+my $V = '0.20';
use Getopt::Long qw(:config no_auto_abbrev);
@@ -29,6 +29,8 @@ my $email_git_min_signatures = 1;
my $email_git_max_maintainers = 5;
my $email_git_min_percent = 5;
my $email_git_since = "1-year-ago";
+my $email_git_blame = 0;
+my $email_remove_duplicates = 1;
my $output_multiline = 1;
my $output_separator = ", ";
my $scm = 0;
@@ -36,6 +38,7 @@ my $web = 0;
my $subsystem = 0;
my $status = 0;
my $from_filename = 0;
+my $pattern_depth = 0;
my $version = 0;
my $help = 0;
@@ -68,6 +71,8 @@ if (!GetOptions(
'git-max-maintainers=i' => \$email_git_max_maintainers,
'git-min-percent=i' => \$email_git_min_percent,
'git-since=s' => \$email_git_since,
+ 'git-blame!' => \$email_git_blame,
+ 'remove-duplicates!' => \$email_remove_duplicates,
'm!' => \$email_maintainer,
'n!' => \$email_usename,
'l!' => \$email_list,
@@ -78,6 +83,7 @@ if (!GetOptions(
'status!' => \$status,
'scm!' => \$scm,
'web!' => \$web,
+ 'pattern-depth=i' => \$pattern_depth,
'f|file' => \$from_filename,
'v|version' => \$version,
'h|help' => \$help,
@@ -101,14 +107,19 @@ if ($#ARGV < 0) {
die "$P: argument missing: patchfile or -f file please\n";
}
+if ($output_separator ne ", ") {
+ $output_multiline = 0;
+}
+
my $selections = $email + $scm + $status + $subsystem + $web;
if ($selections == 0) {
usage();
die "$P: Missing required option: email, scm, status, subsystem or web\n";
}
-if ($email && ($email_maintainer + $email_list + $email_subscriber_list
- + $email_git + $email_git_penguin_chiefs) == 0) {
+if ($email &&
+ ($email_maintainer + $email_list + $email_subscriber_list +
+ $email_git + $email_git_penguin_chiefs + $email_git_blame) == 0) {
usage();
die "$P: Please select at least 1 email option\n";
}
@@ -147,9 +158,36 @@ while (<MAINT>) {
}
close(MAINT);
+my %mailmap;
+
+if ($email_remove_duplicates) {
+ open(MAILMAP, "<${lk_path}.mailmap") || warn "$P: Can't open .mailmap\n";
+ while (<MAILMAP>) {
+ my $line = $_;
+
+ next if ($line =~ m/^\s*#/);
+ next if ($line =~ m/^\s*$/);
+
+ my ($name, $address) = parse_email($line);
+ $line = format_email($name, $address);
+
+ next if ($line =~ m/^\s*$/);
+
+ if (exists($mailmap{$name})) {
+ my $obj = $mailmap{$name};
+ push(@$obj, $address);
+ } else {
+ my @arr = ($address);
+ $mailmap{$name} = \@arr;
+ }
+ }
+ close(MAILMAP);
+}
+
## use the filenames on the command line or find the filenames in the patchfiles
my @files = ();
+my @range = ();
foreach my $file (@ARGV) {
##if $file is a directory and it lacks a trailing slash, add one
@@ -162,13 +200,19 @@ foreach my $file (@ARGV) {
push(@files, $file);
} else {
my $file_cnt = @files;
+ my $lastfile;
open(PATCH, "<$file") or die "$P: Can't open ${file}\n";
while (<PATCH>) {
if (m/^\+\+\+\s+(\S+)/) {
my $filename = $1;
$filename =~ s@^[^/]*/@@;
$filename =~ s@\n@@;
+ $lastfile = $filename;
push(@files, $filename);
+ } elsif (m/^\@\@ -(\d+),(\d+)/) {
+ if ($email_git_blame) {
+ push(@range, "$lastfile:$1:$2");
+ }
}
}
close(PATCH);
@@ -201,6 +245,7 @@ foreach my $file (@files) {
if ($type eq 'X') {
if (file_match_pattern($file, $value)) {
$exclude = 1;
+ last;
}
}
}
@@ -208,35 +253,45 @@ foreach my $file (@files) {
if (!$exclude) {
my $tvi = 0;
+ my %hash;
foreach my $line (@typevalue) {
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
if ($type eq 'F') {
if (file_match_pattern($file, $value)) {
- add_categories($tvi);
+ my $value_pd = ($value =~ tr@/@@);
+ my $file_pd = ($file =~ tr@/@@);
+ $value_pd++ if (substr($value,-1,1) ne "/");
+ if ($pattern_depth == 0 ||
+ (($file_pd - $value_pd) < $pattern_depth)) {
+ $hash{$tvi} = $value_pd;
+ }
}
}
}
$tvi++;
}
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ add_categories($line);
+ }
}
if ($email && $email_git) {
recent_git_signoffs($file);
}
+ if ($email && $email_git_blame) {
+ git_assign_blame($file);
+ }
}
if ($email) {
foreach my $chief (@penguin_chief) {
if ($chief =~ m/^(.*):(.*)/) {
my $email_address;
- if ($email_usename) {
- $email_address = format_email($1, $2);
- } else {
- $email_address = $2;
- }
+
+ $email_address = format_email($1, $2);
if ($email_git_penguin_chiefs) {
push(@email_to, $email_address);
} else {
@@ -258,22 +313,22 @@ if ($email || $email_list) {
}
if ($scm) {
- @scm = sort_and_uniq(@scm);
+ @scm = uniq(@scm);
output(@scm);
}
if ($status) {
- @status = sort_and_uniq(@status);
+ @status = uniq(@status);
output(@status);
}
if ($subsystem) {
- @subsystem = sort_and_uniq(@subsystem);
+ @subsystem = uniq(@subsystem);
output(@subsystem);
}
if ($web) {
- @web = sort_and_uniq(@web);
+ @web = uniq(@web);
output(@web);
}
@@ -311,10 +366,12 @@ MAINTAINER field selection options:
--git-max-maintainers => maximum maintainers to add (default: 5)
--git-min-percent => minimum percentage of commits required (default: 5)
--git-since => git history to use (default: 1-year-ago)
+ --git-blame => use git blame to find modified commits for patch or file
--m => include maintainer(s) if any
--n => include name 'Full Name <addr\@domain.tld>'
--l => include list(s) if any
--s => include subscriber only list(s) if any
+ --remove-duplicates => minimize duplicate email names/addresses
--scm => print SCM tree(s) if any
--status => print status if any
--subsystem => print subsystem name if any
@@ -322,24 +379,28 @@ MAINTAINER field selection options:
Output type options:
--separator [, ] => separator for multiple entries on 1 line
+ using --separator also sets --nomultiline if --separator is not [, ]
--multiline => print 1 entry per line
-Default options:
- [--email --git --m --n --l --multiline]
-
Other options:
+ --pattern-depth => Number of pattern directory traversals (default: 0 (all))
--version => show version
--help => show this help information
+Default options:
+ [--email --git --m --n --l --multiline --pattern-depth=0 --remove-duplicates]
+
Notes:
Using "-f directory" may give unexpected results:
-
- Used with "--git", git signators for _all_ files in and below
- directory are examined as git recurses directories.
- Any specified X: (exclude) pattern matches are _not_ ignored.
- Used with "--nogit", directory is used as a pattern match,
- no individual file within the directory or subdirectory
- is matched.
+ Used with "--git", git signators for _all_ files in and below
+ directory are examined as git recurses directories.
+ Any specified X: (exclude) pattern matches are _not_ ignored.
+ Used with "--nogit", directory is used as a pattern match,
+ no individual file within the directory or subdirectory
+ is matched.
+ Used with "--git-blame", does not iterate all files in directory
+ Using "--git-blame" is slow and may add old committers and authors
+ that are no longer active maintainers to the output.
EOT
}
@@ -370,30 +431,100 @@ sub top_of_kernel_tree {
return 0;
}
-sub format_email {
- my ($name, $email) = @_;
+sub parse_email {
+ my ($formatted_email) = @_;
+
+ my $name = "";
+ my $address = "";
+
+ if ($formatted_email =~ /^([^<]+)<(.+\@.*)>.*$/) {
+ $name = $1;
+ $address = $2;
+ } elsif ($formatted_email =~ /^\s*<(.+\@\S*)>.*$/) {
+ $address = $1;
+ } elsif ($formatted_email =~ /^(.+\@\S*).*$/) {
+ $address = $1;
+ }
$name =~ s/^\s+|\s+$//g;
$name =~ s/^\"|\"$//g;
- $email =~ s/^\s+|\s+$//g;
+ $address =~ s/^\s+|\s+$//g;
+
+ if ($name =~ /[^a-z0-9 \.\-]/i) { ##has "must quote" chars
+ $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
+ $name = "\"$name\"";
+ }
+
+ return ($name, $address);
+}
+
+sub format_email {
+ my ($name, $address) = @_;
- my $formatted_email = "";
+ my $formatted_email;
+
+ $name =~ s/^\s+|\s+$//g;
+ $name =~ s/^\"|\"$//g;
+ $address =~ s/^\s+|\s+$//g;
if ($name =~ /[^a-z0-9 \.\-]/i) { ##has "must quote" chars
$name =~ s/(?<!\\)"/\\"/g; ##escape quotes
- $formatted_email = "\"${name}\"\ \<${email}\>";
+ $name = "\"$name\"";
+ }
+
+ if ($email_usename) {
+ if ("$name" eq "") {
+ $formatted_email = "$address";
+ } else {
+ $formatted_email = "$name <${address}>";
+ }
} else {
- $formatted_email = "${name} \<${email}\>";
+ $formatted_email = $address;
}
+
return $formatted_email;
}
-sub add_categories {
+sub find_starting_index {
+
my ($index) = @_;
- $index = $index - 1;
- while ($index >= 0) {
+ while ($index > 0) {
my $tv = $typevalue[$index];
+ if (!($tv =~ m/^(\C):\s*(.*)/)) {
+ last;
+ }
+ $index--;
+ }
+
+ return $index;
+}
+
+sub find_ending_index {
+ my ($index) = @_;
+
+ while ($index < @typevalue) {
+ my $tv = $typevalue[$index];
+ if (!($tv =~ m/^(\C):\s*(.*)/)) {
+ last;
+ }
+ $index++;
+ }
+
+ return $index;
+}
+
+sub add_categories {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ push(@subsystem, $typevalue[$start]);
+
+ for ($i = $start + 1; $i < $end; $i++) {
+ my $tv = $typevalue[$i];
if ($tv =~ m/^(\C):\s*(.*)/) {
my $ptype = $1;
my $pvalue = $2;
@@ -414,19 +545,19 @@ sub add_categories {
}
}
} elsif ($ptype eq "M") {
- my $p_used = 0;
- if ($index >= 0) {
- my $tv = $typevalue[$index - 1];
- if ($tv =~ m/^(\C):\s*(.*)/) {
- if ($1 eq "P") {
- if ($email_usename) {
- push_email_address(format_email($2, $pvalue));
- $p_used = 1;
+ my ($name, $address) = parse_email($pvalue);
+ if ($name eq "") {
+ if ($i > 0) {
+ my $tv = $typevalue[$i - 1];
+ if ($tv =~ m/^(\C):\s*(.*)/) {
+ if ($1 eq "P") {
+ $name = $2;
+ $pvalue = format_email($name, $address);
}
}
}
}
- if (!$p_used) {
+ if ($email_maintainer) {
push_email_addresses($pvalue);
}
} elsif ($ptype eq "T") {
@@ -436,31 +567,41 @@ sub add_categories {
} elsif ($ptype eq "S") {
push(@status, $pvalue);
}
-
- $index--;
- } else {
- push(@subsystem,$tv);
- $index = -1;
}
}
}
+my %email_hash_name;
+my %email_hash_address;
+
+sub email_inuse {
+ my ($name, $address) = @_;
+
+ return 1 if (($name eq "") && ($address eq ""));
+ return 1 if (($name ne "") && exists($email_hash_name{$name}));
+ return 1 if (($address ne "") && exists($email_hash_address{$address}));
+
+ return 0;
+}
+
sub push_email_address {
- my ($email_address) = @_;
+ my ($line) = @_;
+
+ my ($name, $address) = parse_email($line);
- my $email_name = "";
- if ($email_address =~ m/([^<]+)<(.*\@.*)>$/) {
- $email_name = $1;
- $email_address = $2;
+ if ($address eq "") {
+ return 0;
}
- if ($email_maintainer) {
- if ($email_usename && $email_name) {
- push(@email_to, format_email($email_name, $email_address));
- } else {
- push(@email_to, $email_address);
- }
+ if (!$email_remove_duplicates) {
+ push(@email_to, format_email($name, $address));
+ } elsif (!email_inuse($name, $address)) {
+ push(@email_to, format_email($name, $address));
+ $email_hash_name{$name}++;
+ $email_hash_address{$address}++;
}
+
+ return 1;
}
sub push_email_addresses {
@@ -476,7 +617,9 @@ sub push_email_addresses {
push_email_address($entry);
}
} else {
- warn("Invalid MAINTAINERS address: '" . $address . "'\n");
+ if (!push_email_address($address)) {
+ warn("Invalid MAINTAINERS address: '" . $address . "'\n");
+ }
}
}
@@ -492,6 +635,32 @@ sub which {
return "";
}
+sub mailmap {
+ my @lines = @_;
+ my %hash;
+
+ foreach my $line (@lines) {
+ my ($name, $address) = parse_email($line);
+ if (!exists($hash{$name})) {
+ $hash{$name} = $address;
+ } elsif ($address ne $hash{$name}) {
+ $address = $hash{$name};
+ $line = format_email($name, $address);
+ }
+ if (exists($mailmap{$name})) {
+ my $obj = $mailmap{$name};
+ foreach my $map_address (@$obj) {
+ if (($map_address eq $address) &&
+ ($map_address ne $hash{$name})) {
+ $line = format_email($name, $hash{$name});
+ }
+ }
+ }
+ }
+
+ return @lines;
+}
+
sub recent_git_signoffs {
my ($file) = @_;
@@ -500,6 +669,7 @@ sub recent_git_signoffs {
my $output = "";
my $count = 0;
my @lines = ();
+ my %hash;
my $total_sign_offs;
if (which("git") eq "") {
@@ -513,52 +683,119 @@ sub recent_git_signoffs {
}
$cmd = "git log --since=${email_git_since} -- ${file}";
- $cmd .= " | grep -Ei \"^[-_ a-z]+by:.*\\\@.*\$\"";
- if (!$email_git_penguin_chiefs) {
- $cmd .= " | grep -Ev \"${penguin_chiefs}\"";
- }
- $cmd .= " | cut -f2- -d\":\"";
- $cmd .= " | sort | uniq -c | sort -rn";
$output = `${cmd}`;
$output =~ s/^\s*//gm;
@lines = split("\n", $output);
- $total_sign_offs = 0;
+ @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
+ if (!$email_git_penguin_chiefs) {
+ @lines = grep(!/${penguin_chiefs}/i, @lines);
+ }
+ # cut -f2- -d":"
+ s/.*:\s*(.+)\s*/$1/ for (@lines);
+
+ $total_sign_offs = @lines;
+
+ if ($email_remove_duplicates) {
+ @lines = mailmap(@lines);
+ }
+
+ @lines = sort(@lines);
+
+ # uniq -c
+ $hash{$_}++ for @lines;
+
+ # sort -rn
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ my $sign_offs = $hash{$line};
+ $count++;
+ last if ($sign_offs < $email_git_min_signatures ||
+ $count > $email_git_max_maintainers ||
+ $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
+ push_email_address($line);
+ }
+}
+
+sub save_commits {
+ my ($cmd, @commits) = @_;
+ my $output;
+ my @lines = ();
+
+ $output = `${cmd}`;
+
+ @lines = split("\n", $output);
foreach my $line (@lines) {
- if ($line =~ m/([0-9]+)\s+(.*)/) {
- $total_sign_offs += $1;
- } else {
- die("$P: Unexpected git output: ${line}\n");
+ if ($line =~ m/^(\w+) /) {
+ push (@commits, $1);
}
}
+ return @commits;
+}
- foreach my $line (@lines) {
- if ($line =~ m/([0-9]+)\s+(.*)/) {
- my $sign_offs = $1;
- $line = $2;
- $count++;
- if ($sign_offs < $email_git_min_signatures ||
- $count > $email_git_max_maintainers ||
- $sign_offs * 100 / $total_sign_offs < $email_git_min_percent) {
- last;
- }
+sub git_assign_blame {
+ my ($file) = @_;
+
+ my @lines = ();
+ my @commits = ();
+ my $cmd;
+ my $output;
+ my %hash;
+ my $total_sign_offs;
+ my $count;
+
+ if (@range) {
+ foreach my $file_range_diff (@range) {
+ next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
+ my $diff_file = $1;
+ my $diff_start = $2;
+ my $diff_length = $3;
+ next if (!("$file" eq "$diff_file"));
+ $cmd = "git blame -l -L $diff_start,+$diff_length $file";
+ @commits = save_commits($cmd, @commits);
}
- if ($line =~ m/(.+)<(.+)>/) {
- my $git_name = $1;
- my $git_addr = $2;
- if ($email_usename) {
- push(@email_to, format_email($git_name, $git_addr));
- } else {
- push(@email_to, $git_addr);
- }
- } elsif ($line =~ m/<(.+)>/) {
- my $git_addr = $1;
- push(@email_to, $git_addr);
- } else {
- push(@email_to, $line);
+ } else {
+ if (-f $file) {
+ $cmd = "git blame -l $file";
+ @commits = save_commits($cmd, @commits);
+ }
+ }
+
+ $total_sign_offs = 0;
+ @commits = uniq(@commits);
+ foreach my $commit (@commits) {
+ $cmd = "git log -1 ${commit}";
+
+ $output = `${cmd}`;
+ $output =~ s/^\s*//gm;
+ @lines = split("\n", $output);
+
+ @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
+ if (!$email_git_penguin_chiefs) {
+ @lines = grep(!/${penguin_chiefs}/i, @lines);
+ }
+
+ # cut -f2- -d":"
+ s/.*:\s*(.+)\s*/$1/ for (@lines);
+
+ $total_sign_offs += @lines;
+
+ if ($email_remove_duplicates) {
+ @lines = mailmap(@lines);
}
+
+ $hash{$_}++ for @lines;
+ }
+
+ $count = 0;
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ my $sign_offs = $hash{$line};
+ $count++;
+ last if ($sign_offs < $email_git_min_signatures ||
+ $count > $email_git_max_maintainers ||
+ $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
+ push_email_address($line);
}
}
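
The largest part of the get_maintainer.pl diff above replaces the shell pipeline ("grep | cut | sort | uniq -c | sort -rn") that ranked git sign-offs with plain Perl: trailer lines are filtered with grep(), the tag prefix is stripped with a substitution, a hash stands in for "uniq -c", and a descending numeric sort applies the signature and maintainer limits. The same counting pattern, as a small self-contained sketch with invented sample data and limits:

    #!/usr/bin/perl -w
    # Standalone sketch of the hash-based replacement for the old
    # "grep | cut | sort | uniq -c | sort -rn" pipeline.  The sample
    # trailer lines and the cut-off below are invented for the example.
    use strict;

    my @lines = (
        "Signed-off-by: Alice Example <alice\@example.org>",
        "Acked-by: Bob Example <bob\@example.org>",
        "Signed-off-by: Alice Example <alice\@example.org>",
        "just a commit message line",
    );

    # keep only "...-by:" trailer lines, then strip the tag (cut -f2- -d":")
    @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
    s/.*:\s*(.+)\s*/$1/ for (@lines);

    # uniq -c
    my %count;
    $count{$_}++ for (@lines);

    # sort -rn, honouring a maximum-entries cut-off
    my $max_entries = 5;
    my $n = 0;
    foreach my $who (sort { $count{$b} <=> $count{$a} } keys %count) {
        last if (++$n > $max_entries);
        printf "%4d %s\n", $count{$who}, $who;
    }
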
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 64343cc084b..86c3896a1e0 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -585,7 +585,7 @@ static int prefix_underscores_count(const char *str)
{
const char *tail = str;
- while (*tail != '_')
+ while (*tail == '_')
tail++;
return tail - str;
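
The one-character kallsyms.c fix above flips the loop condition so that prefix_underscores_count() counts the leading underscores of a symbol rather than scanning forward to the first one. The corrected behaviour, restated as a short Perl check with invented sample symbols:

    #!/usr/bin/perl -w
    # The corrected behaviour of prefix_underscores_count(): count the
    # symbol's leading underscores.  The sample symbols are invented.
    use strict;

    sub prefix_underscores_count {
        my ($sym) = @_;
        $sym =~ /^(_*)/;
        return length($1);
    }

    printf "%d %s\n", prefix_underscores_count($_), $_
        for ("__kstrtab_foo", "_etext", "start_kernel");
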
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 3baaaecd6b1..9960d1c303f 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -38,14 +38,14 @@ static int conf_cnt;
static char line[128];
static struct menu *rootEntry;
-static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
-
-static const char *get_help(struct menu *menu)
+static void print_help(struct menu *menu)
{
- if (menu_has_help(menu))
- return _(menu_get_help(menu));
- else
- return nohelp_text;
+ struct gstr help = str_new();
+
+ menu_get_ext_help(menu, &help);
+
+ printf("\n%s\n", str_get(&help));
+ str_free(&help);
}
static void strip(char *str)
@@ -121,7 +121,7 @@ static int conf_askvalue(struct symbol *sym, const char *def)
return 1;
}
-int conf_string(struct menu *menu)
+static int conf_string(struct menu *menu)
{
struct symbol *sym = menu->sym;
const char *def;
@@ -140,7 +140,7 @@ int conf_string(struct menu *menu)
case '?':
/* print help */
if (line[1] == '\n') {
- printf("\n%s\n", get_help(menu));
+ print_help(menu);
def = NULL;
break;
}
@@ -220,7 +220,7 @@ static int conf_sym(struct menu *menu)
if (sym_set_tristate_value(sym, newval))
return 0;
help:
- printf("\n%s\n", get_help(menu));
+ print_help(menu);
}
}
@@ -307,7 +307,7 @@ static int conf_choice(struct menu *menu)
fgets(line, 128, stdin);
strip(line);
if (line[0] == '?') {
- printf("\n%s\n", get_help(menu));
+ print_help(menu);
continue;
}
if (!line[0])
@@ -331,7 +331,7 @@ static int conf_choice(struct menu *menu)
if (!child)
continue;
if (line[strlen(line) - 1] == '?') {
- printf("\n%s\n", get_help(child));
+ print_help(child);
continue;
}
sym_set_choice_value(sym, child->sym);
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index a04da3459f0..b55e72ff2fc 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -560,7 +560,7 @@ int conf_write(const char *name)
return 0;
}
-int conf_split_config(void)
+static int conf_split_config(void)
{
const char *name;
char path[128];
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 579ece4fa58..edd3f39a080 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -348,7 +348,7 @@ struct expr *expr_trans_bool(struct expr *e)
/*
* e1 || e2 -> ?
*/
-struct expr *expr_join_or(struct expr *e1, struct expr *e2)
+static struct expr *expr_join_or(struct expr *e1, struct expr *e2)
{
struct expr *tmp;
struct symbol *sym1, *sym2;
@@ -412,7 +412,7 @@ struct expr *expr_join_or(struct expr *e1, struct expr *e2)
return NULL;
}
-struct expr *expr_join_and(struct expr *e1, struct expr *e2)
+static struct expr *expr_join_and(struct expr *e1, struct expr *e2)
{
struct expr *tmp;
struct symbol *sym1, *sym2;
@@ -1098,6 +1098,8 @@ void expr_fprint(struct expr *e, FILE *out)
static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str)
{
str_append((struct gstr*)data, str);
+ if (sym)
+ str_printf((struct gstr*)data, " [=%s]", sym_get_string_value(sym));
}
void expr_gstr_print(struct expr *e, struct gstr *gs)
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 199b22bb49e..65464366fe3 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -456,19 +456,9 @@ static void text_insert_help(struct menu *menu)
GtkTextBuffer *buffer;
GtkTextIter start, end;
const char *prompt = _(menu_get_prompt(menu));
- gchar *name;
- const char *help;
+ struct gstr help = str_new();
- help = menu_get_help(menu);
-
- /* Gettextize if the help text not empty */
- if ((help != 0) && (help[0] != 0))
- help = _(help);
-
- if (menu->sym && menu->sym->name)
- name = g_strdup_printf(menu->sym->name);
- else
- name = g_strdup("");
+ menu_get_ext_help(menu, &help);
buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW(text_w));
gtk_text_buffer_get_bounds(buffer, &start, &end);
@@ -478,14 +468,11 @@ static void text_insert_help(struct menu *menu)
gtk_text_buffer_get_end_iter(buffer, &end);
gtk_text_buffer_insert_with_tags(buffer, &end, prompt, -1, tag1,
NULL);
- gtk_text_buffer_insert_at_cursor(buffer, " ", 1);
- gtk_text_buffer_get_end_iter(buffer, &end);
- gtk_text_buffer_insert_with_tags(buffer, &end, name, -1, tag1,
- NULL);
gtk_text_buffer_insert_at_cursor(buffer, "\n\n", 2);
gtk_text_buffer_get_end_iter(buffer, &end);
- gtk_text_buffer_insert_with_tags(buffer, &end, help, -1, tag2,
+ gtk_text_buffer_insert_with_tags(buffer, &end, str_get(&help), -1, tag2,
NULL);
+ str_free(&help);
}
diff --git a/scripts/kconfig/gconf.glade b/scripts/kconfig/gconf.glade
index 803233fdd6d..b1c86c19292 100644
--- a/scripts/kconfig/gconf.glade
+++ b/scripts/kconfig/gconf.glade
@@ -547,7 +547,7 @@
<property name="headers_visible">True</property>
<property name="rules_hint">False</property>
<property name="reorderable">False</property>
- <property name="enable_search">True</property>
+ <property name="enable_search">False</property>
<signal name="cursor_changed" handler="on_treeview2_cursor_changed" last_modification_time="Sun, 12 Jan 2003 15:58:22 GMT"/>
<signal name="button_press_event" handler="on_treeview1_button_press_event" last_modification_time="Sun, 12 Jan 2003 16:03:52 GMT"/>
<signal name="key_press_event" handler="on_treeview2_key_press_event" last_modification_time="Sun, 12 Jan 2003 16:11:44 GMT"/>
@@ -582,7 +582,7 @@
<property name="headers_visible">True</property>
<property name="rules_hint">False</property>
<property name="reorderable">False</property>
- <property name="enable_search">True</property>
+ <property name="enable_search">False</property>
<signal name="cursor_changed" handler="on_treeview2_cursor_changed" last_modification_time="Sun, 12 Jan 2003 15:57:55 GMT"/>
<signal name="button_press_event" handler="on_treeview2_button_press_event" last_modification_time="Sun, 12 Jan 2003 15:57:58 GMT"/>
<signal name="key_press_event" handler="on_treeview2_key_press_event" last_modification_time="Sun, 12 Jan 2003 15:58:01 GMT"/>
diff --git a/scripts/kconfig/kxgettext.c b/scripts/kconfig/kxgettext.c
index 8d9ce22b0fc..dcc3fcc0cc9 100644
--- a/scripts/kconfig/kxgettext.c
+++ b/scripts/kconfig/kxgettext.c
@@ -166,7 +166,7 @@ static int message__add(const char *msg, char *option, char *file, int lineno)
return rc;
}
-void menu_build_message_list(struct menu *menu)
+static void menu_build_message_list(struct menu *menu)
{
struct menu *child;
@@ -211,7 +211,7 @@ static void message__print_gettext_msgid_msgstr(struct message *self)
"msgstr \"\"\n", self->msg);
}
-void menu__xgettext(void)
+static void menu__xgettext(void)
{
struct message *m = message__list;
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index 8e69461313d..ffeb532b2cf 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -17,6 +17,8 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu));
P(menu_get_parent_menu,struct menu *,(struct menu *menu));
P(menu_has_help,bool,(struct menu *menu));
P(menu_get_help,const char *,(struct menu *menu));
+P(get_symbol_str,void,(struct gstr *r, struct symbol *sym));
+P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
/* symbol.c */
P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 25b60bc117f..d8295357358 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -199,8 +199,6 @@ inputbox_instructions_string[] = N_(
setmod_text[] = N_(
"This feature depends on another which has been configured as a module.\n"
"As a result, this feature will be built as a module."),
-nohelp_text[] = N_(
- "There is no help available for this kernel option.\n"),
load_config_text[] = N_(
"Enter the name of the configuration file you wish to load. "
"Accept the name shown to restore the configuration you "
@@ -284,66 +282,6 @@ static void show_textbox(const char *title, const char *text, int r, int c);
static void show_helptext(const char *title, const char *text);
static void show_help(struct menu *menu);
-static void get_prompt_str(struct gstr *r, struct property *prop)
-{
- int i, j;
- struct menu *submenu[8], *menu;
-
- str_printf(r, _("Prompt: %s\n"), _(prop->text));
- str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name,
- prop->menu->lineno);
- if (!expr_is_yes(prop->visible.expr)) {
- str_append(r, _(" Depends on: "));
- expr_gstr_print(prop->visible.expr, r);
- str_append(r, "\n");
- }
- menu = prop->menu->parent;
- for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent)
- submenu[i++] = menu;
- if (i > 0) {
- str_printf(r, _(" Location:\n"));
- for (j = 4; --i >= 0; j += 2) {
- menu = submenu[i];
- str_printf(r, "%*c-> %s", j, ' ', _(menu_get_prompt(menu)));
- if (menu->sym) {
- str_printf(r, " (%s [=%s])", menu->sym->name ?
- menu->sym->name : _("<choice>"),
- sym_get_string_value(menu->sym));
- }
- str_append(r, "\n");
- }
- }
-}
-
-static void get_symbol_str(struct gstr *r, struct symbol *sym)
-{
- bool hit;
- struct property *prop;
-
- if (sym && sym->name)
- str_printf(r, "Symbol: %s [=%s]\n", sym->name,
- sym_get_string_value(sym));
- for_all_prompts(sym, prop)
- get_prompt_str(r, prop);
- hit = false;
- for_all_properties(sym, prop, P_SELECT) {
- if (!hit) {
- str_append(r, " Selects: ");
- hit = true;
- } else
- str_printf(r, " && ");
- expr_gstr_print(prop->expr, r);
- }
- if (hit)
- str_append(r, "\n");
- if (sym->rev_dep.expr) {
- str_append(r, _(" Selected by: "));
- expr_gstr_print(sym->rev_dep.expr, r);
- str_append(r, "\n");
- }
- str_append(r, "\n\n");
-}
-
static struct gstr get_relations_str(struct symbol **sym_arr)
{
struct symbol *sym;
@@ -699,19 +637,9 @@ static void show_helptext(const char *title, const char *text)
static void show_help(struct menu *menu)
{
struct gstr help = str_new();
- struct symbol *sym = menu->sym;
-
- if (menu_has_help(menu))
- {
- if (sym->name) {
- str_printf(&help, "CONFIG_%s:\n\n", sym->name);
- str_append(&help, _(menu_get_help(menu)));
- str_append(&help, "\n");
- }
- } else {
- str_append(&help, nohelp_text);
- }
- get_symbol_str(&help, sym);
+
+ menu_get_ext_help(menu, &help);
+
show_helptext(_(menu_get_prompt(menu)), str_get(&help));
str_free(&help);
}
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 07ff8d105c9..059a2465c57 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -9,6 +9,9 @@
#define LKC_DIRECT_LINK
#include "lkc.h"
+static const char nohelp_text[] = N_(
+ "There is no help available for this kernel option.\n");
+
struct menu rootmenu;
static struct menu **last_entry_ptr;
@@ -74,7 +77,7 @@ void menu_end_menu(void)
current_menu = current_menu->parent;
}
-struct expr *menu_check_dep(struct expr *e)
+static struct expr *menu_check_dep(struct expr *e)
{
if (!e)
return e;
@@ -184,7 +187,7 @@ static int menu_range_valid_sym(struct symbol *sym, struct symbol *sym2)
(sym2->type == S_UNKNOWN && sym_string_valid(sym, sym2->name));
}
-void sym_check_prop(struct symbol *sym)
+static void sym_check_prop(struct symbol *sym)
{
struct property *prop;
struct symbol *sym2;
@@ -451,3 +454,80 @@ const char *menu_get_help(struct menu *menu)
else
return "";
}
+
+static void get_prompt_str(struct gstr *r, struct property *prop)
+{
+ int i, j;
+ struct menu *submenu[8], *menu;
+
+ str_printf(r, _("Prompt: %s\n"), _(prop->text));
+ str_printf(r, _(" Defined at %s:%d\n"), prop->menu->file->name,
+ prop->menu->lineno);
+ if (!expr_is_yes(prop->visible.expr)) {
+ str_append(r, _(" Depends on: "));
+ expr_gstr_print(prop->visible.expr, r);
+ str_append(r, "\n");
+ }
+ menu = prop->menu->parent;
+ for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent)
+ submenu[i++] = menu;
+ if (i > 0) {
+ str_printf(r, _(" Location:\n"));
+ for (j = 4; --i >= 0; j += 2) {
+ menu = submenu[i];
+ str_printf(r, "%*c-> %s", j, ' ', _(menu_get_prompt(menu)));
+ if (menu->sym) {
+ str_printf(r, " (%s [=%s])", menu->sym->name ?
+ menu->sym->name : _("<choice>"),
+ sym_get_string_value(menu->sym));
+ }
+ str_append(r, "\n");
+ }
+ }
+}
+
+void get_symbol_str(struct gstr *r, struct symbol *sym)
+{
+ bool hit;
+ struct property *prop;
+
+ if (sym && sym->name)
+ str_printf(r, "Symbol: %s [=%s]\n", sym->name,
+ sym_get_string_value(sym));
+ for_all_prompts(sym, prop)
+ get_prompt_str(r, prop);
+ hit = false;
+ for_all_properties(sym, prop, P_SELECT) {
+ if (!hit) {
+ str_append(r, " Selects: ");
+ hit = true;
+ } else
+ str_printf(r, " && ");
+ expr_gstr_print(prop->expr, r);
+ }
+ if (hit)
+ str_append(r, "\n");
+ if (sym->rev_dep.expr) {
+ str_append(r, _(" Selected by: "));
+ expr_gstr_print(sym->rev_dep.expr, r);
+ str_append(r, "\n");
+ }
+ str_append(r, "\n\n");
+}
+
+void menu_get_ext_help(struct menu *menu, struct gstr *help)
+{
+ struct symbol *sym = menu->sym;
+
+ if (menu_has_help(menu)) {
+ if (sym->name) {
+ str_printf(help, "CONFIG_%s:\n\n", sym->name);
+ str_append(help, _(menu_get_help(menu)));
+ str_append(help, "\n");
+ }
+ } else {
+ str_append(help, nohelp_text);
+ }
+ if (sym)
+ get_symbol_str(help, sym);
+}
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index ce7d508c752..00c51507cfc 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1042,12 +1042,10 @@ void ConfigInfoView::menuInfo(void)
if (showDebug())
debug = debug_info(sym);
- help = menu_get_help(menu);
- /* Gettextize if the help text not empty */
- if (help.isEmpty())
- help = print_filter(menu_get_help(menu));
- else
- help = print_filter(_(menu_get_help(menu)));
+ struct gstr help_gstr = str_new();
+ menu_get_ext_help(menu, &help_gstr);
+ help = print_filter(str_get(&help_gstr));
+ str_free(&help_gstr);
} else if (menu->prompt) {
head += "<big><b>";
head += print_filter(_(menu->prompt->text));
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 18f3e5c3363..6c8fbbb66eb 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -36,7 +36,7 @@ tristate modules_val;
struct expr *sym_env_list;
-void sym_add_default(struct symbol *sym, const char *def)
+static void sym_add_default(struct symbol *sym, const char *def)
{
struct property *prop = prop_alloc(P_DEFAULT, sym);
@@ -125,7 +125,7 @@ struct property *sym_get_default_prop(struct symbol *sym)
return NULL;
}
-struct property *sym_get_range_prop(struct symbol *sym)
+static struct property *sym_get_range_prop(struct symbol *sym)
{
struct property *prop;
@@ -943,7 +943,7 @@ const char *prop_get_type_name(enum prop_type type)
return "unknown";
}
-void prop_add_env(const char *env)
+static void prop_add_env(const char *env)
{
struct symbol *sym, *sym2;
struct property *prop;
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl
index 89774011965..5f0fcb712e2 100644
--- a/scripts/markup_oops.pl
+++ b/scripts/markup_oops.pl
@@ -184,10 +184,7 @@ if ($target eq "0") {
# if it's a module, we need to find the .ko file and calculate a load offset
if ($module ne "") {
- my $dir = dirname($filename);
- $dir = $dir . "/";
- my $mod = $module . ".ko";
- my $modulefile = `find $dir -name $mod | head -1`;
+ my $modulefile = `modinfo $module | grep '^filename:' | awk '{ print \$2 }'`;
chomp($modulefile);
$filename = $modulefile;
if ($filename eq "") {
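
markup_oops.pl above stops guessing the module path with find and instead asks modinfo, filtering the answer through grep and awk. The same lookup can also be done by reading the modinfo output directly in Perl; a minimal sketch, with the module name chosen purely as an example:

    #!/usr/bin/perl -w
    # Resolve a module's on-disk path by parsing modinfo output in Perl
    # instead of piping through grep and awk.  The module name below is
    # only an example.
    use strict;

    sub module_filename {
        my ($module) = @_;

        foreach my $line (`modinfo $module 2>/dev/null`) {
            return $1 if ($line =~ /^filename:\s*(\S+)/);
        }
        return "";
    }

    my $path = module_filename("ext4");
    if ($path ne "") {
        print "$path\n";
    } else {
        print "module not found or built in\n";
    }
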
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 40e0045876e..62a9025cdcc 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -657,6 +657,15 @@ static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
return 1;
}
+/* Looks like: spi:S */
+static int do_spi_entry(const char *filename, struct spi_device_id *id,
+ char *alias)
+{
+ sprintf(alias, SPI_MODULE_PREFIX "%s", id->name);
+
+ return 1;
+}
+
static const struct dmifield {
const char *prefix;
int field;
@@ -853,6 +862,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
do_table(symval, sym->st_size,
sizeof(struct i2c_device_id), "i2c",
do_i2c_entry, mod);
+ else if (sym_is(symname, "__mod_spi_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct spi_device_id), "spi",
+ do_spi_entry, mod);
else if (sym_is(symname, "__mod_dmi_device_table"))
do_table(symval, sym->st_size,
sizeof(struct dmi_system_id), "dmi",
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 4522948a012..801a16a1754 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -691,7 +691,7 @@ static int number_prefix(const char *sym)
* The $ syntax is for sections where ld append a dot number
* to make section name unique.
*/
-int match(const char *sym, const char * const pat[])
+static int match(const char *sym, const char * const pat[])
{
const char *p;
while (*pat) {
@@ -1746,7 +1746,7 @@ static void add_header(struct buffer *b, struct module *mod)
buf_printf(b, "};\n");
}
-void add_staging_flag(struct buffer *b, const char *name)
+static void add_staging_flag(struct buffer *b, const char *name)
{
static const char *staging_dir = "drivers/staging";
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index aadc5223dcd..ecf9c7dc182 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -334,8 +334,6 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
deps_drivers/net/dummy.o := \
drivers/net/dummy.c \
$(wildcard include/config/net/fastroute.h) \
- include/linux/config.h \
- $(wildcard include/config/h.h) \
include/linux/module.h \
Sum all files in the same dir or subdirs.
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index ca757d48618..b4ced856258 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -31,13 +31,13 @@
#include "flask.h"
-void usage(char *name)
+static void usage(char *name)
{
printf("usage: %s [-m] policy_file context_file\n", name);
exit(1);
}
-void find_common_name(char *cname, char *dest, int len)
+static void find_common_name(char *cname, char *dest, int len)
{
char *start, *end;
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 4a34ec591e8..d52f7a01557 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -101,7 +101,8 @@ exuberant()
-I ____cacheline_aligned_in_smp \
-I ____cacheline_internodealigned_in_smp \
-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
- --extra=+f --c-kinds=+px \
+ -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
+ --extra=+f --c-kinds=-px \
--regex-asm='/^ENTRY\(([^)]*)\).*/\1/' \
--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/'
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index b8186bac8b7..6cf8fd2b79e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -61,7 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
struct cgroup_subsys devices_subsys;
static int devcgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *new_cgroup, struct task_struct *task)
+ struct cgroup *new_cgroup, struct task_struct *task,
+ bool threadgroup)
{
if (current != task && !capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 6bfc7eaebfd..8e9777b7640 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -146,7 +146,7 @@ static int ima_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations ima_measurments_seqops = {
+static const struct seq_operations ima_measurments_seqops = {
.start = ima_measurements_start,
.next = ima_measurements_next,
.stop = ima_measurements_stop,
@@ -221,7 +221,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations ima_ascii_measurements_seqops = {
+static const struct seq_operations ima_ascii_measurements_seqops = {
.start = ima_measurements_start,
.next = ima_measurements_next,
.stop = ima_measurements_stop,
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 485fc6233c3..4770be375ff 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -169,9 +169,9 @@ static void key_garbage_collector(struct work_struct *work)
/* trawl through the keys looking for keyrings */
for (;;) {
- if (key->expiry > now && key->expiry < new_timer) {
+ if (key->expiry > limit && key->expiry < new_timer) {
kdebug("will expire %x in %ld",
- key_serial(key), key->expiry - now);
+ key_serial(key), key->expiry - limit);
new_timer = key->expiry;
}
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 500aad0ebd6..3bb90b6f1dd 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -187,7 +187,7 @@ static inline void print_ipv6_addr(struct audit_buffer *ab,
char *name1, char *name2)
{
if (!ipv6_addr_any(addr))
- audit_log_format(ab, " %s=%pI6", name1, addr);
+ audit_log_format(ab, " %s=%pI6c", name1, addr);
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
diff --git a/security/min_addr.c b/security/min_addr.c
index 14cc7b3b8d0..c844eed7915 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -28,12 +28,12 @@ static void update_mmap_min_addr(void)
* sysctl handler which just sets dac_mmap_min_addr = the new value and then
* calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly
*/
-int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp,
+int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
update_mmap_min_addr();
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 1ed0f076aad..b4b5da1c0a4 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -868,8 +868,19 @@ u32 avc_policy_seqno(void)
void avc_disable(void)
{
- avc_flush();
- synchronize_rcu();
- if (avc_node_cachep)
- kmem_cache_destroy(avc_node_cachep);
+ /*
+ * If you are looking at this because you have realized that we are
+ * not destroying the avc_node_cachep it might be easy to fix, but
+ * I don't know the memory barrier semantics well enough to know. It's
+ * possible that some other task dereferenced security_ops when
+ * it still pointed to selinux operations. If that is the case it's
+ * possible that it is about to use the avc and is about to need the
+ * avc_node_cachep. I know I could wrap the security.c security_ops call
+ * in an rcu_lock, but seriously, it's not worth it. Instead I just flush
+ * the cache and get that memory back.
+ */
+ if (avc_node_cachep) {
+ avc_flush();
+ /* kmem_cache_destroy(avc_node_cachep); */
+ }
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 417f7c99452..bb230d5d708 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2411,7 +2411,7 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
/* Wake up the parent if it is waiting so that it can recheck
* wait permission to the new task SID. */
read_lock(&tasklist_lock);
- wake_up_interruptible(&current->real_parent->signal->wait_chldexit);
+ __wake_up_parent(current, current->real_parent);
read_unlock(&tasklist_lock);
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index acae7ef4092..c33b6bb9b6d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -30,17 +30,11 @@
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/audit.h>
+#include <linux/magic.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
-/*
- * I hope these are the hokeyist lines of code in the module. Casey.
- */
-#define DEVPTS_SUPER_MAGIC 0x1cd1
-#define SOCKFS_MAGIC 0x534F434B
-#define TMPFS_MAGIC 0x01021994
-
/**
* smk_fetch - Fetch the smack label from a file.
* @ip: a pointer to the inode
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f83a8098072..aeead758509 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -187,7 +187,7 @@ static void load_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-static struct seq_operations load_seq_ops = {
+static const struct seq_operations load_seq_ops = {
.start = load_seq_start,
.next = load_seq_next,
.show = load_seq_show,
@@ -503,7 +503,7 @@ static void cipso_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-static struct seq_operations cipso_seq_ops = {
+static const struct seq_operations cipso_seq_ops = {
.start = cipso_seq_start,
.stop = cipso_seq_stop,
.next = cipso_seq_next,
@@ -697,7 +697,7 @@ static void netlbladdr_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-static struct seq_operations netlbladdr_seq_ops = {
+static const struct seq_operations netlbladdr_seq_ops = {
.start = netlbladdr_seq_start,
.stop = netlbladdr_seq_stop,
.next = netlbladdr_seq_next,
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 4e34d19ddbc..b4b48afb6de 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -137,9 +137,9 @@ static int pxa2xx_ac97_do_resume(struct snd_card *card)
return 0;
}
-static int pxa2xx_ac97_suspend(struct platform_device *dev, pm_message_t state)
+static int pxa2xx_ac97_suspend(struct device *dev)
{
- struct snd_card *card = platform_get_drvdata(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
int ret = 0;
if (card)
@@ -148,9 +148,9 @@ static int pxa2xx_ac97_suspend(struct platform_device *dev, pm_message_t state)
return ret;
}
-static int pxa2xx_ac97_resume(struct platform_device *dev)
+static int pxa2xx_ac97_resume(struct device *dev)
{
- struct snd_card *card = platform_get_drvdata(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
int ret = 0;
if (card)
@@ -159,9 +159,10 @@ static int pxa2xx_ac97_resume(struct platform_device *dev)
return ret;
}
-#else
-#define pxa2xx_ac97_suspend NULL
-#define pxa2xx_ac97_resume NULL
+static struct dev_pm_ops pxa2xx_ac97_pm_ops = {
+ .suspend = pxa2xx_ac97_suspend,
+ .resume = pxa2xx_ac97_resume,
+};
#endif
static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
@@ -241,11 +242,12 @@ static int __devexit pxa2xx_ac97_remove(struct platform_device *dev)
static struct platform_driver pxa2xx_ac97_driver = {
.probe = pxa2xx_ac97_probe,
.remove = __devexit_p(pxa2xx_ac97_remove),
- .suspend = pxa2xx_ac97_suspend,
- .resume = pxa2xx_ac97_resume,
.driver = {
.name = "pxa2xx-ac97",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pxa2xx_ac97_pm_ops,
+#endif
},
};
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 59e5fbe6af5..561d6d95a2d 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1387,11 +1387,6 @@ static struct action_ops snd_pcm_action_drain_init = {
.post_action = snd_pcm_post_drain_init
};
-struct drain_rec {
- struct snd_pcm_substream *substream;
- wait_queue_t wait;
-};
-
static int snd_pcm_drop(struct snd_pcm_substream *substream);
/*
@@ -1407,10 +1402,9 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
struct snd_card *card;
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *s;
+ wait_queue_t wait;
int result = 0;
- int i, num_drecs;
int nonblock = 0;
- struct drain_rec *drec, drec_tmp, *d;
card = substream->pcm->card;
runtime = substream->runtime;
@@ -1433,38 +1427,10 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
} else if (substream->f_flags & O_NONBLOCK)
nonblock = 1;
- if (nonblock)
- goto lock; /* no need to allocate waitqueues */
-
- /* allocate temporary record for drain sync */
down_read(&snd_pcm_link_rwsem);
- if (snd_pcm_stream_linked(substream)) {
- drec = kmalloc(substream->group->count * sizeof(*drec), GFP_KERNEL);
- if (! drec) {
- up_read(&snd_pcm_link_rwsem);
- snd_power_unlock(card);
- return -ENOMEM;
- }
- } else
- drec = &drec_tmp;
-
- /* count only playback streams */
- num_drecs = 0;
- snd_pcm_group_for_each_entry(s, substream) {
- runtime = s->runtime;
- if (s->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- d = &drec[num_drecs++];
- d->substream = s;
- init_waitqueue_entry(&d->wait, current);
- add_wait_queue(&runtime->sleep, &d->wait);
- }
- }
- up_read(&snd_pcm_link_rwsem);
-
- lock:
snd_pcm_stream_lock_irq(substream);
/* resume pause */
- if (substream->runtime->status->state == SNDRV_PCM_STATE_PAUSED)
+ if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
snd_pcm_pause(substream, 0);
/* pre-start/stop - all running streams are changed to DRAINING state */
@@ -1479,25 +1445,35 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
for (;;) {
long tout;
+ struct snd_pcm_runtime *to_check;
if (signal_pending(current)) {
result = -ERESTARTSYS;
break;
}
- /* all finished? */
- for (i = 0; i < num_drecs; i++) {
- runtime = drec[i].substream->runtime;
- if (runtime->status->state == SNDRV_PCM_STATE_DRAINING)
+ /* find a substream to drain */
+ to_check = NULL;
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ continue;
+ runtime = s->runtime;
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
+ to_check = runtime;
break;
+ }
}
- if (i == num_drecs)
- break; /* yes, all drained */
-
+ if (!to_check)
+ break; /* all drained */
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&to_check->sleep, &wait);
set_current_state(TASK_INTERRUPTIBLE);
snd_pcm_stream_unlock_irq(substream);
+ up_read(&snd_pcm_link_rwsem);
snd_power_unlock(card);
tout = schedule_timeout(10 * HZ);
snd_power_lock(card);
+ down_read(&snd_pcm_link_rwsem);
snd_pcm_stream_lock_irq(substream);
+ remove_wait_queue(&to_check->sleep, &wait);
if (tout == 0) {
if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
result = -ESTRPIPE;
@@ -1512,16 +1488,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
unlock:
snd_pcm_stream_unlock_irq(substream);
-
- if (!nonblock) {
- for (i = 0; i < num_drecs; i++) {
- d = &drec[i];
- runtime = d->substream->runtime;
- remove_wait_queue(&runtime->sleep, &d->wait);
- }
- if (drec != &drec_tmp)
- kfree(drec);
- }
+ up_read(&snd_pcm_link_rwsem);
snd_power_unlock(card);
return result;
diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
index 1edab7b4ea8..3136c88eacd 100644
--- a/sound/oss/swarm_cs4297a.c
+++ b/sound/oss/swarm_cs4297a.c
@@ -110,9 +110,6 @@ static void start_adc(struct cs4297a_state *s);
// rather than 64k as some of the games work more responsively.
// log base 2( buff sz = 32k).
-//static unsigned long defaultorder = 3;
-//MODULE_PARM(defaultorder, "i");
-
//
// Turn on/off debugging compilation by commenting out "#define CSDEBUG"
//
diff --git a/sound/oss/sys_timer.c b/sound/oss/sys_timer.c
index 107534477a2..8db6aefe15e 100644
--- a/sound/oss/sys_timer.c
+++ b/sound/oss/sys_timer.c
@@ -100,9 +100,6 @@ def_tmr_open(int dev, int mode)
curr_tempo = 60;
curr_timebase = 100;
opened = 1;
-
- ;
-
{
def_tmr.expires = (1) + jiffies;
add_timer(&def_tmr);
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
index 012c010c8c8..51afc048961 100644
--- a/sound/pci/lx6464es/lx6464es.h
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -86,7 +86,6 @@ struct lx6464es {
/* messaging */
spinlock_t msg_lock; /* message spinlock */
- atomic_t send_message_locked;
struct lx_rmh rmh;
/* configuration */
@@ -95,7 +94,6 @@ struct lx6464es {
uint hardware_running[2];
u32 board_sample_rate; /* sample rate read from
* board */
- u32 sample_rate; /* our sample rate */
u16 pcm_granularity; /* board blocksize */
/* dma */
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 5812780d6e8..3086b751da4 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -314,98 +314,6 @@ static inline void lx_message_dump(struct lx_rmh *rmh)
#define XILINX_POLL_NO_SLEEP 100
#define XILINX_POLL_ITERATIONS 150
-#if 0 /* not used now */
-static int lx_message_send(struct lx6464es *chip, struct lx_rmh *rmh)
-{
- u32 reg = ED_DSP_TIMED_OUT;
- int dwloop;
- int answer_received;
-
- if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
- snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg);
- return -EBUSY;
- }
-
- /* write command */
- lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);
-
- snd_BUG_ON(atomic_read(&chip->send_message_locked) != 0);
- atomic_set(&chip->send_message_locked, 1);
-
- /* MicoBlaze gogogo */
- lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);
-
- /* wait for interrupt to answer */
- for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS; ++dwloop) {
- answer_received = atomic_read(&chip->send_message_locked);
- if (answer_received == 0)
- break;
- msleep(1);
- }
-
- if (answer_received == 0) {
- /* in Debug mode verify Reg_CSM_MR */
- snd_BUG_ON(!(lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR));
-
- /* command finished, read status */
- if (rmh->dsp_stat == 0)
- reg = lx_dsp_reg_read(chip, eReg_CRM1);
- else
- reg = 0;
- } else {
- int i;
- snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send! "
- "Interrupts disabled?\n");
-
- /* attente bit Reg_CSM_MR */
- for (i = 0; i != XILINX_POLL_ITERATIONS; i++) {
- if ((lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR)) {
- if (rmh->dsp_stat == 0)
- reg = lx_dsp_reg_read(chip, eReg_CRM1);
- else
- reg = 0;
- goto polling_successful;
- }
-
- if (i > XILINX_POLL_NO_SLEEP)
- msleep(1);
- }
- snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send! "
- "polling failed\n");
-
-polling_successful:
- atomic_set(&chip->send_message_locked, 0);
- }
-
- if ((reg & ERROR_VALUE) == 0) {
- /* read response */
- if (rmh->stat_len) {
- snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
-
- lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
- rmh->stat_len);
- }
- } else
- snd_printk(KERN_WARNING LXP "lx_message_send: error_value %x\n",
- reg);
-
- /* clear Reg_CSM_MR */
- lx_dsp_reg_write(chip, eReg_CSM, 0);
-
- switch (reg) {
- case ED_DSP_TIMED_OUT:
- snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n");
- return -ETIMEDOUT;
-
- case ED_DSP_CRASHED:
- snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n");
- return -EAGAIN;
- }
-
- lx_message_dump(rmh);
- return 0;
-}
-#endif /* not used now */
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
@@ -423,7 +331,7 @@ static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
/* MicoBlaze gogogo */
lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);
- /* wait for interrupt to answer */
+ /* wait for device to answer */
for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
if (rmh->dsp_stat == 0)
@@ -1175,10 +1083,6 @@ static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
*r_async_escmd = 1;
}
- if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
- /* xilinx command notification */
- atomic_set(&chip->send_message_locked, 0);
-
if (irq_async) {
/* snd_printd("interrupt: async event pending\n"); */
*r_async_pending = 1;
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index 2758b9017a7..e6932297873 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -277,7 +277,11 @@ static int bf5xx_ac97_resume(struct snd_soc_dai *dai)
if (!dai->active)
return 0;
+#if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT)
+ ret = sport_set_multichannel(sport, 16, 0x3FF, 1);
+#else
ret = sport_set_multichannel(sport, 16, 0x1F, 1);
+#endif
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
@@ -334,7 +338,11 @@ static int bf5xx_ac97_probe(struct platform_device *pdev,
goto sport_err;
}
/*SPORT works in TDM mode to simulate AC97 transfers*/
+#if defined(CONFIG_SND_BF5XX_MULTICHAN_SUPPORT)
+ ret = sport_set_multichannel(sport_handle, 16, 0x3FF, 1);
+#else
ret = sport_set_multichannel(sport_handle, 16, 0x1F, 1);
+#endif
if (ret) {
pr_err("SPORT is busy!\n");
ret = -EBUSY;
diff --git a/sound/soc/blackfin/bf5xx-ac97.h b/sound/soc/blackfin/bf5xx-ac97.h
index 3f2a911fe0c..a1f97dd809d 100644
--- a/sound/soc/blackfin/bf5xx-ac97.h
+++ b/sound/soc/blackfin/bf5xx-ac97.h
@@ -1,5 +1,5 @@
/*
- * linux/sound/arm/bf5xx-ac97.h
+ * sound/soc/blackfin/bf5xx-ac97.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index 876abade27e..1e9d161c76c 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -227,7 +227,8 @@ static int bf5xx_i2s_probe(struct platform_device *pdev,
return 0;
}
-static void bf5xx_i2s_remove(struct snd_soc_dai *dai)
+static void bf5xx_i2s_remove(struct platform_device *pdev,
+ struct snd_soc_dai *dai)
{
pr_debug("%s enter\n", __func__);
peripheral_free_list(&sport_req[sport_num][0]);
@@ -236,36 +237,31 @@ static void bf5xx_i2s_remove(struct snd_soc_dai *dai)
#ifdef CONFIG_PM
static int bf5xx_i2s_suspend(struct snd_soc_dai *dai)
{
- struct sport_device *sport =
- (struct sport_device *)dai->private_data;
pr_debug("%s : sport %d\n", __func__, dai->id);
- if (!dai->active)
- return 0;
+
if (dai->capture.active)
- sport_rx_stop(sport);
+ sport_rx_stop(sport_handle);
if (dai->playback.active)
- sport_tx_stop(sport);
+ sport_tx_stop(sport_handle);
return 0;
}
static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
{
int ret;
- struct sport_device *sport =
- (struct sport_device *)dai->private_data;
pr_debug("%s : sport %d\n", __func__, dai->id);
- if (!dai->active)
- return 0;
- ret = sport_config_rx(sport, RFSR | RCKFE, RSFSE|0x1f, 0, 0);
+ ret = sport_config_rx(sport_handle, bf5xx_i2s.rcr1,
+ bf5xx_i2s.rcr2, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
- ret = sport_config_tx(sport, TFSR | TCKFE, TSFSE|0x1f, 0, 0);
+ ret = sport_config_tx(sport_handle, bf5xx_i2s.tcr1,
+ bf5xx_i2s.tcr2, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
diff --git a/sound/soc/blackfin/bf5xx-i2s.h b/sound/soc/blackfin/bf5xx-i2s.h
index 7107d1a0b06..264ecdcba35 100644
--- a/sound/soc/blackfin/bf5xx-i2s.h
+++ b/sound/soc/blackfin/bf5xx-i2s.h
@@ -1,5 +1,5 @@
/*
- * linux/sound/arm/bf5xx-i2s.h
+ * sound/soc/blackfin/bf5xx-i2s.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/blackfin/bf5xx-sport.c b/sound/soc/blackfin/bf5xx-sport.c
index 469ce7fab20..99051ff0954 100644
--- a/sound/soc/blackfin/bf5xx-sport.c
+++ b/sound/soc/blackfin/bf5xx-sport.c
@@ -326,7 +326,7 @@ static inline int sport_hook_tx_dummy(struct sport_device *sport)
int sport_tx_start(struct sport_device *sport)
{
- unsigned flags;
+ unsigned long flags;
pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__,
sport->tx_run, sport->rx_run);
if (sport->tx_run)
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index 01343dc984f..c48485f2c55 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -251,8 +251,7 @@ static int __devexit ad1836_spi_remove(struct spi_device *spi)
static struct spi_driver ad1836_spi_driver = {
.driver = {
- .name = "ad1836-spi",
- .bus = &spi_bus_type,
+ .name = "ad1836",
.owner = THIS_MODULE,
},
.probe = ad1836_spi_probe,
diff --git a/sound/soc/codecs/ad1938.c b/sound/soc/codecs/ad1938.c
index 9a049a1995a..34b30efc3cb 100644
--- a/sound/soc/codecs/ad1938.c
+++ b/sound/soc/codecs/ad1938.c
@@ -456,7 +456,6 @@ static int __devexit ad1938_spi_remove(struct spi_device *spi)
static struct spi_driver ad1938_spi_driver = {
.driver = {
.name = "ad1938",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad1938_spi_probe,
@@ -515,6 +514,7 @@ static int ad1938_register(struct ad1938_priv *ad1938)
codec->num_dai = 1;
codec->write = ad1938_write_reg;
codec->read = ad1938_read_reg_cache;
+ codec->set_bias_level = ad1938_set_bias_level;
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index d80d414cfbb..5ad677ce80d 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -595,6 +595,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
/* Mono Capture mixer-mux */
{"Capture Right Mixer", "Stereo", "Capture Right Mux"},
+ {"Capture Left Mixer", "Stereo", "Capture Left Mux"},
{"Capture Left Mixer", "Analogue Mix Left", "Capture Left Mux"},
{"Capture Left Mixer", "Analogue Mix Left", "Capture Right Mux"},
{"Capture Right Mixer", "Analogue Mix Right", "Capture Left Mux"},
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index c64e55aa63b..686e5aa9720 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1027,7 +1027,7 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
- wm9081->fs);
for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
cur_val = abs((wm9081->sysclk_rate /
- clk_sys_rates[i].ratio) - wm9081->fs);;
+ clk_sys_rates[i].ratio) - wm9081->fs);
if (cur_val < best_val) {
best = i;
best_val = cur_val;
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index eca22d7829d..7a06c0a8666 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -512,34 +512,49 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
int channel_size)
{
u32 fmt = 0;
+ u32 mask, rotate;
switch (channel_size) {
case DAVINCI_AUDIO_WORD_8:
fmt = 0x03;
+ rotate = 6;
+ mask = 0x000000ff;
break;
case DAVINCI_AUDIO_WORD_12:
fmt = 0x05;
+ rotate = 5;
+ mask = 0x00000fff;
break;
case DAVINCI_AUDIO_WORD_16:
fmt = 0x07;
+ rotate = 4;
+ mask = 0x0000ffff;
break;
case DAVINCI_AUDIO_WORD_20:
fmt = 0x09;
+ rotate = 3;
+ mask = 0x000fffff;
break;
case DAVINCI_AUDIO_WORD_24:
fmt = 0x0B;
+ rotate = 2;
+ mask = 0x00ffffff;
break;
case DAVINCI_AUDIO_WORD_28:
fmt = 0x0D;
+ rotate = 1;
+ mask = 0x0fffffff;
break;
case DAVINCI_AUDIO_WORD_32:
fmt = 0x0F;
+ rotate = 0;
+ mask = 0xffffffff;
break;
default:
@@ -550,6 +565,13 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
RXSSZ(fmt), RXSSZ(0x0F));
mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
TXSSZ(fmt), TXSSZ(0x0F));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXROT(rotate),
+ TXROT(7));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXROT(rotate),
+ RXROT(7));
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, mask);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, mask);
+
return 0;
}
@@ -638,7 +660,6 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
printk(KERN_ERR "playback tdm slot %d not supported\n",
dev->tdm_slots);
- mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0xFFFFFFFF);
mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
} else {
/* bit stream is MSB first with no delay */
@@ -655,7 +676,6 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
printk(KERN_ERR "capture tdm slot %d not supported\n",
dev->tdm_slots);
- mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, 0xFFFFFFFF);
mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
}
}
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 5b9ed646478..d11a6d7e384 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -351,7 +351,7 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai,
do_div(tmp, freq_out);
val = tmp;
- val = (val << 16) | 64;;
+ val = (val << 16) | 64;
ssp_write_reg(ssp, SSACDD, val);
ssacd |= (0x6 << 4);
diff --git a/sound/soc/s3c24xx/s3c24xx_uda134x.c b/sound/soc/s3c24xx/s3c24xx_uda134x.c
index 8e79a416db5..c215d32d632 100644
--- a/sound/soc/s3c24xx/s3c24xx_uda134x.c
+++ b/sound/soc/s3c24xx/s3c24xx_uda134x.c
@@ -67,7 +67,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
{
int ret = 0;
#ifdef ENFORCE_RATES
- struct snd_pcm_runtime *runtime = substream->runtime;;
+ struct snd_pcm_runtime *runtime = substream->runtime;
#endif
mutex_lock(&clk_lock);
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0aba8b6e9c5..b5f1953b614 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -318,7 +318,7 @@ export PERL_PATH
LIB_FILE=libperf.a
-LIB_H += ../../include/linux/perf_counter.h
+LIB_H += ../../include/linux/perf_event.h
LIB_H += ../../include/linux/rbtree.h
LIB_H += ../../include/linux/list.h
LIB_H += util/include/linux/list.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 043d85b7e25..1ec74161581 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -505,7 +505,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
return -1;
}
- if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
+ if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -513,7 +513,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dump_printf(" ...... dso: %s\n", dso->name);
- } else if (event->header.misc & PERF_EVENT_MISC_USER) {
+ } else if (event->header.misc & PERF_RECORD_MISC_USER) {
show = SHOW_USER;
level = '.';
@@ -565,7 +565,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
thread = threads__findnew(event->mmap.pid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
+ dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->mmap.pid,
@@ -575,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
event->mmap.filename);
if (thread == NULL || map == NULL) {
- dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
@@ -591,14 +591,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
struct thread *thread;
thread = threads__findnew(event->comm.pid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+ dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->comm.comm, event->comm.pid);
if (thread == NULL ||
thread__set_comm(thread, event->comm.comm)) {
- dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
total_comm++;
@@ -614,7 +614,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
thread = threads__findnew(event->fork.pid, &threads, &last_match);
parent = threads__findnew(event->fork.ppid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
+ dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->fork.pid, event->fork.ppid);
@@ -627,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
if (!thread || !parent || thread__fork(thread, parent)) {
- dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
return -1;
}
total_fork++;
@@ -639,23 +639,23 @@ static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
switch (event->header.type) {
- case PERF_EVENT_SAMPLE:
+ case PERF_RECORD_SAMPLE:
return process_sample_event(event, offset, head);
- case PERF_EVENT_MMAP:
+ case PERF_RECORD_MMAP:
return process_mmap_event(event, offset, head);
- case PERF_EVENT_COMM:
+ case PERF_RECORD_COMM:
return process_comm_event(event, offset, head);
- case PERF_EVENT_FORK:
+ case PERF_RECORD_FORK:
return process_fork_event(event, offset, head);
/*
* We dont process them right now but they are fine:
*/
- case PERF_EVENT_THROTTLE:
- case PERF_EVENT_UNTHROTTLE:
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
return 0;
default:
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 2459e5a22ed..a5a050af8e7 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -77,7 +77,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
static unsigned long mmap_read_head(struct mmap_data *md)
{
- struct perf_counter_mmap_page *pc = md->base;
+ struct perf_event_mmap_page *pc = md->base;
long head;
head = pc->data_head;
@@ -88,7 +88,7 @@ static unsigned long mmap_read_head(struct mmap_data *md)
static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
{
- struct perf_counter_mmap_page *pc = md->base;
+ struct perf_event_mmap_page *pc = md->base;
/*
* ensure all reads are done before we write the tail out.
@@ -233,7 +233,7 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full)
}
}
- comm_ev.header.type = PERF_EVENT_COMM;
+ comm_ev.header.type = PERF_RECORD_COMM;
size = ALIGN(size, sizeof(u64));
comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);
@@ -288,7 +288,7 @@ static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid)
while (1) {
char bf[BUFSIZ], *pbf = bf;
struct mmap_event mmap_ev = {
- .header = { .type = PERF_EVENT_MMAP },
+ .header = { .type = PERF_RECORD_MMAP },
};
int n;
size_t size;
@@ -355,7 +355,7 @@ static void synthesize_all(void)
static int group_fd;
-static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr)
+static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
{
struct perf_header_attr *h_attr;
@@ -371,7 +371,7 @@ static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int
static void create_counter(int counter, int cpu, pid_t pid)
{
- struct perf_counter_attr *attr = attrs + counter;
+ struct perf_event_attr *attr = attrs + counter;
struct perf_header_attr *h_attr;
int track = !counter; /* only the first counter needs these */
struct {
@@ -417,7 +417,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
attr->disabled = 1;
try_again:
- fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
+ fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
if (fd[nr_cpu][counter] < 0) {
int err = errno;
@@ -444,7 +444,7 @@ try_again:
printf("\n");
error("perfcounter syscall returned with %d (%s)\n",
fd[nr_cpu][counter], strerror(err));
- die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
exit(-1);
}
@@ -478,7 +478,7 @@ try_again:
if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
int ret;
- ret = ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_SET_OUTPUT, multiplex_fd);
+ ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
assert(ret != -1);
} else {
event_array[nr_poll].fd = fd[nr_cpu][counter];
@@ -496,7 +496,7 @@ try_again:
}
}
- ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE);
+ ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
}
static void open_counters(int cpu, pid_t pid)
@@ -642,7 +642,7 @@ static int __cmd_record(int argc, const char **argv)
if (done) {
for (i = 0; i < nr_cpu; i++) {
for (counter = 0; counter < nr_counters; counter++)
- ioctl(fd[i][counter], PERF_COUNTER_IOC_DISABLE);
+ ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE);
}
}
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cdf9a8d27bb..19669c20088 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1121,7 +1121,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
more_data += sizeof(u64);
}
- dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -1158,9 +1158,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
if (comm_list && !strlist__has_entry(comm_list, thread->comm))
return 0;
- cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+ cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- if (cpumode == PERF_EVENT_MISC_KERNEL) {
+ if (cpumode == PERF_RECORD_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -1168,7 +1168,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dump_printf(" ...... dso: %s\n", dso->name);
- } else if (cpumode == PERF_EVENT_MISC_USER) {
+ } else if (cpumode == PERF_RECORD_MISC_USER) {
show = SHOW_USER;
level = '.';
@@ -1210,7 +1210,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
thread = threads__findnew(event->mmap.pid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
+ dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->mmap.pid,
@@ -1221,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
event->mmap.filename);
if (thread == NULL || map == NULL) {
- dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
@@ -1238,14 +1238,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
thread = threads__findnew(event->comm.pid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+ dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->comm.comm, event->comm.pid);
if (thread == NULL ||
thread__set_comm_adjust(thread, event->comm.comm)) {
- dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
total_comm++;
@@ -1262,10 +1262,10 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
thread = threads__findnew(event->fork.pid, &threads, &last_match);
parent = threads__findnew(event->fork.ppid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
+ dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
- event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
+ event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT",
event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
@@ -1276,11 +1276,11 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
if (thread == parent)
return 0;
- if (event->header.type == PERF_EVENT_EXIT)
+ if (event->header.type == PERF_RECORD_EXIT)
return 0;
if (!thread || !parent || thread__fork(thread, parent)) {
- dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
return -1;
}
total_fork++;
@@ -1291,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_lost_event(event_t *event, unsigned long offset, unsigned long head)
{
- dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->lost.id,
@@ -1305,7 +1305,7 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_read_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct perf_counter_attr *attr;
+ struct perf_event_attr *attr;
attr = perf_header__find_attr(event->read.id, header);
@@ -1319,7 +1319,7 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head)
event->read.value);
}
- dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
+ dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->read.pid,
@@ -1337,31 +1337,31 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
trace_event(event);
switch (event->header.type) {
- case PERF_EVENT_SAMPLE:
+ case PERF_RECORD_SAMPLE:
return process_sample_event(event, offset, head);
- case PERF_EVENT_MMAP:
+ case PERF_RECORD_MMAP:
return process_mmap_event(event, offset, head);
- case PERF_EVENT_COMM:
+ case PERF_RECORD_COMM:
return process_comm_event(event, offset, head);
- case PERF_EVENT_FORK:
- case PERF_EVENT_EXIT:
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
return process_task_event(event, offset, head);
- case PERF_EVENT_LOST:
+ case PERF_RECORD_LOST:
return process_lost_event(event, offset, head);
- case PERF_EVENT_READ:
+ case PERF_RECORD_READ:
return process_read_event(event, offset, head);
/*
* We dont process them right now but they are fine:
*/
- case PERF_EVENT_THROTTLE:
- case PERF_EVENT_UNTHROTTLE:
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
return 0;
default:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 275d79c6627..ea9c15c0cdf 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1573,7 +1573,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
more_data += sizeof(u64);
}
- dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -1589,9 +1589,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
return -1;
}
- cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+ cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- if (cpumode == PERF_EVENT_MISC_KERNEL) {
+ if (cpumode == PERF_RECORD_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -1599,7 +1599,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dump_printf(" ...... dso: %s\n", dso->name);
- } else if (cpumode == PERF_EVENT_MISC_USER) {
+ } else if (cpumode == PERF_RECORD_MISC_USER) {
show = SHOW_USER;
level = '.';
@@ -1626,23 +1626,23 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
nr_events++;
switch (event->header.type) {
- case PERF_EVENT_MMAP:
+ case PERF_RECORD_MMAP:
return 0;
- case PERF_EVENT_LOST:
+ case PERF_RECORD_LOST:
nr_lost_chunks++;
nr_lost_events += event->lost.lost;
return 0;
- case PERF_EVENT_COMM:
+ case PERF_RECORD_COMM:
return process_comm_event(event, offset, head);
- case PERF_EVENT_EXIT ... PERF_EVENT_READ:
+ case PERF_RECORD_EXIT ... PERF_RECORD_READ:
return 0;
- case PERF_EVENT_SAMPLE:
+ case PERF_RECORD_SAMPLE:
return process_sample_event(event, offset, head);
- case PERF_EVENT_MAX:
+ case PERF_RECORD_MAX:
default:
return -1;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 61b828236c1..16af2d82e85 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -48,7 +48,7 @@
#include <sys/prctl.h>
#include <math.h>
-static struct perf_counter_attr default_attrs[] = {
+static struct perf_event_attr default_attrs[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -130,11 +130,11 @@ struct stats runtime_cycles_stats;
attrs[counter].config == PERF_COUNT_##c)
#define ERR_PERF_OPEN \
-"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
+"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
static void create_perf_stat_counter(int counter, int pid)
{
- struct perf_counter_attr *attr = attrs + counter;
+ struct perf_event_attr *attr = attrs + counter;
if (scale)
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -144,7 +144,7 @@ static void create_perf_stat_counter(int counter, int pid)
unsigned int cpu;
for (cpu = 0; cpu < nr_cpus; cpu++) {
- fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
+ fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0);
if (fd[cpu][counter] < 0 && verbose)
fprintf(stderr, ERR_PERF_OPEN, counter,
fd[cpu][counter], strerror(errno));
@@ -154,7 +154,7 @@ static void create_perf_stat_counter(int counter, int pid)
attr->disabled = 1;
attr->enable_on_exec = 1;
- fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
+ fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
if (fd[0][counter] < 0 && verbose)
fprintf(stderr, ERR_PERF_OPEN, counter,
fd[0][counter], strerror(errno));
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 60040639627..4405681b313 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -937,21 +937,21 @@ process_event(event_t *event)
switch (event->header.type) {
- case PERF_EVENT_COMM:
+ case PERF_RECORD_COMM:
return process_comm_event(event);
- case PERF_EVENT_FORK:
+ case PERF_RECORD_FORK:
return process_fork_event(event);
- case PERF_EVENT_EXIT:
+ case PERF_RECORD_EXIT:
return process_exit_event(event);
- case PERF_EVENT_SAMPLE:
+ case PERF_RECORD_SAMPLE:
return queue_sample_event(event);
/*
* We dont process them right now but they are fine:
*/
- case PERF_EVENT_MMAP:
- case PERF_EVENT_THROTTLE:
- case PERF_EVENT_UNTHROTTLE:
+ case PERF_RECORD_MMAP:
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
return 0;
default:
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4002ccb3675..1ca88896eee 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -901,7 +901,7 @@ struct mmap_data {
static unsigned int mmap_read_head(struct mmap_data *md)
{
- struct perf_counter_mmap_page *pc = md->base;
+ struct perf_event_mmap_page *pc = md->base;
int head;
head = pc->data_head;
@@ -977,9 +977,9 @@ static void mmap_read_counter(struct mmap_data *md)
old += size;
- if (event->header.type == PERF_EVENT_SAMPLE) {
+ if (event->header.type == PERF_RECORD_SAMPLE) {
int user =
- (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
+ (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER;
process_event(event->ip.ip, md->counter, user);
}
}
@@ -1005,7 +1005,7 @@ int group_fd;
static void start_counter(int i, int counter)
{
- struct perf_counter_attr *attr;
+ struct perf_event_attr *attr;
int cpu;
cpu = profile_cpu;
@@ -1019,7 +1019,7 @@ static void start_counter(int i, int counter)
attr->inherit = (cpu < 0) && inherit;
try_again:
- fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);
+ fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);
if (fd[i][counter] < 0) {
int err = errno;
@@ -1044,7 +1044,7 @@ try_again:
printf("\n");
error("perfcounter syscall returned with %d (%s)\n",
fd[i][counter], strerror(err));
- die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
exit(-1);
}
assert(fd[i][counter] >= 0);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 914ab366e36..e9d256e2f47 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -35,14 +35,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
thread = threads__findnew(event->comm.pid, &threads, &last_match);
- dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+ dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->comm.comm, event->comm.pid);
if (thread == NULL ||
thread__set_comm(thread, event->comm.comm)) {
- dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
total_comm++;
@@ -82,7 +82,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
more_data += sizeof(u64);
}
- dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -98,9 +98,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
return -1;
}
- cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+ cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- if (cpumode == PERF_EVENT_MISC_KERNEL) {
+ if (cpumode == PERF_RECORD_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -108,7 +108,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dump_printf(" ...... dso: %s\n", dso->name);
- } else if (cpumode == PERF_EVENT_MISC_USER) {
+ } else if (cpumode == PERF_RECORD_MISC_USER) {
show = SHOW_USER;
level = '.';
@@ -146,19 +146,19 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
trace_event(event);
switch (event->header.type) {
- case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
+ case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
return 0;
- case PERF_EVENT_COMM:
+ case PERF_RECORD_COMM:
return process_comm_event(event, offset, head);
- case PERF_EVENT_EXIT ... PERF_EVENT_READ:
+ case PERF_RECORD_EXIT ... PERF_RECORD_READ:
return 0;
- case PERF_EVENT_SAMPLE:
+ case PERF_RECORD_SAMPLE:
return process_sample_event(event, offset, head);
- case PERF_EVENT_MAX:
+ case PERF_RECORD_MAX:
default:
return -1;
}
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index f71e0d245cb..f1946d107b1 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -18,10 +18,10 @@ underlying hardware counters.
Performance counters are accessed via special file descriptors.
There's one file descriptor per virtual counter used.
-The special file descriptor is opened via the perf_counter_open()
+The special file descriptor is opened via the perf_event_open()
system call:
- int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr,
+ int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr,
pid_t pid, int cpu, int group_fd,
unsigned long flags);
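
As a quick illustration of the renamed interface described above (not part of the patch): the sketch below opens a single hardware-cycles counter on the current task and reads its value. It assumes the post-rename <linux/perf_event.h> header and the __NR_perf_event_open syscall number used elsewhere in this series; the wrapper mirrors the one in tools/perf/perf.h, and main() plus the error handling are illustrative only.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* same shape as the wrapper in tools/perf/perf.h after the rename */
static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* pid == 0, cpu == -1: follow the current task on any CPU */
        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* ... the workload being measured would run here ... */

        /* with read_format == 0, read() returns a single u64 count */
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("cycles: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}
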
@@ -32,9 +32,9 @@ can be used to set the blocking mode, etc.
Multiple counters can be kept open at a time, and the counters
can be poll()ed.
-When creating a new counter fd, 'perf_counter_hw_event' is:
+When creating a new counter fd, 'perf_event_hw_event' is:
-struct perf_counter_hw_event {
+struct perf_event_hw_event {
/*
* The MSB of the config word signifies if the rest contains cpu
* specific (raw) counter configuration data, if unset, the next
@@ -93,7 +93,7 @@ specified by 'event_id':
/*
* Generalized performance counter event types, used by the hw_event.event_id
- * parameter of the sys_perf_counter_open() syscall:
+ * parameter of the sys_perf_event_open() syscall:
*/
enum hw_event_ids {
/*
@@ -159,7 +159,7 @@ in size.
* reads on the counter should return the indicated quantities,
* in increasing order of bit value, after the counter value.
*/
-enum perf_counter_read_format {
+enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
};
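
To make the read_format bits above concrete, here is a hedged sketch of how a consumer might scale a counter that was time-multiplexed, along the lines of what the tools/perf stat code does. The layout assumes only the two TOTAL_TIME bits are set on a single (non-group) counter; read_scaled and read_scaled_count are illustrative names, not part of the ABI.

#include <stdint.h>
#include <unistd.h>

/*
 * Layout of a read() when exactly PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING are set on a single (non-group) counter:
 * the count comes first, then the two times in increasing bit order.
 */
struct read_scaled {
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
};

static uint64_t read_scaled_count(int fd)
{
        struct read_scaled rs;

        if (read(fd, &rs, sizeof(rs)) != sizeof(rs))
                return 0;
        if (rs.time_running == 0)
                return 0;       /* the counter never got onto the PMU */
        /* compensate for time the counter was enabled but not running */
        return (uint64_t)((double)rs.value * rs.time_enabled / rs.time_running);
}
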
@@ -178,7 +178,7 @@ interrupt:
* Bits that can be set in hw_event.record_type to request information
* in the overflow packets.
*/
-enum perf_counter_record_format {
+enum perf_event_record_format {
PERF_RECORD_IP = 1U << 0,
PERF_RECORD_TID = 1U << 1,
PERF_RECORD_TIME = 1U << 2,
@@ -228,7 +228,7 @@ these events are recorded in the ring-buffer (see below).
The 'comm' bit allows tracking of process comm data on process creation.
This too is recorded in the ring-buffer (see below).
-The 'pid' parameter to the perf_counter_open() system call allows the
+The 'pid' parameter to the perf_event_open() system call allows the
counter to be specific to a task:
pid == 0: if the pid parameter is zero, the counter is attached to the
@@ -258,7 +258,7 @@ The 'flags' parameter is currently unused and must be zero.
The 'group_fd' parameter allows counter "groups" to be set up. A
counter group has one counter which is the group "leader". The leader
-is created first, with group_fd = -1 in the perf_counter_open call
+is created first, with group_fd = -1 in the perf_event_open call
that creates it. The rest of the group members are created
subsequently, with group_fd giving the fd of the group leader.
(A single counter on its own is created with group_fd = -1 and is
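
A minimal sketch of the group_fd convention just described (again, not from the patch): the leader is opened with group_fd == -1 and a second event joins the group by passing the leader's fd. PERF_COUNT_HW_INSTRUCTIONS is used purely as an example second event, and open_cycle_insn_group is an illustrative name.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns the group leader's fd (or -1); stores the member's fd in *insn_fd. */
static int open_cycle_insn_group(int *insn_fd)
{
        struct perf_event_attr cycles = {
                .size   = sizeof(cycles),
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_event_attr insns = {
                .size   = sizeof(insns),
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_INSTRUCTIONS,
        };
        int leader;

        /* the leader is created first, with group_fd == -1 */
        leader = syscall(__NR_perf_event_open, &cycles, 0, -1, -1, 0);
        if (leader < 0)
                return -1;
        /* subsequent members pass the leader's fd as group_fd */
        *insn_fd = syscall(__NR_perf_event_open, &insns, 0, -1, leader, 0);
        return leader;
}
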
@@ -277,13 +277,13 @@ tracking are logged into a ring-buffer. This ring-buffer is created and
accessed through mmap().
The mmap size should be 1+2^n pages, where the first page is a meta-data page
-(struct perf_counter_mmap_page) that contains various bits of information such
+(struct perf_event_mmap_page) that contains various bits of information such
as where the ring-buffer head is.
/*
* Structure of the page that can be mapped via mmap
*/
-struct perf_counter_mmap_page {
+struct perf_event_mmap_page {
__u32 version; /* version number of this structure */
__u32 compat_version; /* lowest version this is compat with */
@@ -317,7 +317,7 @@ struct perf_counter_mmap_page {
* Control data for the mmap() data buffer.
*
* User-space reading this value should issue an rmb(), on SMP capable
- * platforms, after reading this value -- see perf_counter_wakeup().
+ * platforms, after reading this value -- see perf_event_wakeup().
*/
__u32 data_head; /* head in the data section */
};
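
The following sketch shows one way a consumer could map the meta-data page plus a 2^n-page data area and read data_head, assuming read-only access is enough for peeking at the head. __sync_synchronize() stands in for the rmb() the text asks for, and the 8-page buffer, the peek_data_head name and the immediate munmap() are illustrative; a real consumer would keep the mapping and walk the records below the head.

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>

/* Map 1 + 2^3 pages and return the current data_head (0 on failure). */
static uint64_t peek_data_head(int fd)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = (size_t)(1 + 8) * page;
        struct perf_event_mmap_page *pg;
        uint64_t head;

        pg = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
        if (pg == MAP_FAILED)
                return 0;
        head = pg->data_head;
        __sync_synchronize();   /* stands in for the rmb() the text requires */
        munmap(pg, len);        /* a real consumer would keep the mapping */
        return head;
}
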
@@ -327,9 +327,9 @@ NOTE: the hw-counter userspace bits are arch specific and are currently only
The following 2^n pages are the ring-buffer which contains events of the form:
-#define PERF_EVENT_MISC_KERNEL (1 << 0)
-#define PERF_EVENT_MISC_USER (1 << 1)
-#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
+#define PERF_RECORD_MISC_KERNEL (1 << 0)
+#define PERF_RECORD_MISC_USER (1 << 1)
+#define PERF_RECORD_MISC_OVERFLOW (1 << 2)
struct perf_event_header {
__u32 type;
@@ -353,8 +353,8 @@ enum perf_event_type {
* char filename[];
* };
*/
- PERF_EVENT_MMAP = 1,
- PERF_EVENT_MUNMAP = 2,
+ PERF_RECORD_MMAP = 1,
+ PERF_RECORD_MUNMAP = 2,
/*
* struct {
@@ -364,10 +364,10 @@ enum perf_event_type {
* char comm[];
* };
*/
- PERF_EVENT_COMM = 3,
+ PERF_RECORD_COMM = 3,
/*
- * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
+ * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field
* will be PERF_RECORD_*
*
* struct {
@@ -397,7 +397,7 @@ Notification of new events is possible through poll()/select()/epoll() and
fcntl() managing signals.
Normally a notification is generated for every page filled, however one can
-additionally set perf_counter_hw_event.wakeup_events to generate one every
+additionally set perf_event_hw_event.wakeup_events to generate one every
so many counter overflow events.
Future work will include a splice() interface to the ring-buffer.
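
For completeness, a tiny sketch of the poll()-based notification mentioned above; wait_for_samples is an illustrative name and the timeout policy is left to the caller.

#include <poll.h>

/* Block for up to timeout_ms waiting for new data in the ring buffer. */
static int wait_for_samples(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        return poll(&pfd, 1, timeout_ms);  /* >0 data, 0 timeout, <0 error */
}
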
@@ -409,11 +409,11 @@ events but does continue to exist and maintain its count value.
An individual counter or counter group can be enabled with
- ioctl(fd, PERF_COUNTER_IOC_ENABLE);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE);
or disabled with
- ioctl(fd, PERF_COUNTER_IOC_DISABLE);
+ ioctl(fd, PERF_EVENT_IOC_DISABLE);
Enabling or disabling the leader of a group enables or disables the
whole group; that is, while the group leader is disabled, none of the
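
A short sketch of the renamed enable/disable ioctls in use, matching how the builtin-record changes above drive them: the fd is assumed to have been opened with attr.disabled = 1 so only the bracketed region is counted; measure_region and the workload callback are illustrative.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* Count only while workload() runs; assumes the fd was opened with
 * attr.disabled = 1 so nothing is counted before the first enable. */
static void measure_region(int fd, void (*workload)(void))
{
        ioctl(fd, PERF_EVENT_IOC_ENABLE);   /* on a leader, enables the whole group */
        workload();
        ioctl(fd, PERF_EVENT_IOC_DISABLE);
}
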
@@ -424,16 +424,16 @@ other counter.
Additionally, non-inherited overflow counters can use
- ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr);
+ ioctl(fd, PERF_EVENT_IOC_REFRESH, nr);
to enable a counter for 'nr' events, after which it gets disabled again.
A process can enable or disable all the counter groups that are
attached to it, using prctl:
- prctl(PR_TASK_PERF_COUNTERS_ENABLE);
+ prctl(PR_TASK_PERF_EVENTS_ENABLE);
- prctl(PR_TASK_PERF_COUNTERS_DISABLE);
+ prctl(PR_TASK_PERF_EVENTS_DISABLE);
This applies to all counters on the current process, whether created
by this process or by another, and doesn't affect any counters that
@@ -447,11 +447,11 @@ Arch requirements
If your architecture does not have hardware performance metrics, you can
still use the generic software counters based on hrtimers for sampling.
-So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you
+So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
will need at least this:
- - asm/perf_counter.h - a basic stub will suffice at first
+ - asm/perf_event.h - a basic stub will suffice at first
- support for atomic64 types (and associated helper functions)
- - set_perf_counter_pending() implemented
+ - set_perf_event_pending() implemented
If your architecture does have hardware capabilities, you can override the
-weak stub hw_perf_counter_init() to register hardware counters.
+weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 2abeb20d0bf..8cc4623afd6 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,15 +52,15 @@
#include <sys/types.h>
#include <sys/syscall.h>
-#include "../../include/linux/perf_counter.h"
+#include "../../include/linux/perf_event.h"
#include "util/types.h"
/*
- * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
+ * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
* counters in the current task.
*/
-#define PR_TASK_PERF_COUNTERS_DISABLE 31
-#define PR_TASK_PERF_COUNTERS_ENABLE 32
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
@@ -90,12 +90,12 @@ static inline unsigned long long rdclock(void)
_min1 < _min2 ? _min1 : _min2; })
static inline int
-sys_perf_counter_open(struct perf_counter_attr *attr,
+sys_perf_event_open(struct perf_event_attr *attr,
pid_t pid, int cpu, int group_fd,
unsigned long flags)
{
attr->size = sizeof(*attr);
- return syscall(__NR_perf_counter_open, attr, pid, cpu,
+ return syscall(__NR_perf_event_open, attr, pid, cpu,
group_fd, flags);
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 018d414a09d..2c9c26d6ded 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,5 +1,5 @@
-#ifndef __PERF_EVENT_H
-#define __PERF_EVENT_H
+#ifndef __PERF_RECORD_H
+#define __PERF_RECORD_H
#include "../perf.h"
#include "util.h"
#include <linux/list.h>
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bb4fca3efcc..e306857b2c2 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -9,7 +9,7 @@
/*
* Create new perf.data header attribute:
*/
-struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr)
+struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
{
struct perf_header_attr *self = malloc(sizeof(*self));
@@ -134,7 +134,7 @@ struct perf_file_section {
};
struct perf_file_attr {
- struct perf_counter_attr attr;
+ struct perf_event_attr attr;
struct perf_file_section ids;
};
@@ -320,7 +320,7 @@ u64 perf_header__sample_type(struct perf_header *header)
return type;
}
-struct perf_counter_attr *
+struct perf_event_attr *
perf_header__find_attr(u64 id, struct perf_header *header)
{
int i;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 7b0e84a8717..a0761bc7863 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,12 +1,12 @@
#ifndef _PERF_HEADER_H
#define _PERF_HEADER_H
-#include "../../../include/linux/perf_counter.h"
+#include "../../../include/linux/perf_event.h"
#include <sys/types.h>
#include "types.h"
struct perf_header_attr {
- struct perf_counter_attr attr;
+ struct perf_event_attr attr;
int ids, size;
u64 *id;
off_t id_offset;
@@ -34,11 +34,11 @@ char *perf_header__find_event(u64 id);
struct perf_header_attr *
-perf_header_attr__new(struct perf_counter_attr *attr);
+perf_header_attr__new(struct perf_event_attr *attr);
void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
u64 perf_header__sample_type(struct perf_header *header);
-struct perf_counter_attr *
+struct perf_event_attr *
perf_header__find_attr(u64 id, struct perf_header *header);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 89172fd0038..13ab4b842d4 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -10,7 +10,7 @@
int nr_counters;
-struct perf_counter_attr attrs[MAX_COUNTERS];
+struct perf_event_attr attrs[MAX_COUNTERS];
struct event_symbol {
u8 type;
@@ -48,13 +48,13 @@ static struct event_symbol event_symbols[] = {
{ CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};
-#define __PERF_COUNTER_FIELD(config, name) \
- ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)
+#define __PERF_EVENT_FIELD(config, name) \
+ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
-#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW)
-#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG)
-#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE)
-#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT)
+#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
+#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
+#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
+#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
static const char *hw_event_names[] = {
"cycles",
@@ -352,7 +352,7 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
}
static enum event_result
-parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
+parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
{
const char *s = *str;
int cache_type = -1, cache_op = -1, cache_result = -1;
@@ -417,7 +417,7 @@ parse_single_tracepoint_event(char *sys_name,
const char *evt_name,
unsigned int evt_length,
char *flags,
- struct perf_counter_attr *attr,
+ struct perf_event_attr *attr,
const char **strp)
{
char evt_path[MAXPATHLEN];
@@ -505,7 +505,7 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
static enum event_result parse_tracepoint_event(const char **strp,
- struct perf_counter_attr *attr)
+ struct perf_event_attr *attr)
{
const char *evt_name;
char *flags;
@@ -563,7 +563,7 @@ static int check_events(const char *str, unsigned int i)
}
static enum event_result
-parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
+parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
{
const char *str = *strp;
unsigned int i;
@@ -582,7 +582,7 @@ parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
}
static enum event_result
-parse_raw_event(const char **strp, struct perf_counter_attr *attr)
+parse_raw_event(const char **strp, struct perf_event_attr *attr)
{
const char *str = *strp;
u64 config;
@@ -601,7 +601,7 @@ parse_raw_event(const char **strp, struct perf_counter_attr *attr)
}
static enum event_result
-parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
+parse_numeric_event(const char **strp, struct perf_event_attr *attr)
{
const char *str = *strp;
char *endp;
@@ -623,7 +623,7 @@ parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
}
static enum event_result
-parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
+parse_event_modifier(const char **strp, struct perf_event_attr *attr)
{
const char *str = *strp;
int eu = 1, ek = 1, eh = 1;
@@ -656,7 +656,7 @@ parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
* Symbolic names are (almost) exactly matched.
*/
static enum event_result
-parse_event_symbols(const char **str, struct perf_counter_attr *attr)
+parse_event_symbols(const char **str, struct perf_event_attr *attr)
{
enum event_result ret;
@@ -711,7 +711,7 @@ static void store_event_type(const char *orgname)
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
- struct perf_counter_attr attr;
+ struct perf_event_attr attr;
enum event_result ret;
if (strchr(str, ':'))
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 60704c15961..30c60811284 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -16,7 +16,7 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
extern int nr_counters;
-extern struct perf_counter_attr attrs[MAX_COUNTERS];
+extern struct perf_event_attr attrs[MAX_COUNTERS];
extern const char *event_name(int ctr);
extern const char *__event_name(int type, u64 config);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 1fd824c1f1c..af4b0573b37 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -480,12 +480,12 @@ out:
}
static struct tracepoint_path *
-get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
+get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
{
struct tracepoint_path path, *ppath = &path;
int i;
- for (i = 0; i < nb_counters; i++) {
+ for (i = 0; i < nb_events; i++) {
if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
continue;
ppath->next = tracepoint_id_to_path(pattrs[i].config);
@@ -496,7 +496,7 @@ get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
return path.next;
}
-void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
+void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
{
char buf[BUFSIZ];
struct tracepoint_path *tps;
@@ -530,7 +530,7 @@ void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
page_size = getpagesize();
write_or_die(&page_size, 4);
- tps = get_tracepoints_path(pattrs, nb_counters);
+ tps = get_tracepoints_path(pattrs, nb_events);
read_header_files();
read_ftrace_files(tps);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index d35ebf1e29f..693f815c942 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -240,6 +240,6 @@ unsigned long long
raw_field_value(struct event *event, const char *name, void *data);
void *raw_field_ptr(struct event *event, const char *name, void *data);
-void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters);
+void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
#endif /* _TRACE_EVENTS_H */
diff --git a/usr/.gitignore b/usr/.gitignore
index 69b2e89fa16..8e48117a3f3 100644
--- a/usr/.gitignore
+++ b/usr/.gitignore
@@ -4,5 +4,7 @@
gen_init_cpio
initramfs_data.cpio
initramfs_data.cpio.gz
+initramfs_data.cpio.bz2
+initramfs_data.cpio.lzma
initramfs_list
include
diff --git a/usr/Makefile b/usr/Makefile
index 245145a99c1..1e6a9e4a72c 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -6,7 +6,7 @@ klibcdirs:;
PHONY += klibcdirs
-# Gzip, but no bzip2
+# Gzip
suffix_$(CONFIG_INITRAMFS_COMPRESSION_GZIP) = .gz
# Bzip2
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index f1d3fe34176..83b3dde1a83 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -446,7 +446,7 @@ static int cpio_mkfile_line(const char *line)
return rc;
}
-void usage(const char *prog)
+static void usage(const char *prog)
{
fprintf(stderr, "Usage:\n"
"\t%s <cpio_list>\n"
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 897bff3b7df..034a798b043 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
bool called = true;
struct kvm_vcpu *vcpu;
- if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
- cpumask_clear(cpus);
+ zalloc_cpumask_var(&cpus, GFP_ATOMIC);
spin_lock(&kvm->requests_lock);
me = smp_processor_id();